# source: osm/LCM.git — osm_lcm/ns.py @ 63ae2a409915d3c826b36cd7b8ee15ab74bf87ce
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 get_ee_id_parts,
63 vld_to_ro_ip_profile,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm_conn import K8sHelmConnector
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import SystemRandom
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
class NsLcm(LcmBase):
    """Lifecycle manager for Network Services.

    Coordinates the RO client, the N2VC/Juju VCA connector and the K8s
    (helm v2/v3, juju-bundle) connectors to perform NS operations.
    """

    # Sentinel return values used by sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # NOTE(review): identifier for execution-environment TLS artifacts;
    # usage lives elsewhere in this class — confirm before relying on it
    EE_TLS_NAME = "ee-tls"
    task_name_deploy_vca = "Deploying VCA"
    # Maps relation-operation keywords (as found in descriptors) to the
    # equivalent python comparison operator strings
    rel_operation_types = {
        "GE": ">=",
        "LE": "<=",
        "GT": ">",
        "LT": "<",
        "EQ": "==",
        "NE": "!=",
    }
145
    def __init__(self, msg, lcm_tasks, config: LcmCfg):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message-bus client, passed through to LcmBase
        :param lcm_tasks: registry used to track asynchronous LCM tasks
        :param config: LcmCfg object carrying timeout, RO and VCA settings
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # db/fs singletons are initialized elsewhere; only the already-built
        # instances are taken here
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector; VCA status changes are reflected back into
        # the nsr record through the _on_update_n2vc_db callback
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution-environment connector (shares the n2vc callback)
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 connector — no db status callback
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 connector — no db status callback
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle connector; KDU status changes go through _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # descriptor kdu-model type -> k8s connector instance
        # (plain "chart" is served by helm v3)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # charm/ee type -> VCA connector instance
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        # operation type -> RO status-polling coroutine (healing uses the
        # dedicated recreate_status endpoint)
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
229
230 @staticmethod
231 def increment_ip_mac(ip_mac, vm_index=1):
232 if not isinstance(ip_mac, str):
233 return ip_mac
234 try:
235 # try with ipv4 look for last dot
236 i = ip_mac.rfind(".")
237 if i > 0:
238 i += 1
239 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
240 # try with ipv6 or mac look for last colon. Operate in hex
241 i = ip_mac.rfind(":")
242 if i > 0:
243 i += 1
244 # format in hex, len can be 2 for mac or 4 for ipv6
245 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
246 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
247 )
248 except Exception:
249 pass
250 return None
251
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by the N2VC connector when VCA data changes.

        Refreshes "vcaStatus" in the nsr record and, when the NS is READY or
        DEGRADED, re-evaluates machines/applications health to toggle the
        nsState between READY and DEGRADED.

        :param table: db table of the triggering change (unused here; the nsr
            is always re-read from "nsrs")
        :param filter: db filter; its "_id" is taken as the nsr id
        :param path: dotted path of the changed element; its last segment is
            expected to be the VCA index
        :param updated_data: payload from the connector (not used directly)
        :param vca_id: VCA (controller) id forwarded to n2vc.get_status
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted segment of the path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                if config_status == "BROKEN" and vca_status != "failed":
                    # NOTE(review): db_dict has no "configurationStatus" key at
                    # this point, so this assignment raises KeyError, which the
                    # except below swallows — confirm whether this branch was
                    # ever meant to reach the database
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # only flip the state in the direction that matches the check
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the caller's task handling
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
351
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param vca_id: VCA (controller) id forwarded to status_kdu
        :param cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        nsr_id = filter.get("_id")
        try:
            # ask the connector matching the cluster type for the full KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the caller's task handling
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
391
392 @staticmethod
393 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
394 try:
395 env = Environment(
396 undefined=StrictUndefined,
397 autoescape=select_autoescape(default_for_string=True, default=True),
398 )
399 template = env.from_string(cloud_init_text)
400 return template.render(additional_params or {})
401 except UndefinedError as e:
402 raise LcmException(
403 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
404 "file, must be provided in the instantiation parameters inside the "
405 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
406 )
407 except (TemplateError, TemplateNotFound) as e:
408 raise LcmException(
409 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
410 vnfd_id, vdu_id, e
411 )
412 )
413
414 def _get_vdu_cloud_init_content(self, vdu, vnfd):
415 cloud_init_content = cloud_init_file = None
416 try:
417 if vdu.get("cloud-init-file"):
418 base_folder = vnfd["_admin"]["storage"]
419 if base_folder["pkg-dir"]:
420 cloud_init_file = "{}/{}/cloud_init/{}".format(
421 base_folder["folder"],
422 base_folder["pkg-dir"],
423 vdu["cloud-init-file"],
424 )
425 else:
426 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
427 base_folder["folder"],
428 vdu["cloud-init-file"],
429 )
430 with self.fs.file_open(cloud_init_file, "r") as ci_file:
431 cloud_init_content = ci_file.read()
432 elif vdu.get("cloud-init"):
433 cloud_init_content = vdu["cloud-init"]
434
435 return cloud_init_content
436 except FsException as e:
437 raise LcmException(
438 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
439 vnfd["id"], vdu["id"], cloud_init_file, e
440 )
441 )
442
443 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
444 vdur = next(
445 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
446 )
447 additional_params = vdur.get("additionalParams")
448 return parse_yaml_strings(additional_params)
449
450 @staticmethod
451 def ip_profile_2_RO(ip_profile):
452 RO_ip_profile = deepcopy(ip_profile)
453 if "dns-server" in RO_ip_profile:
454 if isinstance(RO_ip_profile["dns-server"], list):
455 RO_ip_profile["dns-address"] = []
456 for ds in RO_ip_profile.pop("dns-server"):
457 RO_ip_profile["dns-address"].append(ds["address"])
458 else:
459 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
460 if RO_ip_profile.get("ip-version") == "ipv4":
461 RO_ip_profile["ip-version"] = "IPv4"
462 if RO_ip_profile.get("ip-version") == "ipv6":
463 RO_ip_profile["ip-version"] = "IPv6"
464 if "dhcp-params" in RO_ip_profile:
465 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
466 return RO_ip_profile
467
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Scale the vdur list of a vnfr record in the database.

        :param db_vnfr: vnfr record (dict); its "vdur" list is refreshed from
            the database before returning (the passed dict is modified)
        :param vdu_create: dict {vdu-id: number of instances to add}
        :param vdu_delete: dict {vdu-id: number of instances to remove}
        :param mark_delete: when True, instances are only marked "DELETING"
            instead of being pulled from the vdur list
        :raises LcmException: scaling out a vdu-id with no existing vdur and
            no stored vdur-template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur with this vdu-id as clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each new replica starts from a fresh copy of the source vdur
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            # keep deterministic addressing by incrementing the
                            # source address per replica
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
578
579 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
580 """
581 Updates database nsr with the RO info for the created vld
582 :param ns_update_nsr: dictionary to be filled with the updated info
583 :param db_nsr: content of db_nsr. This is also modified
584 :param nsr_desc_RO: nsr descriptor from RO
585 :return: Nothing, LcmException is raised on errors
586 """
587
588 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
589 for net_RO in get_iterable(nsr_desc_RO, "nets"):
590 if vld["id"] != net_RO.get("ns_net_osm_id"):
591 continue
592 vld["vim-id"] = net_RO.get("vim_net_id")
593 vld["name"] = net_RO.get("vim_name")
594 vld["status"] = net_RO.get("status")
595 vld["status-detailed"] = net_RO.get("error_msg")
596 ns_update_nsr["vld.{}".format(vld_index)] = vld
597 break
598 else:
599 raise LcmException(
600 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
601 )
602
603 def set_vnfr_at_error(self, db_vnfrs, error_text):
604 try:
605 for db_vnfr in db_vnfrs.values():
606 vnfr_update = {"status": "ERROR"}
607 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
608 if "status" not in vdur:
609 vdur["status"] = "ERROR"
610 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
611 if error_text:
612 vdur["status-detailed"] = str(error_text)
613 vnfr_update[
614 "vdur.{}.status-detailed".format(vdu_index)
615 ] = "ERROR"
616 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
617 except DbException as e:
618 self.logger.error("Cannot update vnf. {}".format(e))
619
620 def _get_ns_config_info(self, nsr_id):
621 """
622 Generates a mapping between vnf,vdu elements and the N2VC id
623 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
624 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
625 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
626 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
627 """
628 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
629 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
630 mapping = {}
631 ns_config_info = {"osm-config-mapping": mapping}
632 for vca in vca_deployed_list:
633 if not vca["member-vnf-index"]:
634 continue
635 if not vca["vdu_id"]:
636 mapping[vca["member-vnf-index"]] = vca["application"]
637 else:
638 mapping[
639 "{}.{}.{}".format(
640 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
641 )
642 ] = vca["application"]
643 return ns_config_info
644
645 async def _instantiate_ng_ro(
646 self,
647 logging_text,
648 nsr_id,
649 nsd,
650 db_nsr,
651 db_nslcmop,
652 db_vnfrs,
653 db_vnfds,
654 n2vc_key_list,
655 stage,
656 start_deploy,
657 timeout_ns_deploy,
658 ):
659 db_vims = {}
660
661 def get_vim_account(vim_account_id):
662 nonlocal db_vims
663 if vim_account_id in db_vims:
664 return db_vims[vim_account_id]
665 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
666 db_vims[vim_account_id] = db_vim
667 return db_vim
668
669 # modify target_vld info with instantiation parameters
670 def parse_vld_instantiation_params(
671 target_vim, target_vld, vld_params, target_sdn
672 ):
673 if vld_params.get("ip-profile"):
674 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
675 vld_params["ip-profile"]
676 )
677 if vld_params.get("provider-network"):
678 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
679 "provider-network"
680 ]
681 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
682 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
683 "provider-network"
684 ]["sdn-ports"]
685
686 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
687 # if wim_account_id is specified in vld_params, validate if it is feasible.
688 wim_account_id, db_wim = select_feasible_wim_account(
689 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
690 )
691
692 if wim_account_id:
693 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
694 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
695 # update vld_params with correct WIM account Id
696 vld_params["wimAccountId"] = wim_account_id
697
698 target_wim = "wim:{}".format(wim_account_id)
699 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
700 sdn_ports = get_sdn_ports(vld_params, db_wim)
701 if len(sdn_ports) > 0:
702 target_vld["vim_info"][target_wim] = target_wim_attrs
703 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
704
705 self.logger.debug(
706 "Target VLD with WIM data: {:s}".format(str(target_vld))
707 )
708
709 for param in ("vim-network-name", "vim-network-id"):
710 if vld_params.get(param):
711 if isinstance(vld_params[param], dict):
712 for vim, vim_net in vld_params[param].items():
713 other_target_vim = "vim:" + vim
714 populate_dict(
715 target_vld["vim_info"],
716 (other_target_vim, param.replace("-", "_")),
717 vim_net,
718 )
719 else: # isinstance str
720 target_vld["vim_info"][target_vim][
721 param.replace("-", "_")
722 ] = vld_params[param]
723 if vld_params.get("common_id"):
724 target_vld["common_id"] = vld_params.get("common_id")
725
726 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
727 def update_ns_vld_target(target, ns_params):
728 for vnf_params in ns_params.get("vnf", ()):
729 if vnf_params.get("vimAccountId"):
730 target_vnf = next(
731 (
732 vnfr
733 for vnfr in db_vnfrs.values()
734 if vnf_params["member-vnf-index"]
735 == vnfr["member-vnf-index-ref"]
736 ),
737 None,
738 )
739 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
740 if not vdur:
741 continue
742 for a_index, a_vld in enumerate(target["ns"]["vld"]):
743 target_vld = find_in_list(
744 get_iterable(vdur, "interfaces"),
745 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
746 )
747
748 vld_params = find_in_list(
749 get_iterable(ns_params, "vld"),
750 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
751 )
752 if target_vld:
753 if vnf_params.get("vimAccountId") not in a_vld.get(
754 "vim_info", {}
755 ):
756 target_vim_network_list = [
757 v for _, v in a_vld.get("vim_info").items()
758 ]
759 target_vim_network_name = next(
760 (
761 item.get("vim_network_name", "")
762 for item in target_vim_network_list
763 ),
764 "",
765 )
766
767 target["ns"]["vld"][a_index].get("vim_info").update(
768 {
769 "vim:{}".format(vnf_params["vimAccountId"]): {
770 "vim_network_name": target_vim_network_name,
771 }
772 }
773 )
774
775 if vld_params:
776 for param in ("vim-network-name", "vim-network-id"):
777 if vld_params.get(param) and isinstance(
778 vld_params[param], dict
779 ):
780 for vim, vim_net in vld_params[
781 param
782 ].items():
783 other_target_vim = "vim:" + vim
784 populate_dict(
785 target["ns"]["vld"][a_index].get(
786 "vim_info"
787 ),
788 (
789 other_target_vim,
790 param.replace("-", "_"),
791 ),
792 vim_net,
793 )
794
795 nslcmop_id = db_nslcmop["_id"]
796 target = {
797 "name": db_nsr["name"],
798 "ns": {"vld": []},
799 "vnf": [],
800 "image": deepcopy(db_nsr["image"]),
801 "flavor": deepcopy(db_nsr["flavor"]),
802 "action_id": nslcmop_id,
803 "cloud_init_content": {},
804 }
805 for image in target["image"]:
806 image["vim_info"] = {}
807 for flavor in target["flavor"]:
808 flavor["vim_info"] = {}
809 if db_nsr.get("shared-volumes"):
810 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
811 for shared_volumes in target["shared-volumes"]:
812 shared_volumes["vim_info"] = {}
813 if db_nsr.get("affinity-or-anti-affinity-group"):
814 target["affinity-or-anti-affinity-group"] = deepcopy(
815 db_nsr["affinity-or-anti-affinity-group"]
816 )
817 for affinity_or_anti_affinity_group in target[
818 "affinity-or-anti-affinity-group"
819 ]:
820 affinity_or_anti_affinity_group["vim_info"] = {}
821
822 if db_nslcmop.get("lcmOperationType") != "instantiate":
823 # get parameters of instantiation:
824 db_nslcmop_instantiate = self.db.get_list(
825 "nslcmops",
826 {
827 "nsInstanceId": db_nslcmop["nsInstanceId"],
828 "lcmOperationType": "instantiate",
829 },
830 )[-1]
831 ns_params = db_nslcmop_instantiate.get("operationParams")
832 else:
833 ns_params = db_nslcmop.get("operationParams")
834 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
835 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
836
837 cp2target = {}
838 for vld_index, vld in enumerate(db_nsr.get("vld")):
839 target_vim = "vim:{}".format(ns_params["vimAccountId"])
840 target_vld = {
841 "id": vld["id"],
842 "name": vld["name"],
843 "mgmt-network": vld.get("mgmt-network", False),
844 "type": vld.get("type"),
845 "vim_info": {
846 target_vim: {
847 "vim_network_name": vld.get("vim-network-name"),
848 "vim_account_id": ns_params["vimAccountId"],
849 }
850 },
851 }
852 # check if this network needs SDN assist
853 if vld.get("pci-interfaces"):
854 db_vim = get_vim_account(ns_params["vimAccountId"])
855 if vim_config := db_vim.get("config"):
856 if sdnc_id := vim_config.get("sdn-controller"):
857 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
858 target_sdn = "sdn:{}".format(sdnc_id)
859 target_vld["vim_info"][target_sdn] = {
860 "sdn": True,
861 "target_vim": target_vim,
862 "vlds": [sdn_vld],
863 "type": vld.get("type"),
864 }
865
866 nsd_vnf_profiles = get_vnf_profiles(nsd)
867 for nsd_vnf_profile in nsd_vnf_profiles:
868 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
869 if cp["virtual-link-profile-id"] == vld["id"]:
870 cp2target[
871 "member_vnf:{}.{}".format(
872 cp["constituent-cpd-id"][0][
873 "constituent-base-element-id"
874 ],
875 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
876 )
877 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
878
879 # check at nsd descriptor, if there is an ip-profile
880 vld_params = {}
881 nsd_vlp = find_in_list(
882 get_virtual_link_profiles(nsd),
883 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
884 == vld["id"],
885 )
886 if (
887 nsd_vlp
888 and nsd_vlp.get("virtual-link-protocol-data")
889 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
890 ):
891 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
892 "l3-protocol-data"
893 ]
894
895 # update vld_params with instantiation params
896 vld_instantiation_params = find_in_list(
897 get_iterable(ns_params, "vld"),
898 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
899 )
900 if vld_instantiation_params:
901 vld_params.update(vld_instantiation_params)
902 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
903 target["ns"]["vld"].append(target_vld)
904 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
905 update_ns_vld_target(target, ns_params)
906
907 for vnfr in db_vnfrs.values():
908 vnfd = find_in_list(
909 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
910 )
911 vnf_params = find_in_list(
912 get_iterable(ns_params, "vnf"),
913 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
914 )
915 target_vnf = deepcopy(vnfr)
916 target_vim = "vim:{}".format(vnfr["vim-account-id"])
917 for vld in target_vnf.get("vld", ()):
918 # check if connected to a ns.vld, to fill target'
919 vnf_cp = find_in_list(
920 vnfd.get("int-virtual-link-desc", ()),
921 lambda cpd: cpd.get("id") == vld["id"],
922 )
923 if vnf_cp:
924 ns_cp = "member_vnf:{}.{}".format(
925 vnfr["member-vnf-index-ref"], vnf_cp["id"]
926 )
927 if cp2target.get(ns_cp):
928 vld["target"] = cp2target[ns_cp]
929
930 vld["vim_info"] = {
931 target_vim: {"vim_network_name": vld.get("vim-network-name")}
932 }
933 # check if this network needs SDN assist
934 target_sdn = None
935 if vld.get("pci-interfaces"):
936 db_vim = get_vim_account(vnfr["vim-account-id"])
937 sdnc_id = db_vim["config"].get("sdn-controller")
938 if sdnc_id:
939 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
940 target_sdn = "sdn:{}".format(sdnc_id)
941 vld["vim_info"][target_sdn] = {
942 "sdn": True,
943 "target_vim": target_vim,
944 "vlds": [sdn_vld],
945 "type": vld.get("type"),
946 }
947
948 # check at vnfd descriptor, if there is an ip-profile
949 vld_params = {}
950 vnfd_vlp = find_in_list(
951 get_virtual_link_profiles(vnfd),
952 lambda a_link_profile: a_link_profile["id"] == vld["id"],
953 )
954 if (
955 vnfd_vlp
956 and vnfd_vlp.get("virtual-link-protocol-data")
957 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
958 ):
959 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
960 "l3-protocol-data"
961 ]
962 # update vld_params with instantiation params
963 if vnf_params:
964 vld_instantiation_params = find_in_list(
965 get_iterable(vnf_params, "internal-vld"),
966 lambda i_vld: i_vld["name"] == vld["id"],
967 )
968 if vld_instantiation_params:
969 vld_params.update(vld_instantiation_params)
970 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
971
972 vdur_list = []
973 for vdur in target_vnf.get("vdur", ()):
974 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
975 continue # This vdu must not be created
976 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
977
978 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
979
980 if ssh_keys_all:
981 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
982 vnf_configuration = get_configuration(vnfd, vnfd["id"])
983 if (
984 vdu_configuration
985 and vdu_configuration.get("config-access")
986 and vdu_configuration.get("config-access").get("ssh-access")
987 ):
988 vdur["ssh-keys"] = ssh_keys_all
989 vdur["ssh-access-required"] = vdu_configuration[
990 "config-access"
991 ]["ssh-access"]["required"]
992 elif (
993 vnf_configuration
994 and vnf_configuration.get("config-access")
995 and vnf_configuration.get("config-access").get("ssh-access")
996 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
997 ):
998 vdur["ssh-keys"] = ssh_keys_all
999 vdur["ssh-access-required"] = vnf_configuration[
1000 "config-access"
1001 ]["ssh-access"]["required"]
1002 elif ssh_keys_instantiation and find_in_list(
1003 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1004 ):
1005 vdur["ssh-keys"] = ssh_keys_instantiation
1006
1007 self.logger.debug("NS > vdur > {}".format(vdur))
1008
1009 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1010 # cloud-init
1011 if vdud.get("cloud-init-file"):
1012 vdur["cloud-init"] = "{}:file:{}".format(
1013 vnfd["_id"], vdud.get("cloud-init-file")
1014 )
1015 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1016 if vdur["cloud-init"] not in target["cloud_init_content"]:
1017 base_folder = vnfd["_admin"]["storage"]
1018 if base_folder["pkg-dir"]:
1019 cloud_init_file = "{}/{}/cloud_init/{}".format(
1020 base_folder["folder"],
1021 base_folder["pkg-dir"],
1022 vdud.get("cloud-init-file"),
1023 )
1024 else:
1025 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1026 base_folder["folder"],
1027 vdud.get("cloud-init-file"),
1028 )
1029 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1030 target["cloud_init_content"][
1031 vdur["cloud-init"]
1032 ] = ci_file.read()
1033 elif vdud.get("cloud-init"):
1034 vdur["cloud-init"] = "{}:vdu:{}".format(
1035 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1036 )
                        # put content at target.cloud_init_content. Avoids ng_ro having to read the vnfd descriptor
1038 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1039 "cloud-init"
1040 ]
1041 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1042 deploy_params_vdu = self._format_additional_params(
1043 vdur.get("additionalParams") or {}
1044 )
1045 deploy_params_vdu["OSM"] = get_osm_params(
1046 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1047 )
1048 vdur["additionalParams"] = deploy_params_vdu
1049
1050 # flavor
1051 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1052 if target_vim not in ns_flavor["vim_info"]:
1053 ns_flavor["vim_info"][target_vim] = {}
1054
1055 # deal with images
1056 # in case alternative images are provided we must check if they should be applied
1057 # for the vim_type, modify the vim_type taking into account
1058 ns_image_id = int(vdur["ns-image-id"])
1059 if vdur.get("alt-image-ids"):
1060 db_vim = get_vim_account(vnfr["vim-account-id"])
1061 vim_type = db_vim["vim_type"]
1062 for alt_image_id in vdur.get("alt-image-ids"):
1063 ns_alt_image = target["image"][int(alt_image_id)]
1064 if vim_type == ns_alt_image.get("vim-type"):
1065 # must use alternative image
1066 self.logger.debug(
1067 "use alternative image id: {}".format(alt_image_id)
1068 )
1069 ns_image_id = alt_image_id
1070 vdur["ns-image-id"] = ns_image_id
1071 break
1072 ns_image = target["image"][int(ns_image_id)]
1073 if target_vim not in ns_image["vim_info"]:
1074 ns_image["vim_info"][target_vim] = {}
1075
1076 # Affinity groups
1077 if vdur.get("affinity-or-anti-affinity-group-id"):
1078 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1079 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1080 if target_vim not in ns_ags["vim_info"]:
1081 ns_ags["vim_info"][target_vim] = {}
1082
1083 # shared-volumes
1084 if vdur.get("shared-volumes-id"):
1085 for sv_id in vdur["shared-volumes-id"]:
1086 ns_sv = find_in_list(
1087 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1088 )
1089 if ns_sv:
1090 ns_sv["vim_info"][target_vim] = {}
1091
1092 vdur["vim_info"] = {target_vim: {}}
1093 # instantiation parameters
1094 if vnf_params:
1095 vdu_instantiation_params = find_in_list(
1096 get_iterable(vnf_params, "vdu"),
1097 lambda i_vdu: i_vdu["id"] == vdud["id"],
1098 )
1099 if vdu_instantiation_params:
1100 # Parse the vdu_volumes from the instantiation params
1101 vdu_volumes = get_volumes_from_instantiation_params(
1102 vdu_instantiation_params, vdud
1103 )
1104 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1105 vdur["additionalParams"]["OSM"][
1106 "vim_flavor_id"
1107 ] = vdu_instantiation_params.get("vim-flavor-id")
1108 vdur_list.append(vdur)
1109 target_vnf["vdur"] = vdur_list
1110 target["vnf"].append(target_vnf)
1111
1112 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1113 desc = await self.RO.deploy(nsr_id, target)
1114 self.logger.debug("RO return > {}".format(desc))
1115 action_id = desc["action_id"]
1116 await self._wait_ng_ro(
1117 nsr_id,
1118 action_id,
1119 nslcmop_id,
1120 start_deploy,
1121 timeout_ns_deploy,
1122 stage,
1123 operation="instantiation",
1124 )
1125
1126 # Updating NSR
1127 db_nsr_update = {
1128 "_admin.deployed.RO.operational-status": "running",
1129 "detailed-status": " ".join(stage),
1130 }
1131 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1132 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1133 self._write_op_status(nslcmop_id, stage)
1134 self.logger.debug(
1135 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1136 )
1137 return
1138
1139 async def _wait_ng_ro(
1140 self,
1141 nsr_id,
1142 action_id,
1143 nslcmop_id=None,
1144 start_time=None,
1145 timeout=600,
1146 stage=None,
1147 operation=None,
1148 ):
1149 detailed_status_old = None
1150 db_nsr_update = {}
1151 start_time = start_time or time()
1152 while time() <= start_time + timeout:
1153 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1154 self.logger.debug("Wait NG RO > {}".format(desc_status))
1155 if desc_status["status"] == "FAILED":
1156 raise NgRoException(desc_status["details"])
1157 elif desc_status["status"] == "BUILD":
1158 if stage:
1159 stage[2] = "VIM: ({})".format(desc_status["details"])
1160 elif desc_status["status"] == "DONE":
1161 if stage:
1162 stage[2] = "Deployed at VIM"
1163 break
1164 else:
1165 assert False, "ROclient.check_ns_status returns unknown {}".format(
1166 desc_status["status"]
1167 )
1168 if stage and nslcmop_id and stage[2] != detailed_status_old:
1169 detailed_status_old = stage[2]
1170 db_nsr_update["detailed-status"] = " ".join(stage)
1171 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1172 self._write_op_status(nslcmop_id, stage)
1173 await asyncio.sleep(15)
1174 else: # timeout_ns_deploy
1175 raise NgRoException("Timeout waiting ns to deploy")
1176
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate the ns deployment at NG-RO and delete its record there.

        Sends an empty target (no vld/vnf/image/flavor) so NG-RO undeploys
        everything, waits for the action to complete, then deletes the nsr at
        RO. Errors are accumulated in failed_detail and raised as a single
        LcmException at the end; a 404 from RO is treated as already deleted.

        :param logging_text: prefix to use at logging
        :param nsr_deployed: content of db_nsr "_admin.deployed" (not read in this method)
        :param nsr_id: ns record identity
        :param nslcmop_id: operation id; also used as the RO action_id of the deploy request
        :param stage: 3-item status list; item 2 is overwritten with the VIM deletion result
        :raise LcmException: if deletion at RO failed (conflict or other error)
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target makes NG-RO remove every item deployed for this ns
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            # any other failure is recorded and reported at the end
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        # write the final VIM deletion outcome to db regardless of success
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1250
1251 async def instantiate_RO(
1252 self,
1253 logging_text,
1254 nsr_id,
1255 nsd,
1256 db_nsr,
1257 db_nslcmop,
1258 db_vnfrs,
1259 db_vnfds,
1260 n2vc_key_list,
1261 stage,
1262 ):
1263 """
1264 Instantiate at RO
1265 :param logging_text: preffix text to use at logging
1266 :param nsr_id: nsr identity
1267 :param nsd: database content of ns descriptor
1268 :param db_nsr: database content of ns record
1269 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1270 :param db_vnfrs:
1271 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1272 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1273 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1274 :return: None or exception
1275 """
1276 try:
1277 start_deploy = time()
1278 ns_params = db_nslcmop.get("operationParams")
1279 if ns_params and ns_params.get("timeout_ns_deploy"):
1280 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1281 else:
1282 timeout_ns_deploy = self.timeout.ns_deploy
1283
1284 # Check for and optionally request placement optimization. Database will be updated if placement activated
1285 stage[2] = "Waiting for Placement."
1286 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1287 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1288 for vnfr in db_vnfrs.values():
1289 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1290 break
1291 else:
1292 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1293
1294 return await self._instantiate_ng_ro(
1295 logging_text,
1296 nsr_id,
1297 nsd,
1298 db_nsr,
1299 db_nslcmop,
1300 db_vnfrs,
1301 db_vnfds,
1302 n2vc_key_list,
1303 stage,
1304 start_deploy,
1305 timeout_ns_deploy,
1306 )
1307 except Exception as e:
1308 stage[2] = "ERROR deploying at VIM"
1309 self.set_vnfr_at_error(db_vnfrs, str(e))
1310 self.logger.error(
1311 "Error deploying at VIM {}".format(e),
1312 exc_info=not isinstance(
1313 e,
1314 (
1315 ROclient.ROClientException,
1316 LcmException,
1317 DbException,
1318 NgRoException,
1319 ),
1320 ),
1321 )
1322 raise
1323
1324 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1325 """
1326 Wait for kdu to be up, get ip address
1327 :param logging_text: prefix use for logging
1328 :param nsr_id:
1329 :param vnfr_id:
1330 :param kdu_name:
1331 :return: IP address, K8s services
1332 """
1333
1334 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1335 nb_tries = 0
1336
1337 while nb_tries < 360:
1338 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1339 kdur = next(
1340 (
1341 x
1342 for x in get_iterable(db_vnfr, "kdur")
1343 if x.get("kdu-name") == kdu_name
1344 ),
1345 None,
1346 )
1347 if not kdur:
1348 raise LcmException(
1349 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1350 )
1351 if kdur.get("status"):
1352 if kdur["status"] in ("READY", "ENABLED"):
1353 return kdur.get("ip-address"), kdur.get("services")
1354 else:
1355 raise LcmException(
1356 "target KDU={} is in error state".format(kdu_name)
1357 )
1358
1359 await asyncio.sleep(10)
1360 nb_tries += 1
1361 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1362
1363 async def wait_vm_up_insert_key_ro(
1364 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1365 ):
1366 """
1367 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1368 :param logging_text: prefix use for logging
1369 :param nsr_id:
1370 :param vnfr_id:
1371 :param vdu_id:
1372 :param vdu_index:
1373 :param pub_key: public ssh key to inject, None to skip
1374 :param user: user to apply the public ssh key
1375 :return: IP address
1376 """
1377
1378 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1379 ip_address = None
1380 target_vdu_id = None
1381 ro_retries = 0
1382
1383 while True:
1384 ro_retries += 1
1385 if ro_retries >= 360: # 1 hour
1386 raise LcmException(
1387 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1388 )
1389
1390 await asyncio.sleep(10)
1391
1392 # get ip address
1393 if not target_vdu_id:
1394 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1395
1396 if not vdu_id: # for the VNF case
1397 if db_vnfr.get("status") == "ERROR":
1398 raise LcmException(
1399 "Cannot inject ssh-key because target VNF is in error state"
1400 )
1401 ip_address = db_vnfr.get("ip-address")
1402 if not ip_address:
1403 continue
1404 vdur = next(
1405 (
1406 x
1407 for x in get_iterable(db_vnfr, "vdur")
1408 if x.get("ip-address") == ip_address
1409 ),
1410 None,
1411 )
1412 else: # VDU case
1413 vdur = next(
1414 (
1415 x
1416 for x in get_iterable(db_vnfr, "vdur")
1417 if x.get("vdu-id-ref") == vdu_id
1418 and x.get("count-index") == vdu_index
1419 ),
1420 None,
1421 )
1422
1423 if (
1424 not vdur and len(db_vnfr.get("vdur", ())) == 1
1425 ): # If only one, this should be the target vdu
1426 vdur = db_vnfr["vdur"][0]
1427 if not vdur:
1428 raise LcmException(
1429 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1430 vnfr_id, vdu_id, vdu_index
1431 )
1432 )
1433 # New generation RO stores information at "vim_info"
1434 ng_ro_status = None
1435 target_vim = None
1436 if vdur.get("vim_info"):
1437 target_vim = next(
1438 t for t in vdur["vim_info"]
1439 ) # there should be only one key
1440 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1441 if (
1442 vdur.get("pdu-type")
1443 or vdur.get("status") == "ACTIVE"
1444 or ng_ro_status == "ACTIVE"
1445 ):
1446 ip_address = vdur.get("ip-address")
1447 if not ip_address:
1448 continue
1449 target_vdu_id = vdur["vdu-id-ref"]
1450 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1451 raise LcmException(
1452 "Cannot inject ssh-key because target VM is in error state"
1453 )
1454
1455 if not target_vdu_id:
1456 continue
1457
1458 # inject public key into machine
1459 if pub_key and user:
1460 self.logger.debug(logging_text + "Inserting RO key")
1461 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1462 if vdur.get("pdu-type"):
1463 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1464 return ip_address
1465 try:
1466 target = {
1467 "action": {
1468 "action": "inject_ssh_key",
1469 "key": pub_key,
1470 "user": user,
1471 },
1472 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1473 }
1474 desc = await self.RO.deploy(nsr_id, target)
1475 action_id = desc["action_id"]
1476 await self._wait_ng_ro(
1477 nsr_id, action_id, timeout=600, operation="instantiation"
1478 )
1479 break
1480 except NgRoException as e:
1481 raise LcmException(
1482 "Reaching max tries injecting key. Error: {}".format(e)
1483 )
1484 else:
1485 break
1486
1487 return ip_address
1488
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: ns record identity
        :param vca_deployed_list: content of db_nsr "_admin.deployed.VCA"
        :param vca_index: index of this VCA inside vca_deployed_list
        :raise LcmException: if a dependent charm is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): the counter decreases once per 10 s sleep, so 300 allows
        # up to ~50 minutes of waiting, not 300 seconds — confirm intended units
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            # scan peers; break out as soon as one relevant peer is not READY
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        break
            else:
                # no dependencies, return
                return
            # some dependency not ready yet: sleep and poll again
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1526
1527 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1528 vca_id = None
1529 if db_vnfr:
1530 vca_id = deep_get(db_vnfr, ("vca-id",))
1531 elif db_nsr:
1532 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1533 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1534 return vca_id
1535
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        kdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Deploy one VCA (execution environment) for a NS/VNF/VDU/KDU element
        and run its Day-1 configuration.

        Creates or registers the execution environment (depending on vca_type),
        installs the configuration software, adds VCA relations, optionally
        injects the EE public ssh key into the target VM, executes the initial
        config primitives and, for helm-based EEs, configures prometheus
        scrape jobs. Progress is written to "configurationStatus" and the
        operation record as it advances.

        :param logging_text: prefix to use at logging
        :param vca_index: index of this VCA inside db_nsr["_admin"]["deployed"]["VCA"]
        :param nsi_id: network slice instance id (may be falsy), used to build the namespace
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnf record; None/falsy for a NS-level VCA
        :param vdu_id: target vdu id, None when the VCA is not at VDU level
        :param kdu_name: target kdu name, None when the VCA is not at KDU level
        :param vdu_index: vdu count-index (used to build namespace and element ids)
        :param kdu_index: kdu count-index, forwarded to prometheus job extraction
        :param config_descriptor: descriptor section holding config primitives and ssh-access info
        :param deploy_params: parameters used to render primitive arguments; "rw_mgmt_ip"
            and possibly "ns_config_info" are added here as a side effect
        :param base_folder: descriptor "_admin.storage" info used to locate the charm/chart artifact
        :param nslcmop_id: operation id, used to write operation status
        :param stage: list with 3 items [general stage, tasks, vim_specific]; item 0 is updated here
        :param vca_type: one of native_charm, lxc_proxy_charm, k8s_proxy_charm, helm, helm-v3
        :param vca_name: charm/chart name inside the package
        :param ee_config_descriptor: execution-environment descriptor section (id, metrics info)
        :raise LcmException: wrapping any failure, with the failing step in the message
        """
        nsr_id = db_nsr["_id"]
        # dotted-path prefix under which all partial updates of this VCA are stored
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # juju model namespace: "<nsi>.<ns>[.<vnf>-<idx>[.<vdu>-<idx> | .<kdu>]]"
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=nsr_id,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        chart_model=vca_name,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            is_relation_added = await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )

            if not is_relation_added:
                raise LcmException("Relations could not be added to VCA.")

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # default rw_mgmt_ip to None, avoiding the non definition of the variable
                rw_mgmt_ip = None

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip, services = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                        vnfd = self.db.get_one(
                            "vnfds_revisions",
                            {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
                        )
                        kdu = get_kdu(vnfd, kdu_name)
                        # keep only the k8s services declared by this KDU
                        kdu_services = [
                            service["name"] for service in get_kdu_services(kdu)
                        ]
                        exposed_services = []
                        for service in services:
                            if any(s in service["name"] for s in kdu_services):
                                exposed_services.append(service)
                        await self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name="config",
                            params_dict={
                                "osm-config": json.dumps(
                                    OsmConfigBuilder(
                                        k8s={"services": exposed_services}
                                    ).build()
                                )
                            },
                            vca_id=vca_id,
                        )

                    # This verification is needed in order to avoid trying to add a public key
                    # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
                    # for a KNF and not for its KDUs, the previous verification gives False, and the code
                    # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
                    # or it is a KNF)
                    elif db_vnfr.get("vdur"):
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                # TODO: review for those cases where the helm chart is a reference and
                # is not part of the NF package
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                    element_type=element_type,
                    vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
                    vdu_id=vdu_id,
                    vdu_index=vdu_index,
                    kdu_name=kdu_name,
                    kdu_index=kdu_index,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{}. {}".format(step, e)) from e
2012
2013 def _write_ns_status(
2014 self,
2015 nsr_id: str,
2016 ns_state: str,
2017 current_operation: str,
2018 current_operation_id: str,
2019 error_description: str = None,
2020 error_detail: str = None,
2021 other_update: dict = None,
2022 ):
2023 """
2024 Update db_nsr fields.
2025 :param nsr_id:
2026 :param ns_state:
2027 :param current_operation:
2028 :param current_operation_id:
2029 :param error_description:
2030 :param error_detail:
2031 :param other_update: Other required changes at database if provided, will be cleared
2032 :return:
2033 """
2034 try:
2035 db_dict = other_update or {}
2036 db_dict[
2037 "_admin.nslcmop"
2038 ] = current_operation_id # for backward compatibility
2039 db_dict["_admin.current-operation"] = current_operation_id
2040 db_dict["_admin.operation-type"] = (
2041 current_operation if current_operation != "IDLE" else None
2042 )
2043 db_dict["currentOperation"] = current_operation
2044 db_dict["currentOperationID"] = current_operation_id
2045 db_dict["errorDescription"] = error_description
2046 db_dict["errorDetail"] = error_detail
2047
2048 if ns_state:
2049 db_dict["nsState"] = ns_state
2050 self.update_db_2("nsrs", nsr_id, db_dict)
2051 except DbException as e:
2052 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2053
2054 def _write_op_status(
2055 self,
2056 op_id: str,
2057 stage: list = None,
2058 error_message: str = None,
2059 queuePosition: int = 0,
2060 operation_state: str = None,
2061 other_update: dict = None,
2062 ):
2063 try:
2064 db_dict = other_update or {}
2065 db_dict["queuePosition"] = queuePosition
2066 if isinstance(stage, list):
2067 db_dict["stage"] = stage[0]
2068 db_dict["detailed-status"] = " ".join(stage)
2069 elif stage is not None:
2070 db_dict["stage"] = str(stage)
2071
2072 if error_message is not None:
2073 db_dict["errorMessage"] = error_message
2074 if operation_state is not None:
2075 db_dict["operationState"] = operation_state
2076 db_dict["statusEnteredTime"] = time()
2077 self.update_db_2("nslcmops", op_id, db_dict)
2078 except DbException as e:
2079 self.logger.warn(
2080 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2081 )
2082
2083 def _write_all_config_status(self, db_nsr: dict, status: str):
2084 try:
2085 nsr_id = db_nsr["_id"]
2086 # configurationStatus
2087 config_status = db_nsr.get("configurationStatus")
2088 if config_status:
2089 db_nsr_update = {
2090 "configurationStatus.{}.status".format(index): status
2091 for index, v in enumerate(config_status)
2092 if v
2093 }
2094 # update status
2095 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2096
2097 except DbException as e:
2098 self.logger.warn(
2099 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2100 )
2101
2102 def _write_configuration_status(
2103 self,
2104 nsr_id: str,
2105 vca_index: int,
2106 status: str = None,
2107 element_under_configuration: str = None,
2108 element_type: str = None,
2109 other_update: dict = None,
2110 ):
2111 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2112 # .format(vca_index, status))
2113
2114 try:
2115 db_path = "configurationStatus.{}.".format(vca_index)
2116 db_dict = other_update or {}
2117 if status:
2118 db_dict[db_path + "status"] = status
2119 if element_under_configuration:
2120 db_dict[
2121 db_path + "elementUnderConfiguration"
2122 ] = element_under_configuration
2123 if element_type:
2124 db_dict[db_path + "elementType"] = element_type
2125 self.update_db_2("nsrs", nsr_id, db_dict)
2126 except DbException as e:
2127 self.logger.warn(
2128 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2129 status, nsr_id, vca_index, e
2130 )
2131 )
2132
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        # only delegate placement when the operation explicitly requests the PLA engine
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # ask PLA via kafka; its answer is written at nslcmops "_admin.pla",
            # possibly by a different LCM worker (HA), hence the DB polling below
            await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
            db_poll_interval = 5
            # total wait budget: 10 poll intervals (50 s)
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                # sleep first: the answer cannot arrive before the request is processed
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                # re-read the operation record; the result may have been stored
                # by another worker of the HA deployment
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a computed vim account or without a matching vnfr
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs (in-memory copy kept in sync with the database)
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2179
2180 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2181 alerts = []
2182 nsr_id = vnfr["nsr-id-ref"]
2183 df = vnfd.get("df", [{}])[0]
2184 # Checking for auto-healing configuration
2185 if "healing-aspect" in df:
2186 healing_aspects = df["healing-aspect"]
2187 for healing in healing_aspects:
2188 for healing_policy in healing.get("healing-policy", ()):
2189 vdu_id = healing_policy["vdu-id"]
2190 vdur = next(
2191 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2192 {},
2193 )
2194 if not vdur:
2195 continue
2196 metric_name = "vm_status"
2197 vdu_name = vdur.get("name")
2198 vnf_member_index = vnfr["member-vnf-index-ref"]
2199 uuid = str(uuid4())
2200 name = f"healing_{uuid}"
2201 action = healing_policy
2202 # action_on_recovery = healing.get("action-on-recovery")
2203 # cooldown_time = healing.get("cooldown-time")
2204 # day1 = healing.get("day1")
2205 alert = {
2206 "uuid": uuid,
2207 "name": name,
2208 "metric": metric_name,
2209 "tags": {
2210 "ns_id": nsr_id,
2211 "vnf_member_index": vnf_member_index,
2212 "vdu_name": vdu_name,
2213 },
2214 "alarm_status": "ok",
2215 "action_type": "healing",
2216 "action": action,
2217 }
2218 alerts.append(alert)
2219 return alerts
2220
2221 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2222 alerts = []
2223 nsr_id = vnfr["nsr-id-ref"]
2224 df = vnfd.get("df", [{}])[0]
2225 # Checking for auto-scaling configuration
2226 if "scaling-aspect" in df:
2227 scaling_aspects = df["scaling-aspect"]
2228 all_vnfd_monitoring_params = {}
2229 for ivld in vnfd.get("int-virtual-link-desc", ()):
2230 for mp in ivld.get("monitoring-parameters", ()):
2231 all_vnfd_monitoring_params[mp.get("id")] = mp
2232 for vdu in vnfd.get("vdu", ()):
2233 for mp in vdu.get("monitoring-parameter", ()):
2234 all_vnfd_monitoring_params[mp.get("id")] = mp
2235 for df in vnfd.get("df", ()):
2236 for mp in df.get("monitoring-parameter", ()):
2237 all_vnfd_monitoring_params[mp.get("id")] = mp
2238 for scaling_aspect in scaling_aspects:
2239 scaling_group_name = scaling_aspect.get("name", "")
2240 # Get monitored VDUs
2241 all_monitored_vdus = set()
2242 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2243 "deltas", ()
2244 ):
2245 for vdu_delta in delta.get("vdu-delta", ()):
2246 all_monitored_vdus.add(vdu_delta.get("id"))
2247 monitored_vdurs = list(
2248 filter(
2249 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2250 vnfr["vdur"],
2251 )
2252 )
2253 if not monitored_vdurs:
2254 self.logger.error(
2255 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2256 )
2257 continue
2258 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2259 if scaling_policy["scaling-type"] != "automatic":
2260 continue
2261 threshold_time = scaling_policy.get("threshold-time", "1")
2262 cooldown_time = scaling_policy.get("cooldown-time", "0")
2263 for scaling_criteria in scaling_policy["scaling-criteria"]:
2264 monitoring_param_ref = scaling_criteria.get(
2265 "vnf-monitoring-param-ref"
2266 )
2267 vnf_monitoring_param = all_vnfd_monitoring_params[
2268 monitoring_param_ref
2269 ]
2270 for vdur in monitored_vdurs:
2271 vdu_id = vdur["vdu-id-ref"]
2272 metric_name = vnf_monitoring_param.get("performance-metric")
2273 metric_name = f"osm_{metric_name}"
2274 vnf_member_index = vnfr["member-vnf-index-ref"]
2275 scalein_threshold = scaling_criteria.get(
2276 "scale-in-threshold"
2277 )
2278 scaleout_threshold = scaling_criteria.get(
2279 "scale-out-threshold"
2280 )
2281 # Looking for min/max-number-of-instances
2282 instances_min_number = 1
2283 instances_max_number = 1
2284 vdu_profile = df["vdu-profile"]
2285 if vdu_profile:
2286 profile = next(
2287 item for item in vdu_profile if item["id"] == vdu_id
2288 )
2289 instances_min_number = profile.get(
2290 "min-number-of-instances", 1
2291 )
2292 instances_max_number = profile.get(
2293 "max-number-of-instances", 1
2294 )
2295
2296 if scalein_threshold:
2297 uuid = str(uuid4())
2298 name = f"scalein_{uuid}"
2299 operation = scaling_criteria[
2300 "scale-in-relational-operation"
2301 ]
2302 rel_operator = self.rel_operation_types.get(
2303 operation, "<="
2304 )
2305 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2306 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2307 labels = {
2308 "ns_id": nsr_id,
2309 "vnf_member_index": vnf_member_index,
2310 "vdu_id": vdu_id,
2311 }
2312 prom_cfg = {
2313 "alert": name,
2314 "expr": expression,
2315 "for": str(threshold_time) + "m",
2316 "labels": labels,
2317 }
2318 action = scaling_policy
2319 action = {
2320 "scaling-group": scaling_group_name,
2321 "cooldown-time": cooldown_time,
2322 }
2323 alert = {
2324 "uuid": uuid,
2325 "name": name,
2326 "metric": metric_name,
2327 "tags": {
2328 "ns_id": nsr_id,
2329 "vnf_member_index": vnf_member_index,
2330 "vdu_id": vdu_id,
2331 },
2332 "alarm_status": "ok",
2333 "action_type": "scale_in",
2334 "action": action,
2335 "prometheus_config": prom_cfg,
2336 }
2337 alerts.append(alert)
2338
2339 if scaleout_threshold:
2340 uuid = str(uuid4())
2341 name = f"scaleout_{uuid}"
2342 operation = scaling_criteria[
2343 "scale-out-relational-operation"
2344 ]
2345 rel_operator = self.rel_operation_types.get(
2346 operation, "<="
2347 )
2348 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2349 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2350 labels = {
2351 "ns_id": nsr_id,
2352 "vnf_member_index": vnf_member_index,
2353 "vdu_id": vdu_id,
2354 }
2355 prom_cfg = {
2356 "alert": name,
2357 "expr": expression,
2358 "for": str(threshold_time) + "m",
2359 "labels": labels,
2360 }
2361 action = scaling_policy
2362 action = {
2363 "scaling-group": scaling_group_name,
2364 "cooldown-time": cooldown_time,
2365 }
2366 alert = {
2367 "uuid": uuid,
2368 "name": name,
2369 "metric": metric_name,
2370 "tags": {
2371 "ns_id": nsr_id,
2372 "vnf_member_index": vnf_member_index,
2373 "vdu_id": vdu_id,
2374 },
2375 "alarm_status": "ok",
2376 "action_type": "scale_out",
2377 "action": action,
2378 "prometheus_config": prom_cfg,
2379 }
2380 alerts.append(alert)
2381 return alerts
2382
2383 def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
2384 alerts = []
2385 nsr_id = vnfr["nsr-id-ref"]
2386 vnf_member_index = vnfr["member-vnf-index-ref"]
2387
2388 # Checking for VNF alarm configuration
2389 for vdur in vnfr["vdur"]:
2390 vdu_id = vdur["vdu-id-ref"]
2391 vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
2392 if "alarm" in vdu:
2393 # Get VDU monitoring params, since alerts are based on them
2394 vdu_monitoring_params = {}
2395 for mp in vdu.get("monitoring-parameter", []):
2396 vdu_monitoring_params[mp.get("id")] = mp
2397 if not vdu_monitoring_params:
2398 self.logger.error(
2399 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2400 )
2401 continue
2402 # Get alarms in the VDU
2403 alarm_descriptors = vdu["alarm"]
2404 # Create VDU alarms for each alarm in the VDU
2405 for alarm_descriptor in alarm_descriptors:
2406 # Check that the VDU alarm refers to a proper monitoring param
2407 alarm_monitoring_param = alarm_descriptor.get(
2408 "vnf-monitoring-param-ref", ""
2409 )
2410 vdu_specific_monitoring_param = vdu_monitoring_params.get(
2411 alarm_monitoring_param, {}
2412 )
2413 if not vdu_specific_monitoring_param:
2414 self.logger.error(
2415 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2416 )
2417 continue
2418 metric_name = vdu_specific_monitoring_param.get(
2419 "performance-metric"
2420 )
2421 if not metric_name:
2422 self.logger.error(
2423 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2424 )
2425 continue
2426 # Set params of the alarm to be created in Prometheus
2427 metric_name = f"osm_{metric_name}"
2428 metric_threshold = alarm_descriptor.get("value")
2429 uuid = str(uuid4())
2430 alert_name = f"vdu_alarm_{uuid}"
2431 operation = alarm_descriptor["operation"]
2432 rel_operator = self.rel_operation_types.get(operation, "<=")
2433 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2434 expression = f"{metric_selector} {rel_operator} {metric_threshold}"
2435 labels = {
2436 "ns_id": nsr_id,
2437 "vnf_member_index": vnf_member_index,
2438 "vdu_id": vdu_id,
2439 "vdu_name": "{{ $labels.vdu_name }}",
2440 }
2441 prom_cfg = {
2442 "alert": alert_name,
2443 "expr": expression,
2444 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2445 "labels": labels,
2446 }
2447 alarm_action = dict()
2448 for action_type in ["ok", "insufficient-data", "alarm"]:
2449 if (
2450 "actions" in alarm_descriptor
2451 and action_type in alarm_descriptor["actions"]
2452 ):
2453 alarm_action[action_type] = alarm_descriptor["actions"][
2454 action_type
2455 ]
2456 alert = {
2457 "uuid": uuid,
2458 "name": alert_name,
2459 "metric": metric_name,
2460 "tags": {
2461 "ns_id": nsr_id,
2462 "vnf_member_index": vnf_member_index,
2463 "vdu_id": vdu_id,
2464 },
2465 "alarm_status": "ok",
2466 "action_type": "vdu_alarm",
2467 "action": alarm_action,
2468 "prometheus_config": prom_cfg,
2469 }
2470 alerts.append(alert)
2471 return alerts
2472
2473 def update_nsrs_with_pla_result(self, params):
2474 try:
2475 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2476 self.update_db_2(
2477 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2478 )
2479 except Exception as e:
2480 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2481
2482 async def instantiate(self, nsr_id, nslcmop_id):
2483 """
2484
2485 :param nsr_id: ns instance to deploy
2486 :param nslcmop_id: operation to run
2487 :return:
2488 """
2489
2490 # Try to lock HA task here
2491 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2492 if not task_is_locked_by_me:
2493 self.logger.debug(
2494 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2495 )
2496 return
2497
2498 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2499 self.logger.debug(logging_text + "Enter")
2500
2501 # get all needed from database
2502
2503 # database nsrs record
2504 db_nsr = None
2505
2506 # database nslcmops record
2507 db_nslcmop = None
2508
2509 # update operation on nsrs
2510 db_nsr_update = {}
2511 # update operation on nslcmops
2512 db_nslcmop_update = {}
2513
2514 timeout_ns_deploy = self.timeout.ns_deploy
2515
2516 nslcmop_operation_state = None
2517 db_vnfrs = {} # vnf's info indexed by member-index
2518 # n2vc_info = {}
2519 tasks_dict_info = {} # from task to info text
2520 exc = None
2521 error_list = []
2522 stage = [
2523 "Stage 1/5: preparation of the environment.",
2524 "Waiting for previous operations to terminate.",
2525 "",
2526 ]
2527 # ^ stage, step, VIM progress
2528 try:
2529 # wait for any previous tasks in process
2530 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2531
2532 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2533 stage[1] = "Reading from database."
2534 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2535 db_nsr_update["detailed-status"] = "creating"
2536 db_nsr_update["operational-status"] = "init"
2537 self._write_ns_status(
2538 nsr_id=nsr_id,
2539 ns_state="BUILDING",
2540 current_operation="INSTANTIATING",
2541 current_operation_id=nslcmop_id,
2542 other_update=db_nsr_update,
2543 )
2544 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2545
2546 # read from db: operation
2547 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2548 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2549 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2550 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2551 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2552 )
2553 ns_params = db_nslcmop.get("operationParams")
2554 if ns_params and ns_params.get("timeout_ns_deploy"):
2555 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2556
2557 # read from db: ns
2558 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2559 self.logger.debug(logging_text + stage[1])
2560 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2561 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2562 self.logger.debug(logging_text + stage[1])
2563 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2564 self.fs.sync(db_nsr["nsd-id"])
2565 db_nsr["nsd"] = nsd
2566 # nsr_name = db_nsr["name"] # TODO short-name??
2567
2568 # read from db: vnf's of this ns
2569 stage[1] = "Getting vnfrs from db."
2570 self.logger.debug(logging_text + stage[1])
2571 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2572
2573 # read from db: vnfd's for every vnf
2574 db_vnfds = [] # every vnfd data
2575
2576 # for each vnf in ns, read vnfd
2577 for vnfr in db_vnfrs_list:
2578 if vnfr.get("kdur"):
2579 kdur_list = []
2580 for kdur in vnfr["kdur"]:
2581 if kdur.get("additionalParams"):
2582 kdur["additionalParams"] = json.loads(
2583 kdur["additionalParams"]
2584 )
2585 kdur_list.append(kdur)
2586 vnfr["kdur"] = kdur_list
2587
2588 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2589 vnfd_id = vnfr["vnfd-id"]
2590 vnfd_ref = vnfr["vnfd-ref"]
2591 self.fs.sync(vnfd_id)
2592
2593 # if we haven't this vnfd, read it from db
2594 if vnfd_id not in db_vnfds:
2595 # read from db
2596 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2597 vnfd_id, vnfd_ref
2598 )
2599 self.logger.debug(logging_text + stage[1])
2600 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2601
2602 # store vnfd
2603 db_vnfds.append(vnfd)
2604
2605 # Get or generates the _admin.deployed.VCA list
2606 vca_deployed_list = None
2607 if db_nsr["_admin"].get("deployed"):
2608 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2609 if vca_deployed_list is None:
2610 vca_deployed_list = []
2611 configuration_status_list = []
2612 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2613 db_nsr_update["configurationStatus"] = configuration_status_list
2614 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2615 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2616 elif isinstance(vca_deployed_list, dict):
2617 # maintain backward compatibility. Change a dict to list at database
2618 vca_deployed_list = list(vca_deployed_list.values())
2619 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2620 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2621
2622 if not isinstance(
2623 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2624 ):
2625 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2626 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2627
2628 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2629 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2630 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2631 self.db.set_list(
2632 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2633 )
2634
2635 # n2vc_redesign STEP 2 Deploy Network Scenario
2636 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2637 self._write_op_status(op_id=nslcmop_id, stage=stage)
2638
2639 stage[1] = "Deploying KDUs."
2640 # self.logger.debug(logging_text + "Before deploy_kdus")
2641 # Call to deploy_kdus in case exists the "vdu:kdu" param
2642 await self.deploy_kdus(
2643 logging_text=logging_text,
2644 nsr_id=nsr_id,
2645 nslcmop_id=nslcmop_id,
2646 db_vnfrs=db_vnfrs,
2647 db_vnfds=db_vnfds,
2648 task_instantiation_info=tasks_dict_info,
2649 )
2650
2651 stage[1] = "Getting VCA public key."
2652 # n2vc_redesign STEP 1 Get VCA public ssh-key
2653 # feature 1429. Add n2vc public key to needed VMs
2654 n2vc_key = self.n2vc.get_public_key()
2655 n2vc_key_list = [n2vc_key]
2656 if self.vca_config.public_key:
2657 n2vc_key_list.append(self.vca_config.public_key)
2658
2659 stage[1] = "Deploying NS at VIM."
2660 task_ro = asyncio.ensure_future(
2661 self.instantiate_RO(
2662 logging_text=logging_text,
2663 nsr_id=nsr_id,
2664 nsd=nsd,
2665 db_nsr=db_nsr,
2666 db_nslcmop=db_nslcmop,
2667 db_vnfrs=db_vnfrs,
2668 db_vnfds=db_vnfds,
2669 n2vc_key_list=n2vc_key_list,
2670 stage=stage,
2671 )
2672 )
2673 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2674 tasks_dict_info[task_ro] = "Deploying at VIM"
2675
2676 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2677 stage[1] = "Deploying Execution Environments."
2678 self.logger.debug(logging_text + stage[1])
2679
2680 # create namespace and certificate if any helm based EE is present in the NS
2681 if check_helm_ee_in_ns(db_vnfds):
2682 await self.vca_map["helm-v3"].setup_ns_namespace(
2683 name=nsr_id,
2684 )
2685 # create TLS certificates
2686 await self.vca_map["helm-v3"].create_tls_certificate(
2687 secret_name=self.EE_TLS_NAME,
2688 dns_prefix="*",
2689 nsr_id=nsr_id,
2690 usage="server auth",
2691 namespace=nsr_id,
2692 )
2693
2694 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2695 for vnf_profile in get_vnf_profiles(nsd):
2696 vnfd_id = vnf_profile["vnfd-id"]
2697 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2698 member_vnf_index = str(vnf_profile["id"])
2699 db_vnfr = db_vnfrs[member_vnf_index]
2700 base_folder = vnfd["_admin"]["storage"]
2701 vdu_id = None
2702 vdu_index = 0
2703 vdu_name = None
2704 kdu_name = None
2705 kdu_index = None
2706
2707 # Get additional parameters
2708 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2709 if db_vnfr.get("additionalParamsForVnf"):
2710 deploy_params.update(
2711 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2712 )
2713
2714 descriptor_config = get_configuration(vnfd, vnfd["id"])
2715 if descriptor_config:
2716 self._deploy_n2vc(
2717 logging_text=logging_text
2718 + "member_vnf_index={} ".format(member_vnf_index),
2719 db_nsr=db_nsr,
2720 db_vnfr=db_vnfr,
2721 nslcmop_id=nslcmop_id,
2722 nsr_id=nsr_id,
2723 nsi_id=nsi_id,
2724 vnfd_id=vnfd_id,
2725 vdu_id=vdu_id,
2726 kdu_name=kdu_name,
2727 member_vnf_index=member_vnf_index,
2728 vdu_index=vdu_index,
2729 kdu_index=kdu_index,
2730 vdu_name=vdu_name,
2731 deploy_params=deploy_params,
2732 descriptor_config=descriptor_config,
2733 base_folder=base_folder,
2734 task_instantiation_info=tasks_dict_info,
2735 stage=stage,
2736 )
2737
2738 # Deploy charms for each VDU that supports one.
2739 for vdud in get_vdu_list(vnfd):
2740 vdu_id = vdud["id"]
2741 descriptor_config = get_configuration(vnfd, vdu_id)
2742 vdur = find_in_list(
2743 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2744 )
2745
2746 if vdur.get("additionalParams"):
2747 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2748 else:
2749 deploy_params_vdu = deploy_params
2750 deploy_params_vdu["OSM"] = get_osm_params(
2751 db_vnfr, vdu_id, vdu_count_index=0
2752 )
2753 vdud_count = get_number_of_instances(vnfd, vdu_id)
2754
2755 self.logger.debug("VDUD > {}".format(vdud))
2756 self.logger.debug(
2757 "Descriptor config > {}".format(descriptor_config)
2758 )
2759 if descriptor_config:
2760 vdu_name = None
2761 kdu_name = None
2762 kdu_index = None
2763 for vdu_index in range(vdud_count):
2764 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2765 self._deploy_n2vc(
2766 logging_text=logging_text
2767 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2768 member_vnf_index, vdu_id, vdu_index
2769 ),
2770 db_nsr=db_nsr,
2771 db_vnfr=db_vnfr,
2772 nslcmop_id=nslcmop_id,
2773 nsr_id=nsr_id,
2774 nsi_id=nsi_id,
2775 vnfd_id=vnfd_id,
2776 vdu_id=vdu_id,
2777 kdu_name=kdu_name,
2778 kdu_index=kdu_index,
2779 member_vnf_index=member_vnf_index,
2780 vdu_index=vdu_index,
2781 vdu_name=vdu_name,
2782 deploy_params=deploy_params_vdu,
2783 descriptor_config=descriptor_config,
2784 base_folder=base_folder,
2785 task_instantiation_info=tasks_dict_info,
2786 stage=stage,
2787 )
2788 for kdud in get_kdu_list(vnfd):
2789 kdu_name = kdud["name"]
2790 descriptor_config = get_configuration(vnfd, kdu_name)
2791 if descriptor_config:
2792 vdu_id = None
2793 vdu_index = 0
2794 vdu_name = None
2795 kdu_index, kdur = next(
2796 x
2797 for x in enumerate(db_vnfr["kdur"])
2798 if x[1]["kdu-name"] == kdu_name
2799 )
2800 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2801 if kdur.get("additionalParams"):
2802 deploy_params_kdu.update(
2803 parse_yaml_strings(kdur["additionalParams"].copy())
2804 )
2805
2806 self._deploy_n2vc(
2807 logging_text=logging_text,
2808 db_nsr=db_nsr,
2809 db_vnfr=db_vnfr,
2810 nslcmop_id=nslcmop_id,
2811 nsr_id=nsr_id,
2812 nsi_id=nsi_id,
2813 vnfd_id=vnfd_id,
2814 vdu_id=vdu_id,
2815 kdu_name=kdu_name,
2816 member_vnf_index=member_vnf_index,
2817 vdu_index=vdu_index,
2818 kdu_index=kdu_index,
2819 vdu_name=vdu_name,
2820 deploy_params=deploy_params_kdu,
2821 descriptor_config=descriptor_config,
2822 base_folder=base_folder,
2823 task_instantiation_info=tasks_dict_info,
2824 stage=stage,
2825 )
2826
2827 # Check if each vnf has exporter for metric collection if so update prometheus job records
2828 if "exporters-endpoints" in vnfd.get("df")[0]:
2829 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2830 self.logger.debug("exporter config :{}".format(exporter_config))
2831 artifact_path = "{}/{}/{}".format(
2832 base_folder["folder"],
2833 base_folder["pkg-dir"],
2834 "exporter-endpoint",
2835 )
2836 ee_id = None
2837 ee_config_descriptor = exporter_config
2838 vnfr_id = db_vnfr["id"]
2839 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2840 logging_text,
2841 nsr_id,
2842 vnfr_id,
2843 vdu_id=None,
2844 vdu_index=None,
2845 user=None,
2846 pub_key=None,
2847 )
2848 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2849 self.logger.debug("Artifact_path:{}".format(artifact_path))
2850 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2851 vdu_id_for_prom = None
2852 vdu_index_for_prom = None
2853 for x in get_iterable(db_vnfr, "vdur"):
2854 vdu_id_for_prom = x.get("vdu-id-ref")
2855 vdu_index_for_prom = x.get("count-index")
2856 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2857 ee_id=ee_id,
2858 artifact_path=artifact_path,
2859 ee_config_descriptor=ee_config_descriptor,
2860 vnfr_id=vnfr_id,
2861 nsr_id=nsr_id,
2862 target_ip=rw_mgmt_ip,
2863 element_type="VDU",
2864 vdu_id=vdu_id_for_prom,
2865 vdu_index=vdu_index_for_prom,
2866 )
2867
2868 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2869 if prometheus_jobs:
2870 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2871 self.update_db_2(
2872 "nsrs",
2873 nsr_id,
2874 db_nsr_update,
2875 )
2876
2877 for job in prometheus_jobs:
2878 self.db.set_one(
2879 "prometheus_jobs",
2880 {"job_name": job["job_name"]},
2881 job,
2882 upsert=True,
2883 fail_on_empty=False,
2884 )
2885
2886 # Check if this NS has a charm configuration
2887 descriptor_config = nsd.get("ns-configuration")
2888 if descriptor_config and descriptor_config.get("juju"):
2889 vnfd_id = None
2890 db_vnfr = None
2891 member_vnf_index = None
2892 vdu_id = None
2893 kdu_name = None
2894 kdu_index = None
2895 vdu_index = 0
2896 vdu_name = None
2897
2898 # Get additional parameters
2899 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2900 if db_nsr.get("additionalParamsForNs"):
2901 deploy_params.update(
2902 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2903 )
2904 base_folder = nsd["_admin"]["storage"]
2905 self._deploy_n2vc(
2906 logging_text=logging_text,
2907 db_nsr=db_nsr,
2908 db_vnfr=db_vnfr,
2909 nslcmop_id=nslcmop_id,
2910 nsr_id=nsr_id,
2911 nsi_id=nsi_id,
2912 vnfd_id=vnfd_id,
2913 vdu_id=vdu_id,
2914 kdu_name=kdu_name,
2915 member_vnf_index=member_vnf_index,
2916 vdu_index=vdu_index,
2917 kdu_index=kdu_index,
2918 vdu_name=vdu_name,
2919 deploy_params=deploy_params,
2920 descriptor_config=descriptor_config,
2921 base_folder=base_folder,
2922 task_instantiation_info=tasks_dict_info,
2923 stage=stage,
2924 )
2925
2926 # rest of staff will be done at finally
2927
2928 except (
2929 ROclient.ROClientException,
2930 DbException,
2931 LcmException,
2932 N2VCException,
2933 ) as e:
2934 self.logger.error(
2935 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2936 )
2937 exc = e
2938 except asyncio.CancelledError:
2939 self.logger.error(
2940 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2941 )
2942 exc = "Operation was cancelled"
2943 except Exception as e:
2944 exc = traceback.format_exc()
2945 self.logger.critical(
2946 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2947 exc_info=True,
2948 )
2949 finally:
2950 if exc:
2951 error_list.append(str(exc))
2952 try:
2953 # wait for pending tasks
2954 if tasks_dict_info:
2955 stage[1] = "Waiting for instantiate pending tasks."
2956 self.logger.debug(logging_text + stage[1])
2957 error_list += await self._wait_for_tasks(
2958 logging_text,
2959 tasks_dict_info,
2960 timeout_ns_deploy,
2961 stage,
2962 nslcmop_id,
2963 nsr_id=nsr_id,
2964 )
2965 stage[1] = stage[2] = ""
2966 except asyncio.CancelledError:
2967 error_list.append("Cancelled")
2968 # TODO cancel all tasks
2969 except Exception as exc:
2970 error_list.append(str(exc))
2971
2972 # update operation-status
2973 db_nsr_update["operational-status"] = "running"
2974 # let's begin with VCA 'configured' status (later we can change it)
2975 db_nsr_update["config-status"] = "configured"
2976 for task, task_name in tasks_dict_info.items():
2977 if not task.done() or task.cancelled() or task.exception():
2978 if task_name.startswith(self.task_name_deploy_vca):
2979 # A N2VC task is pending
2980 db_nsr_update["config-status"] = "failed"
2981 else:
2982 # RO or KDU task is pending
2983 db_nsr_update["operational-status"] = "failed"
2984
2985 # update status at database
2986 if error_list:
2987 error_detail = ". ".join(error_list)
2988 self.logger.error(logging_text + error_detail)
2989 error_description_nslcmop = "{} Detail: {}".format(
2990 stage[0], error_detail
2991 )
2992 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2993 nslcmop_id, stage[0]
2994 )
2995
2996 db_nsr_update["detailed-status"] = (
2997 error_description_nsr + " Detail: " + error_detail
2998 )
2999 db_nslcmop_update["detailed-status"] = error_detail
3000 nslcmop_operation_state = "FAILED"
3001 ns_state = "BROKEN"
3002 else:
3003 error_detail = None
3004 error_description_nsr = error_description_nslcmop = None
3005 ns_state = "READY"
3006 db_nsr_update["detailed-status"] = "Done"
3007 db_nslcmop_update["detailed-status"] = "Done"
3008 nslcmop_operation_state = "COMPLETED"
3009 # Gather auto-healing and auto-scaling alerts for each vnfr
3010 healing_alerts = []
3011 scaling_alerts = []
3012 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3013 vnfd = next(
3014 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3015 )
3016 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3017 for alert in healing_alerts:
3018 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3019 self.db.create("alerts", alert)
3020
3021 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3022 for alert in scaling_alerts:
3023 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3024 self.db.create("alerts", alert)
3025
3026 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3027 for alert in alarm_alerts:
3028 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3029 self.db.create("alerts", alert)
3030 if db_nsr:
3031 self._write_ns_status(
3032 nsr_id=nsr_id,
3033 ns_state=ns_state,
3034 current_operation="IDLE",
3035 current_operation_id=None,
3036 error_description=error_description_nsr,
3037 error_detail=error_detail,
3038 other_update=db_nsr_update,
3039 )
3040 self._write_op_status(
3041 op_id=nslcmop_id,
3042 stage="",
3043 error_message=error_description_nslcmop,
3044 operation_state=nslcmop_operation_state,
3045 other_update=db_nslcmop_update,
3046 )
3047
3048 if nslcmop_operation_state:
3049 try:
3050 await self.msg.aiowrite(
3051 "ns",
3052 "instantiated",
3053 {
3054 "nsr_id": nsr_id,
3055 "nslcmop_id": nslcmop_id,
3056 "operationState": nslcmop_operation_state,
3057 "startTime": db_nslcmop["startTime"],
3058 "links": db_nslcmop["links"],
3059 "operationParams": {
3060 "nsInstanceId": nsr_id,
3061 "nsdId": db_nsr["nsd-id"],
3062 },
3063 },
3064 )
3065 except Exception as e:
3066 self.logger.error(
3067 logging_text + "kafka_write notification Exception {}".format(e)
3068 )
3069
3070 self.logger.debug(logging_text + "Exit")
3071 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3072
3073 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3074 if vnfd_id not in cached_vnfds:
3075 cached_vnfds[vnfd_id] = self.db.get_one(
3076 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3077 )
3078 return cached_vnfds[vnfd_id]
3079
3080 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3081 if vnf_profile_id not in cached_vnfrs:
3082 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3083 "vnfrs",
3084 {
3085 "member-vnf-index-ref": vnf_profile_id,
3086 "nsr-id-ref": nsr_id,
3087 },
3088 )
3089 return cached_vnfrs[vnf_profile_id]
3090
3091 def _is_deployed_vca_in_relation(
3092 self, vca: DeployedVCA, relation: Relation
3093 ) -> bool:
3094 found = False
3095 for endpoint in (relation.provider, relation.requirer):
3096 if endpoint["kdu-resource-profile-id"]:
3097 continue
3098 found = (
3099 vca.vnf_profile_id == endpoint.vnf_profile_id
3100 and vca.vdu_profile_id == endpoint.vdu_profile_id
3101 and vca.execution_environment_ref == endpoint.execution_environment_ref
3102 )
3103 if found:
3104 break
3105 return found
3106
3107 def _update_ee_relation_data_with_implicit_data(
3108 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3109 ):
3110 ee_relation_data = safe_get_ee_relation(
3111 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3112 )
3113 ee_relation_level = EELevel.get_level(ee_relation_data)
3114 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3115 "execution-environment-ref"
3116 ]:
3117 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3118 vnfd_id = vnf_profile["vnfd-id"]
3119 project = nsd["_admin"]["projects_read"][0]
3120 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3121 entity_id = (
3122 vnfd_id
3123 if ee_relation_level == EELevel.VNF
3124 else ee_relation_data["vdu-profile-id"]
3125 )
3126 ee = get_juju_ee_ref(db_vnfd, entity_id)
3127 if not ee:
3128 raise Exception(
3129 f"not execution environments found for ee_relation {ee_relation_data}"
3130 )
3131 ee_relation_data["execution-environment-ref"] = ee["id"]
3132 return ee_relation_data
3133
3134 def _get_ns_relations(
3135 self,
3136 nsr_id: str,
3137 nsd: Dict[str, Any],
3138 vca: DeployedVCA,
3139 cached_vnfds: Dict[str, Any],
3140 ) -> List[Relation]:
3141 relations = []
3142 db_ns_relations = get_ns_configuration_relation_list(nsd)
3143 for r in db_ns_relations:
3144 provider_dict = None
3145 requirer_dict = None
3146 if all(key in r for key in ("provider", "requirer")):
3147 provider_dict = r["provider"]
3148 requirer_dict = r["requirer"]
3149 elif "entities" in r:
3150 provider_id = r["entities"][0]["id"]
3151 provider_dict = {
3152 "nsr-id": nsr_id,
3153 "endpoint": r["entities"][0]["endpoint"],
3154 }
3155 if provider_id != nsd["id"]:
3156 provider_dict["vnf-profile-id"] = provider_id
3157 requirer_id = r["entities"][1]["id"]
3158 requirer_dict = {
3159 "nsr-id": nsr_id,
3160 "endpoint": r["entities"][1]["endpoint"],
3161 }
3162 if requirer_id != nsd["id"]:
3163 requirer_dict["vnf-profile-id"] = requirer_id
3164 else:
3165 raise Exception(
3166 "provider/requirer or entities must be included in the relation."
3167 )
3168 relation_provider = self._update_ee_relation_data_with_implicit_data(
3169 nsr_id, nsd, provider_dict, cached_vnfds
3170 )
3171 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3172 nsr_id, nsd, requirer_dict, cached_vnfds
3173 )
3174 provider = EERelation(relation_provider)
3175 requirer = EERelation(relation_requirer)
3176 relation = Relation(r["name"], provider, requirer)
3177 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3178 if vca_in_relation:
3179 relations.append(relation)
3180 return relations
3181
3182 def _get_vnf_relations(
3183 self,
3184 nsr_id: str,
3185 nsd: Dict[str, Any],
3186 vca: DeployedVCA,
3187 cached_vnfds: Dict[str, Any],
3188 ) -> List[Relation]:
3189 relations = []
3190 if vca.target_element == "ns":
3191 self.logger.debug("VCA is a NS charm, not a VNF.")
3192 return relations
3193 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3194 vnf_profile_id = vnf_profile["id"]
3195 vnfd_id = vnf_profile["vnfd-id"]
3196 project = nsd["_admin"]["projects_read"][0]
3197 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3198 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3199 for r in db_vnf_relations:
3200 provider_dict = None
3201 requirer_dict = None
3202 if all(key in r for key in ("provider", "requirer")):
3203 provider_dict = r["provider"]
3204 requirer_dict = r["requirer"]
3205 elif "entities" in r:
3206 provider_id = r["entities"][0]["id"]
3207 provider_dict = {
3208 "nsr-id": nsr_id,
3209 "vnf-profile-id": vnf_profile_id,
3210 "endpoint": r["entities"][0]["endpoint"],
3211 }
3212 if provider_id != vnfd_id:
3213 provider_dict["vdu-profile-id"] = provider_id
3214 requirer_id = r["entities"][1]["id"]
3215 requirer_dict = {
3216 "nsr-id": nsr_id,
3217 "vnf-profile-id": vnf_profile_id,
3218 "endpoint": r["entities"][1]["endpoint"],
3219 }
3220 if requirer_id != vnfd_id:
3221 requirer_dict["vdu-profile-id"] = requirer_id
3222 else:
3223 raise Exception(
3224 "provider/requirer or entities must be included in the relation."
3225 )
3226 relation_provider = self._update_ee_relation_data_with_implicit_data(
3227 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3228 )
3229 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3230 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3231 )
3232 provider = EERelation(relation_provider)
3233 requirer = EERelation(relation_requirer)
3234 relation = Relation(r["name"], provider, requirer)
3235 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3236 if vca_in_relation:
3237 relations.append(relation)
3238 return relations
3239
3240 def _get_kdu_resource_data(
3241 self,
3242 ee_relation: EERelation,
3243 db_nsr: Dict[str, Any],
3244 cached_vnfds: Dict[str, Any],
3245 ) -> DeployedK8sResource:
3246 nsd = get_nsd(db_nsr)
3247 vnf_profiles = get_vnf_profiles(nsd)
3248 vnfd_id = find_in_list(
3249 vnf_profiles,
3250 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3251 )["vnfd-id"]
3252 project = nsd["_admin"]["projects_read"][0]
3253 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3254 kdu_resource_profile = get_kdu_resource_profile(
3255 db_vnfd, ee_relation.kdu_resource_profile_id
3256 )
3257 kdu_name = kdu_resource_profile["kdu-name"]
3258 deployed_kdu, _ = get_deployed_kdu(
3259 db_nsr.get("_admin", ()).get("deployed", ()),
3260 kdu_name,
3261 ee_relation.vnf_profile_id,
3262 )
3263 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3264 return deployed_kdu
3265
3266 def _get_deployed_component(
3267 self,
3268 ee_relation: EERelation,
3269 db_nsr: Dict[str, Any],
3270 cached_vnfds: Dict[str, Any],
3271 ) -> DeployedComponent:
3272 nsr_id = db_nsr["_id"]
3273 deployed_component = None
3274 ee_level = EELevel.get_level(ee_relation)
3275 if ee_level == EELevel.NS:
3276 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3277 if vca:
3278 deployed_component = DeployedVCA(nsr_id, vca)
3279 elif ee_level == EELevel.VNF:
3280 vca = get_deployed_vca(
3281 db_nsr,
3282 {
3283 "vdu_id": None,
3284 "member-vnf-index": ee_relation.vnf_profile_id,
3285 "ee_descriptor_id": ee_relation.execution_environment_ref,
3286 },
3287 )
3288 if vca:
3289 deployed_component = DeployedVCA(nsr_id, vca)
3290 elif ee_level == EELevel.VDU:
3291 vca = get_deployed_vca(
3292 db_nsr,
3293 {
3294 "vdu_id": ee_relation.vdu_profile_id,
3295 "member-vnf-index": ee_relation.vnf_profile_id,
3296 "ee_descriptor_id": ee_relation.execution_environment_ref,
3297 },
3298 )
3299 if vca:
3300 deployed_component = DeployedVCA(nsr_id, vca)
3301 elif ee_level == EELevel.KDU:
3302 kdu_resource_data = self._get_kdu_resource_data(
3303 ee_relation, db_nsr, cached_vnfds
3304 )
3305 if kdu_resource_data:
3306 deployed_component = DeployedK8sResource(kdu_resource_data)
3307 return deployed_component
3308
3309 async def _add_relation(
3310 self,
3311 relation: Relation,
3312 vca_type: str,
3313 db_nsr: Dict[str, Any],
3314 cached_vnfds: Dict[str, Any],
3315 cached_vnfrs: Dict[str, Any],
3316 ) -> bool:
3317 deployed_provider = self._get_deployed_component(
3318 relation.provider, db_nsr, cached_vnfds
3319 )
3320 deployed_requirer = self._get_deployed_component(
3321 relation.requirer, db_nsr, cached_vnfds
3322 )
3323 if (
3324 deployed_provider
3325 and deployed_requirer
3326 and deployed_provider.config_sw_installed
3327 and deployed_requirer.config_sw_installed
3328 ):
3329 provider_db_vnfr = (
3330 self._get_vnfr(
3331 relation.provider.nsr_id,
3332 relation.provider.vnf_profile_id,
3333 cached_vnfrs,
3334 )
3335 if relation.provider.vnf_profile_id
3336 else None
3337 )
3338 requirer_db_vnfr = (
3339 self._get_vnfr(
3340 relation.requirer.nsr_id,
3341 relation.requirer.vnf_profile_id,
3342 cached_vnfrs,
3343 )
3344 if relation.requirer.vnf_profile_id
3345 else None
3346 )
3347 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3348 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3349 provider_relation_endpoint = RelationEndpoint(
3350 deployed_provider.ee_id,
3351 provider_vca_id,
3352 relation.provider.endpoint,
3353 )
3354 requirer_relation_endpoint = RelationEndpoint(
3355 deployed_requirer.ee_id,
3356 requirer_vca_id,
3357 relation.requirer.endpoint,
3358 )
3359 try:
3360 await self.vca_map[vca_type].add_relation(
3361 provider=provider_relation_endpoint,
3362 requirer=requirer_relation_endpoint,
3363 )
3364 except N2VCException as exception:
3365 self.logger.error(exception)
3366 raise LcmException(exception)
3367 return True
3368 return False
3369
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Collect and add all juju relations involving one deployed VCA.

        Finds NS-level and VNF-level relations that include the VCA at
        *vca_index*, then polls (every 5s, up to *timeout* seconds of wall
        clock) until every peer is deployed and each relation has been added.

        :param logging_text: prefix for log messages
        :param nsr_id: nsr record id
        :param vca_type: key into self.vca_map for the N2VC connector
        :param vca_index: position of this VCA in _admin.deployed.VCA
        :param timeout: max seconds to wait for peers before giving up
        :return: True on success or when there is nothing to do; False on
            timeout or any error (errors are logged, never raised).
        """
        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            # caches shared across all lookups in this call
            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # iterate over a copy: successfully added relations are removed
                # from the pending list while looping
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            # best-effort: any failure is reported to the caller as False
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3441
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its k8s cluster and update the nsr/vnfr records.

        Installs the helm chart or juju bundle, records the kdu instance name
        (and, for juju, the namespace) in the nsr, stores the exposed services
        and management ip in the vnfr, and finally runs the KDU's
        initial-config-primitives when no juju execution environment is
        declared for it.

        :param nsr_id: nsr record id
        :param nsr_db_path: path inside the nsr ("_admin.deployed.K8s.<i>")
        :param vnfr_data: vnfr record of the owning VNF
        :param kdu_index: index of this kdur inside the vnfr
        :param kdud: KDU descriptor from the VNFD
        :param vnfd: VNFD containing the KDU
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace
        :param k8params: instantiation parameters for the chart/bundle
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: VCA id to use for juju-based operations
        :raises: re-raises any failure after marking nsr/vnfr status as ERROR
        :return: the generated (or user-provided) kdu instance name
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # user-provided deployment name wins over a generated one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    # match deployed services by name prefix against the
                    # descriptor's mgmt-service entries
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this entry
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial-config-primitives directly on the cluster only when
            # no juju execution environment is declared to handle them.
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3638
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch an install task for every KDU declared in the vnfrs.

        For each kdur of each vnfr: resolves the k8s cluster id to its
        internal uuid (initializing helm-v3 on demand for old clusters),
        synchronizes helm repos once per cluster, records the KDU entry in
        _admin.deployed.K8s of the nsr, and spawns an asyncio task running
        self._install_kdu, registered in *task_instantiation_info*.

        :param logging_text: prefix for log messages
        :param nsr_id: nsr record id
        :param nslcmop_id: operation id used to register the tasks
        :param db_vnfrs: vnfr records keyed by member-vnf-index
        :param db_vnfds: list of VNFD records referenced by the vnfrs
        :param task_instantiation_info: dict task -> description, mutated
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # cache: cluster type -> {external cluster id -> internal uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve a k8s cluster id to its internal uuid, caching results.

            Waits for pending k8scluster tasks, and lazily initializes
            helm-v3 on clusters created before v3 support existed.
            """
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    # done at most once per cluster (per helm version)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever K8s entries were prepared, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3910
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create/reuse VCA records and spawn an instantiate_N2VC task per ee.

        For each execution environment in *descriptor_config*, determines the
        vca type (proxy/native/k8s charm or helm), reuses the matching entry
        in _admin.deployed.VCA or appends a new one (both in *db_nsr* and in
        the database), then launches self.instantiate_N2VC as an asyncio task
        registered in *task_instantiation_info*.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # classify the execution environment into a vca type
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an existing VCA record matching this ee; the for/else
            # below leaves vca_index pointing at the match or creates a new
            # entry at the end of the list
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4074
4075 def _format_additional_params(self, params):
4076 params = params or {}
4077 for key, value in params.items():
4078 if str(value).startswith("!!yaml "):
4079 params[key] = yaml.safe_load(value[7:])
4080 return params
4081
def _get_terminate_primitive_params(self, seq, vnf_index):
    """Build the mapped parameter dict for one terminate config-primitive.

    :param seq: one entry of the terminate-config-primitive list
    :param vnf_index: member-vnf-index the primitive belongs to
    :return: mapped primitive params (see _map_primitive_params)
    """
    params = {
        "member_vnf_index": vnf_index,
        "primitive": seq.get("name"),
        "primitive_params": {},
    }
    # no instantiation params apply to terminate primitives
    return self._map_primitive_params(seq, params, {})
4092
4093 # sub-operations
4094
def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
    """Decide whether an existing sub-operation is retried or skipped.

    :param db_nslcmop: nslcmop database record
    :param op_index: index of the sub-operation in _admin.operations
    :return: SUBOPERATION_STATUS_SKIP when it already COMPLETED,
        otherwise op_index (after resetting its status to PROCESSING)
    """
    op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
    if op.get("operationState") == "COMPLETED":
        # Already done: caller must not re-execute the primitive/action
        return self.SUBOPERATION_STATUS_SKIP
    # Not completed: mark it as being retried and hand back its index so
    # the caller re-executes it with the arguments stored in the record
    self._update_suboperation_status(
        db_nslcmop, op_index, "PROCESSING", "In progress"
    )
    return op_index
4114
4115 # Find a sub-operation where all keys in a matching dictionary must match
4116 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4117 def _find_suboperation(self, db_nslcmop, match):
4118 if db_nslcmop and match:
4119 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4120 for i, op in enumerate(op_list):
4121 if all(op.get(k) == match[k] for k in match):
4122 return i
4123 return self.SUBOPERATION_STATUS_NOT_FOUND
4124
4125 # Update status for a sub-operation given its index
def _update_suboperation_status(
    self, db_nslcmop, op_index, operationState, detailed_status
):
    """Persist state and detail of one sub-operation in the nslcmops record.

    :param db_nslcmop: nslcmop database record (only its _id is used)
    :param op_index: index of the sub-operation in _admin.operations
    :param operationState: new operationState value
    :param detailed_status: new detailed-status value
    """
    # fail_on_empty=False: tolerate the record being absent (HA tasks)
    self.db.set_one(
        "nslcmops",
        q_filter={"_id": db_nslcmop["_id"]},
        update_dict={
            "_admin.operations.{}.operationState".format(op_index): operationState,
            "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
        },
        fail_on_empty=False,
    )
4138
4139 # Add sub-operation, return the index of the added sub-operation
4140 # Optionally, set operationState, detailed-status, and operationType
4141 # Status and type are currently set for 'scale' sub-operations:
4142 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4143 # 'detailed-status' : status message
4144 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4145 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
def _add_suboperation(
    self,
    db_nslcmop,
    vnf_index,
    vdu_id,
    vdu_count_index,
    vdu_name,
    primitive,
    mapped_primitive_params,
    operationState=None,
    detailed_status=None,
    operationType=None,
    RO_nsr_id=None,
    RO_scaling_info=None,
):
    """Append a sub-operation to _admin.operations of the nslcmop record.

    Status and type fields are optional; per the call sites visible here
    they are filled in for 'scale' sub-operations but not for 'terminate'.

    :param db_nslcmop: nslcmop database record (mutated in place)
    :return: index of the added sub-operation, or
        SUBOPERATION_STATUS_NOT_FOUND when db_nslcmop is empty
    """
    if not db_nslcmop:
        return self.SUBOPERATION_STATUS_NOT_FOUND
    new_op = {
        "member_vnf_index": vnf_index,
        "vdu_id": vdu_id,
        "vdu_count_index": vdu_count_index,
        "primitive": primitive,
        "primitive_params": mapped_primitive_params,
    }
    # Optional fields are stored only when a truthy value was supplied
    optional_fields = (
        ("operationState", operationState),
        ("detailed-status", detailed_status),
        ("lcmOperationType", operationType),
        ("RO_nsr_id", RO_nsr_id),
        ("RO_scaling_info", RO_scaling_info),
    )
    for field_name, field_value in optional_fields:
        if field_value:
            new_op[field_name] = field_value
    db_nslcmop_admin = db_nslcmop.get("_admin", {})
    op_list = db_nslcmop_admin.get("operations")
    if op_list:
        # Existing operations: append to the list
        op_list.append(new_op)
    else:
        # First sub-operation: create the 'operations' key
        db_nslcmop_admin.update({"operations": [new_op]})
        op_list = db_nslcmop_admin.get("operations")
    # Persist the whole list (keeps HA peers consistent)
    self.update_db_2("nslcmops", db_nslcmop["_id"], {"_admin.operations": op_list})
    return len(op_list) - 1
4196
4197 # Helper methods for scale() sub-operations
4198
4199 # pre-scale/post-scale:
4200 # Check for 3 different cases:
4201 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4202 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4203 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
def _check_or_add_scale_suboperation(
    self,
    db_nslcmop,
    vnf_index,
    vnf_config_primitive,
    primitive_params,
    operationType,
    RO_nsr_id=None,
    RO_scaling_info=None,
):
    """Find or create the sub-operation for one pre/post-scale step.

    Three possible outcomes:
      a. New:   no matching sub-operation -> add it, return
                SUBOPERATION_STATUS_NEW
      b. Skip:  it exists with operationState == 'COMPLETED' ->
                SUBOPERATION_STATUS_SKIP
      c. Retry: it exists, not COMPLETED -> return its index so the
                caller re-executes it
    """
    # A RO scaling sub-operation is identified by its RO data; a config
    # primitive one by its name, params and operation type
    if RO_nsr_id and RO_scaling_info:
        operationType = "SCALE-RO"
        match = {
            "member_vnf_index": vnf_index,
            "RO_nsr_id": RO_nsr_id,
            "RO_scaling_info": RO_scaling_info,
        }
    else:
        match = {
            "member_vnf_index": vnf_index,
            "primitive": vnf_config_primitive,
            "primitive_params": primitive_params,
            "lcmOperationType": operationType,
        }
    op_index = self._find_suboperation(db_nslcmop, match)
    if op_index != self.SUBOPERATION_STATUS_NOT_FOUND:
        # Cases b/c: delegate the COMPLETED-vs-retry decision
        return self._retry_or_skip_suboperation(db_nslcmop, op_index)
    # Case a: record the new sub-operation.
    # vdu-related fields never apply to scaling, and only one of the
    # (primitive, RO) argument pairs is stored depending on the kind.
    if RO_nsr_id and RO_scaling_info:
        vnf_config_primitive = None
        primitive_params = None
    else:
        RO_nsr_id = None
        RO_scaling_info = None
    self._add_suboperation(
        db_nslcmop,
        vnf_index,
        None,  # vdu_id
        None,  # vdu_count_index
        None,  # vdu_name
        vnf_config_primitive,
        primitive_params,
        "PROCESSING",  # initial status
        "In progress",
        operationType,
        RO_nsr_id,
        RO_scaling_info,
    )
    return self.SUBOPERATION_STATUS_NEW
4267
4268 # Function to return execution_environment id
4269
async def destroy_N2VC(
    self,
    logging_text,
    db_nslcmop,
    vca_deployed,
    config_descriptor,
    vca_index,
    destroy_ee=True,
    exec_primitives=True,
    scaling_in=False,
    vca_id: str = None,
):
    """
    Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
    :param logging_text: prefix used in every log line
    :param db_nslcmop: nslcmop database record (sub-operations are added to it; nsInstanceId is read)
    :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
    :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
    :param vca_index: index in the database _admin.deployed.VCA
    :param destroy_ee: False to do not destroy, because it will be destroyed all of them at once
    :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
        not executed properly
    :param scaling_in: True destroys the application, False destroys the model
    :param vca_id: VCA (controller) id, when not using the default VCA
    :return: None or exception
    """

    self.logger.debug(
        logging_text
        + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
            vca_index, vca_deployed, config_descriptor, destroy_ee
        )
    )

    # default kept for records created before the "type" field existed
    vca_type = vca_deployed.get("type", "lxc_proxy_charm")

    # execute terminate_primitives
    if exec_primitives:
        terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
            config_descriptor.get("terminate-config-primitive"),
            vca_deployed.get("ee_descriptor_id"),
        )
        vdu_id = vca_deployed.get("vdu_id")
        vdu_count_index = vca_deployed.get("vdu_count_index")
        vdu_name = vca_deployed.get("vdu_name")
        vnf_index = vca_deployed.get("member-vnf-index")
        # only run primitives if this VCA was flagged as needing terminate
        if terminate_primitives and vca_deployed.get("needed_terminate"):
            for seq in terminate_primitives:
                # For each sequence in list, get primitive and call _ns_execute_primitive()
                step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                    vnf_index, seq.get("name")
                )
                self.logger.debug(logging_text + step)
                # Create the primitive for each sequence, i.e. "primitive": "touch"
                primitive = seq.get("name")
                mapped_primitive_params = self._get_terminate_primitive_params(
                    seq, vnf_index
                )

                # Add sub-operation
                self._add_suboperation(
                    db_nslcmop,
                    vnf_index,
                    vdu_id,
                    vdu_count_index,
                    vdu_name,
                    primitive,
                    mapped_primitive_params,
                )
                # Sub-operations: Call _ns_execute_primitive() instead of action()
                try:
                    result, result_detail = await self._ns_execute_primitive(
                        vca_deployed["ee_id"],
                        primitive,
                        mapped_primitive_params,
                        vca_type=vca_type,
                        vca_id=vca_id,
                    )
                except LcmException:
                    # this happens when VCA is not deployed. In this case it is not needed to terminate
                    continue
                result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                if result not in result_ok:
                    raise LcmException(
                        "terminate_primitive {} for vnf_member_index={} fails with "
                        "error {}".format(seq.get("name"), vnf_index, result_detail)
                    )
            # set that this VCA do not need terminated
            db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                vca_index
            )
            self.update_db_2(
                "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
            )

    # Delete Prometheus Jobs if any
    # This uses NSR_ID, so it will destroy any jobs under this index
    self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

    if destroy_ee:
        await self.vca_map[vca_type].delete_execution_environment(
            vca_deployed["ee_id"],
            scaling_in=scaling_in,
            vca_type=vca_type,
            vca_id=vca_id,
        )
4375
async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
    """Delete every execution environment of this NS in one shot.

    Removes the whole VCA namespace of the NS ("." + nsr id) and tracks
    the config status TERMINATING -> DELETED.

    :param db_nsr: nsr database record (only _id is used)
    :param vca_id: VCA (controller) id, when not using the default VCA
    """
    self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
    try:
        await self.n2vc.delete_namespace(
            namespace="." + db_nsr["_id"],
            total_timeout=self.timeout.charm_delete,
            vca_id=vca_id,
        )
    except N2VCNotFound:
        # namespace already gone: nothing left to delete
        pass
    self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4388
async def terminate(self, nsr_id, nslcmop_id):
    """Terminate a NS instance.

    Runs in three stages: (1) prepare and fetch records, (2) execute
    terminate primitives and destroy the execution environments that
    need them, (3) delete all remaining EEs, KDUs and the VIM
    deployment. Final nsr/nslcmop status is always written in the
    'finally' block and a "terminated" message is published on kafka.

    :param nsr_id: NS instance id (nsrs _id)
    :param nslcmop_id: operation id (nslcmops _id)
    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        return

    logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")
    timeout_ns_terminate = self.timeout.ns_terminate
    db_nsr = None
    db_nslcmop = None
    operation_params = None
    exc = None
    error_list = []  # annotates all failed error messages
    db_nslcmop_update = {}
    autoremove = False  # autoremove after terminated
    tasks_dict_info = {}  # task -> human readable description
    db_nsr_update = {}
    stage = [
        "Stage 1/3: Preparing task.",
        "Waiting for previous operations to terminate.",
        "",
    ]
    # ^ contains [stage, step, VIM-status]
    try:
        # wait for any previous tasks in process
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

        stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        operation_params = db_nslcmop.get("operationParams") or {}
        if operation_params.get("timeout_ns_terminate"):
            timeout_ns_terminate = operation_params["timeout_ns_terminate"]
        stage[1] = "Getting nsr={} from db.".format(nsr_id)
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

        db_nsr_update["operational-status"] = "terminating"
        db_nsr_update["config-status"] = "terminating"
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state="TERMINATING",
            current_operation="TERMINATING",
            current_operation_id=nslcmop_id,
            other_update=db_nsr_update,
        )
        self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
        # deep copy: the deployed info is only read from here on
        nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
        if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
            # nothing deployed; status is completed at 'finally'
            return

        stage[1] = "Getting vnf descriptors from db."
        db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
        db_vnfrs_dict = {
            db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
        }
        db_vnfds_from_id = {}
        db_vnfds_from_member_index = {}
        # Loop over VNFRs, fetching each distinct VNFD only once
        for vnfr in db_vnfrs_list:
            vnfd_id = vnfr["vnfd-id"]
            if vnfd_id not in db_vnfds_from_id:
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                db_vnfds_from_id[vnfd_id] = vnfd
            db_vnfds_from_member_index[
                vnfr["member-vnf-index-ref"]
            ] = db_vnfds_from_id[vnfd_id]

        # Destroy individual execution environments when there are terminating primitives.
        # Rest of EE will be deleted at once
        # TODO - check before calling _destroy_N2VC
        # if not operation_params.get("skip_terminate_primitives"):#
        # or not vca.get("needed_terminate"):
        stage[0] = "Stage 2/3 execute terminating primitives."
        self.logger.debug(logging_text + stage[0])
        stage[1] = "Looking execution environment that needs terminate."
        self.logger.debug(logging_text + stage[1])

        for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
            config_descriptor = None
            vca_member_vnf_index = vca.get("member-vnf-index")
            vca_id = self.get_vca_id(
                db_vnfrs_dict.get(vca_member_vnf_index)
                if vca_member_vnf_index
                else None,
                db_nsr,
            )
            if not vca or not vca.get("ee_id"):
                continue
            # pick the configuration descriptor at the right level:
            # ns / vdu / kdu / vnf
            if not vca.get("member-vnf-index"):
                # ns
                config_descriptor = db_nsr.get("ns-configuration")
            elif vca.get("vdu_id"):
                db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
            elif vca.get("kdu_name"):
                db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
            else:
                db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
            vca_type = vca.get("type")
            exec_terminate_primitives = not operation_params.get(
                "skip_terminate_primitives"
            ) and vca.get("needed_terminate")
            # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
            # pending native charms
            destroy_ee = (
                True if vca_type in ("helm", "helm-v3", "native_charm") else False
            )
            # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
            #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
            task = asyncio.ensure_future(
                self.destroy_N2VC(
                    logging_text,
                    db_nslcmop,
                    vca,
                    config_descriptor,
                    vca_index,
                    destroy_ee,
                    exec_terminate_primitives,
                    vca_id=vca_id,
                )
            )
            tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

        # wait for pending tasks of terminate primitives
        if tasks_dict_info:
            self.logger.debug(
                logging_text
                + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
            )
            error_list = await self._wait_for_tasks(
                logging_text,
                tasks_dict_info,
                min(self.timeout.charm_delete, timeout_ns_terminate),
                stage,
                nslcmop_id,
            )
            tasks_dict_info.clear()
            if error_list:
                return  # raise LcmException("; ".join(error_list))

        # remove All execution environments at once
        stage[0] = "Stage 3/3 delete all."

        if nsr_deployed.get("VCA"):
            stage[1] = "Deleting all execution environments."
            self.logger.debug(logging_text + stage[1])
            vca_id = self.get_vca_id({}, db_nsr)
            task_delete_ee = asyncio.ensure_future(
                asyncio.wait_for(
                    self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                    timeout=self.timeout.charm_delete,
                )
            )
            # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
            tasks_dict_info[task_delete_ee] = "Terminating all VCA"

        # Delete Namespace and Certificates if necessary
        if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
            await self.vca_map["helm-v3"].delete_tls_certificate(
                namespace=db_nslcmop["nsInstanceId"],
                certificate_name=self.EE_TLS_NAME,
            )
            await self.vca_map["helm-v3"].delete_namespace(
                namespace=db_nslcmop["nsInstanceId"],
            )

        # Delete from k8scluster
        stage[1] = "Deleting KDUs."
        self.logger.debug(logging_text + stage[1])
        # print(nsr_deployed)
        for kdu in get_iterable(nsr_deployed, "K8s"):
            if not kdu or not kdu.get("kdu-instance"):
                continue
            kdu_instance = kdu.get("kdu-instance")
            if kdu.get("k8scluster-type") in self.k8scluster_map:
                # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_kdu_instance = asyncio.ensure_future(
                    self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu_instance,
                        vca_id=vca_id,
                        namespace=kdu.get("namespace"),
                    )
                )
            else:
                self.logger.error(
                    logging_text
                    + "Unknown k8s deployment type {}".format(
                        kdu.get("k8scluster-type")
                    )
                )
                continue
            tasks_dict_info[
                task_delete_kdu_instance
            ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

        # remove from RO
        stage[1] = "Deleting ns from VIM."
        if self.ro_config.ng:
            task_delete_ro = asyncio.ensure_future(
                self._terminate_ng_ro(
                    logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                )
            )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

        # rest of staff will be done at finally

    except (
        ROclient.ROClientException,
        DbException,
        LcmException,
        N2VCException,
    ) as e:
        self.logger.error(logging_text + "Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(
            logging_text + "Cancelled Exception while '{}'".format(stage[1])
        )
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
            exc_info=True,
        )
    finally:
        if exc:
            error_list.append(str(exc))
        try:
            # wait for pending tasks
            if tasks_dict_info:
                stage[1] = "Waiting for terminate pending tasks."
                self.logger.debug(logging_text + stage[1])
                error_list += await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    timeout_ns_terminate,
                    stage,
                    nslcmop_id,
                )
            stage[1] = stage[2] = ""
        except asyncio.CancelledError:
            error_list.append("Cancelled")
            # TODO cancell all tasks
        except Exception as exc:
            error_list.append(str(exc))
        # update status at database
        if error_list:
            error_detail = "; ".join(error_list)
            # self.logger.error(logging_text + error_detail)
            error_description_nslcmop = "{} Detail: {}".format(
                stage[0], error_detail
            )
            error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                nslcmop_id, stage[0]
            )

            db_nsr_update["operational-status"] = "failed"
            db_nsr_update["detailed-status"] = (
                error_description_nsr + " Detail: " + error_detail
            )
            db_nslcmop_update["detailed-status"] = error_detail
            nslcmop_operation_state = "FAILED"
            ns_state = "BROKEN"
        else:
            error_detail = None
            error_description_nsr = error_description_nslcmop = None
            ns_state = "NOT_INSTANTIATED"
            db_nsr_update["operational-status"] = "terminated"
            db_nsr_update["detailed-status"] = "Done"
            db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
            db_nslcmop_update["detailed-status"] = "Done"
            nslcmop_operation_state = "COMPLETED"

        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=ns_state,
                current_operation="IDLE",
                current_operation_id=None,
                error_description=error_description_nsr,
                error_detail=error_detail,
                other_update=db_nsr_update,
            )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
        if operation_params:
            autoremove = operation_params.get("autoremove", False)
        if nslcmop_operation_state:
            try:
                await self.msg.aiowrite(
                    "ns",
                    "terminated",
                    {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                        "autoremove": autoremove,
                    },
                )
            except Exception as e:
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )
        self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
        self.db.del_list("alerts", {"tags.ns_id": nsr_id})

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4722
async def _wait_for_tasks(
    self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
):
    """Wait for a set of asyncio tasks, collecting errors and progress.

    :param logging_text: prefix used in every log line
    :param created_tasks_info: dict task -> human readable description
    :param timeout: overall timeout in seconds for all the tasks together
    :param stage: 3-item list [stage, step, VIM-status]; stage[1] is
        updated in place with "done/total" progress and an error summary
    :param nslcmop_id: operation id used to persist progress
    :param nsr_id: when provided, errors are also written to the nsr record
    :return: list of error detail strings (empty when all tasks succeed)
    """
    time_start = time()
    error_detail_list = []
    error_list = []
    pending_tasks = list(created_tasks_info.keys())
    num_tasks = len(pending_tasks)
    num_done = 0
    stage[1] = "{}/{}.".format(num_done, num_tasks)
    self._write_op_status(nslcmop_id, stage)
    while pending_tasks:
        new_error = None
        # remaining time of the global timeout
        _timeout = timeout + time_start - time()
        done, pending_tasks = await asyncio.wait(
            pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
        )
        num_done += len(done)
        if not done:  # Timeout
            # annotate every still-pending task as timed out and stop waiting
            for task in pending_tasks:
                new_error = created_tasks_info[task] + ": Timeout"
                error_detail_list.append(new_error)
                error_list.append(new_error)
            break
        for task in done:
            if task.cancelled():
                exc = "Cancelled"
            else:
                exc = task.exception()
            if exc:
                if isinstance(exc, asyncio.TimeoutError):
                    exc = "Timeout"
                new_error = created_tasks_info[task] + ": {}".format(exc)
                error_list.append(created_tasks_info[task])
                error_detail_list.append(new_error)
                if isinstance(
                    exc,
                    (
                        str,
                        DbException,
                        N2VCException,
                        ROclient.ROClientException,
                        LcmException,
                        K8sException,
                        NgRoException,
                    ),
                ):
                    # known/expected error types: log only the message
                    self.logger.error(logging_text + new_error)
                else:
                    # unexpected error: log the full traceback
                    exc_traceback = "".join(
                        traceback.format_exception(None, exc, exc.__traceback__)
                    )
                    self.logger.error(
                        logging_text
                        + created_tasks_info[task]
                        + " "
                        + exc_traceback
                    )
            else:
                self.logger.debug(
                    logging_text + created_tasks_info[task] + ": Done"
                )
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        if new_error:
            stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
            if nsr_id:  # update also nsr
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {
                        "errorDescription": "Error at: " + ", ".join(error_list),
                        "errorDetail": ". ".join(error_detail_list),
                    },
                )
        self._write_op_status(nslcmop_id, stage)
    return error_detail_list
4799
4800 @staticmethod
4801 def _map_primitive_params(primitive_desc, params, instantiation_params):
4802 """
4803 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4804 The default-value is used. If it is between < > it look for a value at instantiation_params
4805 :param primitive_desc: portion of VNFD/NSD that describes primitive
4806 :param params: Params provided by user
4807 :param instantiation_params: Instantiation params provided by user
4808 :return: a dictionary with the calculated params
4809 """
4810 calculated_params = {}
4811 for parameter in primitive_desc.get("parameter", ()):
4812 param_name = parameter["name"]
4813 if param_name in params:
4814 calculated_params[param_name] = params[param_name]
4815 elif "default-value" in parameter or "value" in parameter:
4816 if "value" in parameter:
4817 calculated_params[param_name] = parameter["value"]
4818 else:
4819 calculated_params[param_name] = parameter["default-value"]
4820 if (
4821 isinstance(calculated_params[param_name], str)
4822 and calculated_params[param_name].startswith("<")
4823 and calculated_params[param_name].endswith(">")
4824 ):
4825 if calculated_params[param_name][1:-1] in instantiation_params:
4826 calculated_params[param_name] = instantiation_params[
4827 calculated_params[param_name][1:-1]
4828 ]
4829 else:
4830 raise LcmException(
4831 "Parameter {} needed to execute primitive {} not provided".format(
4832 calculated_params[param_name], primitive_desc["name"]
4833 )
4834 )
4835 else:
4836 raise LcmException(
4837 "Parameter {} needed to execute primitive {} not provided".format(
4838 param_name, primitive_desc["name"]
4839 )
4840 )
4841
4842 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4843 calculated_params[param_name] = yaml.safe_dump(
4844 calculated_params[param_name], default_flow_style=True, width=256
4845 )
4846 elif isinstance(calculated_params[param_name], str) and calculated_params[
4847 param_name
4848 ].startswith("!!yaml "):
4849 calculated_params[param_name] = calculated_params[param_name][7:]
4850 if parameter.get("data-type") == "INTEGER":
4851 try:
4852 calculated_params[param_name] = int(calculated_params[param_name])
4853 except ValueError: # error converting string to int
4854 raise LcmException(
4855 "Parameter {} of primitive {} must be integer".format(
4856 param_name, primitive_desc["name"]
4857 )
4858 )
4859 elif parameter.get("data-type") == "BOOLEAN":
4860 calculated_params[param_name] = not (
4861 (str(calculated_params[param_name])).lower() == "false"
4862 )
4863
4864 # add always ns_config_info if primitive name is config
4865 if primitive_desc["name"] == "config":
4866 if "ns_config_info" in instantiation_params:
4867 calculated_params["ns_config_info"] = instantiation_params[
4868 "ns_config_info"
4869 ]
4870 return calculated_params
4871
4872 def _look_for_deployed_vca(
4873 self,
4874 deployed_vca,
4875 member_vnf_index,
4876 vdu_id,
4877 vdu_count_index,
4878 kdu_name=None,
4879 ee_descriptor_id=None,
4880 ):
4881 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4882 for vca in deployed_vca:
4883 if not vca:
4884 continue
4885 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4886 continue
4887 if (
4888 vdu_count_index is not None
4889 and vdu_count_index != vca["vdu_count_index"]
4890 ):
4891 continue
4892 if kdu_name and kdu_name != vca["kdu_name"]:
4893 continue
4894 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4895 continue
4896 break
4897 else:
4898 # vca_deployed not found
4899 raise LcmException(
4900 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4901 " is not deployed".format(
4902 member_vnf_index,
4903 vdu_id,
4904 vdu_count_index,
4905 kdu_name,
4906 ee_descriptor_id,
4907 )
4908 )
4909 # get ee_id
4910 ee_id = vca.get("ee_id")
4911 vca_type = vca.get(
4912 "type", "lxc_proxy_charm"
4913 ) # default value for backward compatibility - proxy charm
4914 if not ee_id:
4915 raise LcmException(
4916 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4917 "execution environment".format(
4918 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4919 )
4920 )
4921 return ee_id, vca_type
4922
async def _ns_execute_primitive(
    self,
    ee_id,
    primitive,
    primitive_params,
    retries=0,
    retries_interval=30,
    timeout=None,
    vca_type=None,
    db_dict=None,
    vca_id: str = None,
) -> (str, str):
    """Execute a primitive on an execution environment, with optional retries.

    :param ee_id: id of the execution environment
    :param primitive: primitive (action) name; "config" gets its params
        wrapped in a {"params": ...} dict
    :param primitive_params: parameters for the primitive
    :param retries: number of additional attempts after a failure
    :param retries_interval: seconds to wait between attempts
    :param timeout: per-attempt timeout; defaults to self.timeout.primitive
    :param vca_type: VCA connector to use; defaults to "lxc_proxy_charm"
    :param db_dict: database info where to write primitive status/output
    :param vca_id: VCA (controller) id, when not using the default VCA
    :return: ("COMPLETED", output) on success; ("FAILED"/"FAIL", message)
        on failure
    """
    try:
        if primitive == "config":
            primitive_params = {"params": primitive_params}

        vca_type = vca_type or "lxc_proxy_charm"

        while retries >= 0:
            try:
                output = await asyncio.wait_for(
                    self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=primitive,
                        params_dict=primitive_params,
                        progress_timeout=self.timeout.progress_primitive,
                        total_timeout=self.timeout.primitive,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    ),
                    timeout=timeout or self.timeout.primitive,
                )
                # execution was OK
                break
            except asyncio.CancelledError:
                raise
            except Exception as e:
                retries -= 1
                if retries >= 0:
                    self.logger.debug(
                        "Error executing action {} on {} -> {}".format(
                            primitive, ee_id, e
                        )
                    )
                    # wait and retry
                    await asyncio.sleep(retries_interval)
                else:
                    # retries exhausted: report the last error
                    if isinstance(e, asyncio.TimeoutError):
                        e = N2VCException(
                            message="Timed out waiting for action to complete"
                        )
                    return "FAILED", getattr(e, "message", repr(e))

        return "COMPLETED", output

    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        # NOTE(review): this path returns "FAIL" while the retry path
        # returns "FAILED"; callers only check for COMPLETED/PARTIALLY_COMPLETED,
        # so both are treated as errors
        return "FAIL", "Error executing action {}: {}".format(primitive, e)
4983
4984 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4985 """
4986 Updating the vca_status with latest juju information in nsrs record
4987 :param: nsr_id: Id of the nsr
4988 :param: nslcmop_id: Id of the nslcmop
4989 :return: None
4990 """
4991
4992 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4993 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4994 vca_id = self.get_vca_id({}, db_nsr)
4995 if db_nsr["_admin"]["deployed"]["K8s"]:
4996 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4997 cluster_uuid, kdu_instance, cluster_type = (
4998 k8s["k8scluster-uuid"],
4999 k8s["kdu-instance"],
5000 k8s["k8scluster-type"],
5001 )
5002 await self._on_update_k8s_db(
5003 cluster_uuid=cluster_uuid,
5004 kdu_instance=kdu_instance,
5005 filter={"_id": nsr_id},
5006 vca_id=vca_id,
5007 cluster_type=cluster_type,
5008 )
5009 else:
5010 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5011 table, filter = "nsrs", {"_id": nsr_id}
5012 path = "_admin.deployed.VCA.{}.".format(vca_index)
5013 await self._on_update_n2vc_db(table, filter, path, {})
5014
5015 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5016 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5017
    async def action(self, nsr_id, nslcmop_id):
        """Run a config primitive (action) on an NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmop record, locates the
        primitive in the matching descriptor section, then executes it either
        through the K8s connector (KDU upgrade/rollback/status or juju-bundle
        actions) or through the deployed VCA execution environment, and
        finally persists the result from the ``finally`` block.

        :param nsr_id: id of the NS instance the action belongs to
        :param nslcmop_id: id of the nslcmop record describing the action
        :return: (nslcmop_operation_state, detailed_status) — returned from
            inside the ``finally`` block
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode each
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch
            # above; for a pure NS-level action (no member_vnf_index) this call
            # would raise NameError — confirm whether NS-level actions reach
            # this point in practice.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive in the vdu/kdu/vnf/ns configuration section
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # built-in kdu operations need no descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect additional parameters at the most specific level available
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # descriptor-declared actions on non-helm (juju-bundle) KDUs go
                # through the K8s branch below; helm charts use exec_primitive
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # descriptor-declared kdu action (juju-bundle)
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # VCA (charm) primitive: locate the execution environment and run it
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist operation result; note the method returns from inside
            # this finally block
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5389
5390 async def terminate_vdus(
5391 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5392 ):
5393 """This method terminates VDUs
5394
5395 Args:
5396 db_vnfr: VNF instance record
5397 member_vnf_index: VNF index to identify the VDUs to be removed
5398 db_nsr: NS instance record
5399 update_db_nslcmops: Nslcmop update record
5400 """
5401 vca_scaling_info = []
5402 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5403 scaling_info["scaling_direction"] = "IN"
5404 scaling_info["vdu-delete"] = {}
5405 scaling_info["kdu-delete"] = {}
5406 db_vdur = db_vnfr.get("vdur")
5407 vdur_list = copy(db_vdur)
5408 count_index = 0
5409 for index, vdu in enumerate(vdur_list):
5410 vca_scaling_info.append(
5411 {
5412 "osm_vdu_id": vdu["vdu-id-ref"],
5413 "member-vnf-index": member_vnf_index,
5414 "type": "delete",
5415 "vdu_index": count_index,
5416 }
5417 )
5418 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5419 scaling_info["vdu"].append(
5420 {
5421 "name": vdu.get("name") or vdu.get("vdu-name"),
5422 "vdu_id": vdu["vdu-id-ref"],
5423 "interface": [],
5424 }
5425 )
5426 for interface in vdu["interfaces"]:
5427 scaling_info["vdu"][index]["interface"].append(
5428 {
5429 "name": interface["name"],
5430 "ip_address": interface["ip-address"],
5431 "mac_address": interface.get("mac-address"),
5432 }
5433 )
5434 self.logger.info("NS update scaling info{}".format(scaling_info))
5435 stage[2] = "Terminating VDUs"
5436 if scaling_info.get("vdu-delete"):
5437 # scale_process = "RO"
5438 if self.ro_config.ng:
5439 await self._scale_ng_ro(
5440 logging_text,
5441 db_nsr,
5442 update_db_nslcmops,
5443 db_vnfr,
5444 scaling_info,
5445 stage,
5446 )
5447
5448 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5449 """This method is to Remove VNF instances from NS.
5450
5451 Args:
5452 nsr_id: NS instance id
5453 nslcmop_id: nslcmop id of update
5454 vnf_instance_id: id of the VNF instance to be removed
5455
5456 Returns:
5457 result: (str, str) COMPLETED/FAILED, details
5458 """
5459 try:
5460 db_nsr_update = {}
5461 logging_text = "Task ns={} update ".format(nsr_id)
5462 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5463 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5464 if check_vnfr_count > 1:
5465 stage = ["", "", ""]
5466 step = "Getting nslcmop from database"
5467 self.logger.debug(
5468 step + " after having waited for previous tasks to be completed"
5469 )
5470 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5471 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5472 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5473 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5474 """ db_vnfr = self.db.get_one(
5475 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5476
5477 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5478 await self.terminate_vdus(
5479 db_vnfr,
5480 member_vnf_index,
5481 db_nsr,
5482 update_db_nslcmops,
5483 stage,
5484 logging_text,
5485 )
5486
5487 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5488 constituent_vnfr.remove(db_vnfr.get("_id"))
5489 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5490 "constituent-vnfr-ref"
5491 )
5492 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5493 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5494 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5495 return "COMPLETED", "Done"
5496 else:
5497 step = "Terminate VNF Failed with"
5498 raise LcmException(
5499 "{} Cannot terminate the last VNF in this NS.".format(
5500 vnf_instance_id
5501 )
5502 )
5503 except (LcmException, asyncio.CancelledError):
5504 raise
5505 except Exception as e:
5506 self.logger.debug("Error removing VNF {}".format(e))
5507 return "FAILED", "Error removing VNF {}".format(e)
5508
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        It first terminates the existing VDUs of the VNF, rewrites the vnfr
        record (connection points, vdur, revision) from the latest descriptor,
        and then instantiates the new VDU resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor (latest revision)
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the connection-point list from the descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # new vdur list is provided by the caller via operationParams
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the record so the instantiation below sees the new vdur
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5634
5635 async def _ns_charm_upgrade(
5636 self,
5637 ee_id,
5638 charm_id,
5639 charm_type,
5640 path,
5641 timeout: float = None,
5642 ) -> (str, str):
5643 """This method upgrade charms in VNF instances
5644
5645 Args:
5646 ee_id: Execution environment id
5647 path: Local path to the charm
5648 charm_id: charm-id
5649 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5650 timeout: (Float) Timeout for the ns update operation
5651
5652 Returns:
5653 result: (str, str) COMPLETED/FAILED, details
5654 """
5655 try:
5656 charm_type = charm_type or "lxc_proxy_charm"
5657 output = await self.vca_map[charm_type].upgrade_charm(
5658 ee_id=ee_id,
5659 path=path,
5660 charm_id=charm_id,
5661 charm_type=charm_type,
5662 timeout=timeout or self.timeout.ns_update,
5663 )
5664
5665 if output:
5666 return "COMPLETED", output
5667
5668 except (LcmException, asyncio.CancelledError):
5669 raise
5670
5671 except Exception as e:
5672 self.logger.debug("Error upgrading charm {}".format(path))
5673
5674 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5675
5676 async def update(self, nsr_id, nslcmop_id):
5677 """Update NS according to different update types
5678
5679 This method performs upgrade of VNF instances then updates the revision
5680 number in VNF record
5681
5682 Args:
5683 nsr_id: Network service will be updated
5684 nslcmop_id: ns lcm operation id
5685
5686 Returns:
5687 It may raise DbException, LcmException, N2VCException, K8sException
5688
5689 """
5690 # Try to lock HA task here
5691 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5692 if not task_is_locked_by_me:
5693 return
5694
5695 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5696 self.logger.debug(logging_text + "Enter")
5697
5698 # Set the required variables to be filled up later
5699 db_nsr = None
5700 db_nslcmop_update = {}
5701 vnfr_update = {}
5702 nslcmop_operation_state = None
5703 db_nsr_update = {}
5704 error_description_nslcmop = ""
5705 exc = None
5706 change_type = "updated"
5707 detailed_status = ""
5708 member_vnf_index = None
5709
5710 try:
5711 # wait for any previous tasks in process
5712 step = "Waiting for previous operations to terminate"
5713 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5714 self._write_ns_status(
5715 nsr_id=nsr_id,
5716 ns_state=None,
5717 current_operation="UPDATING",
5718 current_operation_id=nslcmop_id,
5719 )
5720
5721 step = "Getting nslcmop from database"
5722 db_nslcmop = self.db.get_one(
5723 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5724 )
5725 update_type = db_nslcmop["operationParams"]["updateType"]
5726
5727 step = "Getting nsr from database"
5728 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5729 old_operational_status = db_nsr["operational-status"]
5730 db_nsr_update["operational-status"] = "updating"
5731 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5732 nsr_deployed = db_nsr["_admin"].get("deployed")
5733
5734 if update_type == "CHANGE_VNFPKG":
5735 # Get the input parameters given through update request
5736 vnf_instance_id = db_nslcmop["operationParams"][
5737 "changeVnfPackageData"
5738 ].get("vnfInstanceId")
5739
5740 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5741 "vnfdId"
5742 )
5743 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5744
5745 step = "Getting vnfr from database"
5746 db_vnfr = self.db.get_one(
5747 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5748 )
5749
5750 step = "Getting vnfds from database"
5751 # Latest VNFD
5752 latest_vnfd = self.db.get_one(
5753 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5754 )
5755 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5756
5757 # Current VNFD
5758 current_vnf_revision = db_vnfr.get("revision", 1)
5759 current_vnfd = self.db.get_one(
5760 "vnfds_revisions",
5761 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5762 fail_on_empty=False,
5763 )
5764 # Charm artifact paths will be filled up later
5765 (
5766 current_charm_artifact_path,
5767 target_charm_artifact_path,
5768 charm_artifact_paths,
5769 helm_artifacts,
5770 ) = ([], [], [], [])
5771
5772 step = "Checking if revision has changed in VNFD"
5773 if current_vnf_revision != latest_vnfd_revision:
5774 change_type = "policy_updated"
5775
5776 # There is new revision of VNFD, update operation is required
5777 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5778 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5779
5780 step = "Removing the VNFD packages if they exist in the local path"
5781 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5782 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5783
5784 step = "Get the VNFD packages from FSMongo"
5785 self.fs.sync(from_path=latest_vnfd_path)
5786 self.fs.sync(from_path=current_vnfd_path)
5787
5788 step = (
5789 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5790 )
5791 current_base_folder = current_vnfd["_admin"]["storage"]
5792 latest_base_folder = latest_vnfd["_admin"]["storage"]
5793
5794 for vca_index, vca_deployed in enumerate(
5795 get_iterable(nsr_deployed, "VCA")
5796 ):
5797 vnf_index = db_vnfr.get("member-vnf-index-ref")
5798
5799 # Getting charm-id and charm-type
5800 if vca_deployed.get("member-vnf-index") == vnf_index:
5801 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5802 vca_type = vca_deployed.get("type")
5803 vdu_count_index = vca_deployed.get("vdu_count_index")
5804
5805 # Getting ee-id
5806 ee_id = vca_deployed.get("ee_id")
5807
5808 step = "Getting descriptor config"
5809 if current_vnfd.get("kdu"):
5810 search_key = "kdu_name"
5811 else:
5812 search_key = "vnfd_id"
5813
5814 entity_id = vca_deployed.get(search_key)
5815
5816 descriptor_config = get_configuration(
5817 current_vnfd, entity_id
5818 )
5819
5820 if "execution-environment-list" in descriptor_config:
5821 ee_list = descriptor_config.get(
5822 "execution-environment-list", []
5823 )
5824 else:
5825 ee_list = []
5826
5827 # There could be several charm used in the same VNF
5828 for ee_item in ee_list:
5829 if ee_item.get("juju"):
5830 step = "Getting charm name"
5831 charm_name = ee_item["juju"].get("charm")
5832
5833 step = "Setting Charm artifact paths"
5834 current_charm_artifact_path.append(
5835 get_charm_artifact_path(
5836 current_base_folder,
5837 charm_name,
5838 vca_type,
5839 current_vnf_revision,
5840 )
5841 )
5842 target_charm_artifact_path.append(
5843 get_charm_artifact_path(
5844 latest_base_folder,
5845 charm_name,
5846 vca_type,
5847 latest_vnfd_revision,
5848 )
5849 )
5850 elif ee_item.get("helm-chart"):
5851 # add chart to list and all parameters
5852 step = "Getting helm chart name"
5853 chart_name = ee_item.get("helm-chart")
5854 if (
5855 ee_item.get("helm-version")
5856 and ee_item.get("helm-version") == "v2"
5857 ):
5858 vca_type = "helm"
5859 else:
5860 vca_type = "helm-v3"
5861 step = "Setting Helm chart artifact paths"
5862
5863 helm_artifacts.append(
5864 {
5865 "current_artifact_path": get_charm_artifact_path(
5866 current_base_folder,
5867 chart_name,
5868 vca_type,
5869 current_vnf_revision,
5870 ),
5871 "target_artifact_path": get_charm_artifact_path(
5872 latest_base_folder,
5873 chart_name,
5874 vca_type,
5875 latest_vnfd_revision,
5876 ),
5877 "ee_id": ee_id,
5878 "vca_index": vca_index,
5879 "vdu_index": vdu_count_index,
5880 }
5881 )
5882
5883 charm_artifact_paths = zip(
5884 current_charm_artifact_path, target_charm_artifact_path
5885 )
5886
5887 step = "Checking if software version has changed in VNFD"
5888 if find_software_version(current_vnfd) != find_software_version(
5889 latest_vnfd
5890 ):
5891 step = "Checking if existing VNF has charm"
5892 for current_charm_path, target_charm_path in list(
5893 charm_artifact_paths
5894 ):
5895 if current_charm_path:
5896 raise LcmException(
5897 "Software version change is not supported as VNF instance {} has charm.".format(
5898 vnf_instance_id
5899 )
5900 )
5901
5902 # There is no change in the charm package, then redeploy the VNF
5903 # based on new descriptor
5904 step = "Redeploying VNF"
5905 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5906 (result, detailed_status) = await self._ns_redeploy_vnf(
5907 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5908 )
5909 if result == "FAILED":
5910 nslcmop_operation_state = result
5911 error_description_nslcmop = detailed_status
5912 db_nslcmop_update["detailed-status"] = detailed_status
5913 self.logger.debug(
5914 logging_text
5915 + " step {} Done with result {} {}".format(
5916 step, nslcmop_operation_state, detailed_status
5917 )
5918 )
5919
5920 else:
5921 step = "Checking if any charm package has changed or not"
5922 for current_charm_path, target_charm_path in list(
5923 charm_artifact_paths
5924 ):
5925 if (
5926 current_charm_path
5927 and target_charm_path
5928 and self.check_charm_hash_changed(
5929 current_charm_path, target_charm_path
5930 )
5931 ):
5932 step = "Checking whether VNF uses juju bundle"
5933 if check_juju_bundle_existence(current_vnfd):
5934 raise LcmException(
5935 "Charm upgrade is not supported for the instance which"
5936 " uses juju-bundle: {}".format(
5937 check_juju_bundle_existence(current_vnfd)
5938 )
5939 )
5940
5941 step = "Upgrading Charm"
5942 (
5943 result,
5944 detailed_status,
5945 ) = await self._ns_charm_upgrade(
5946 ee_id=ee_id,
5947 charm_id=vca_id,
5948 charm_type=vca_type,
5949 path=self.fs.path + target_charm_path,
5950 timeout=timeout_seconds,
5951 )
5952
5953 if result == "FAILED":
5954 nslcmop_operation_state = result
5955 error_description_nslcmop = detailed_status
5956
5957 db_nslcmop_update["detailed-status"] = detailed_status
5958 self.logger.debug(
5959 logging_text
5960 + " step {} Done with result {} {}".format(
5961 step, nslcmop_operation_state, detailed_status
5962 )
5963 )
5964
5965 step = "Updating policies"
5966 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5967 result = "COMPLETED"
5968 detailed_status = "Done"
5969 db_nslcmop_update["detailed-status"] = "Done"
5970
5971 # helm base EE
5972 for item in helm_artifacts:
5973 if not (
5974 item["current_artifact_path"]
5975 and item["target_artifact_path"]
5976 and self.check_charm_hash_changed(
5977 item["current_artifact_path"],
5978 item["target_artifact_path"],
5979 )
5980 ):
5981 continue
5982 db_update_entry = "_admin.deployed.VCA.{}.".format(
5983 item["vca_index"]
5984 )
5985 vnfr_id = db_vnfr["_id"]
5986 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
5987 db_dict = {
5988 "collection": "nsrs",
5989 "filter": {"_id": nsr_id},
5990 "path": db_update_entry,
5991 }
5992 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
5993 await self.vca_map[vca_type].upgrade_execution_environment(
5994 namespace=namespace,
5995 helm_id=helm_id,
5996 db_dict=db_dict,
5997 config=osm_config,
5998 artifact_path=item["target_artifact_path"],
5999 vca_type=vca_type,
6000 )
6001 vnf_id = db_vnfr.get("vnfd-ref")
6002 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6003 self.logger.debug("get ssh key block")
6004 rw_mgmt_ip = None
6005 if deep_get(
6006 config_descriptor,
6007 ("config-access", "ssh-access", "required"),
6008 ):
6009 # Needed to inject a ssh key
6010 user = deep_get(
6011 config_descriptor,
6012 ("config-access", "ssh-access", "default-user"),
6013 )
6014 step = (
6015 "Install configuration Software, getting public ssh key"
6016 )
6017 pub_key = await self.vca_map[
6018 vca_type
6019 ].get_ee_ssh_public__key(
6020 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6021 )
6022
6023 step = (
6024 "Insert public key into VM user={} ssh_key={}".format(
6025 user, pub_key
6026 )
6027 )
6028 self.logger.debug(logging_text + step)
6029
6030 # wait for RO (ip-address) Insert pub_key into VM
6031 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6032 logging_text,
6033 nsr_id,
6034 vnfr_id,
6035 None,
6036 item["vdu_index"],
6037 user=user,
6038 pub_key=pub_key,
6039 )
6040
6041 initial_config_primitive_list = config_descriptor.get(
6042 "initial-config-primitive"
6043 )
6044 config_primitive = next(
6045 (
6046 p
6047 for p in initial_config_primitive_list
6048 if p["name"] == "config"
6049 ),
6050 None,
6051 )
6052 if not config_primitive:
6053 continue
6054
6055 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6056 if rw_mgmt_ip:
6057 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6058 if db_vnfr.get("additionalParamsForVnf"):
6059 deploy_params.update(
6060 parse_yaml_strings(
6061 db_vnfr["additionalParamsForVnf"].copy()
6062 )
6063 )
6064 primitive_params_ = self._map_primitive_params(
6065 config_primitive, {}, deploy_params
6066 )
6067
6068 step = "execute primitive '{}' params '{}'".format(
6069 config_primitive["name"], primitive_params_
6070 )
6071 self.logger.debug(logging_text + step)
6072 await self.vca_map[vca_type].exec_primitive(
6073 ee_id=ee_id,
6074 primitive_name=config_primitive["name"],
6075 params_dict=primitive_params_,
6076 db_dict=db_dict,
6077 vca_id=vca_id,
6078 vca_type=vca_type,
6079 )
6080
6081 step = "Updating policies"
6082 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6083 detailed_status = "Done"
6084 db_nslcmop_update["detailed-status"] = "Done"
6085
6086 # If nslcmop_operation_state is None, so any operation is not failed.
6087 if not nslcmop_operation_state:
6088 nslcmop_operation_state = "COMPLETED"
6089
6090 # If update CHANGE_VNFPKG nslcmop_operation is successful
6091 # vnf revision need to be updated
6092 vnfr_update["revision"] = latest_vnfd_revision
6093 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6094
6095 self.logger.debug(
6096 logging_text
6097 + " task Done with result {} {}".format(
6098 nslcmop_operation_state, detailed_status
6099 )
6100 )
6101 elif update_type == "REMOVE_VNF":
6102 # This part is included in https://osm.etsi.org/gerrit/11876
6103 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6104 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6105 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6106 step = "Removing VNF"
6107 (result, detailed_status) = await self.remove_vnf(
6108 nsr_id, nslcmop_id, vnf_instance_id
6109 )
6110 if result == "FAILED":
6111 nslcmop_operation_state = result
6112 error_description_nslcmop = detailed_status
6113 db_nslcmop_update["detailed-status"] = detailed_status
6114 change_type = "vnf_terminated"
6115 if not nslcmop_operation_state:
6116 nslcmop_operation_state = "COMPLETED"
6117 self.logger.debug(
6118 logging_text
6119 + " task Done with result {} {}".format(
6120 nslcmop_operation_state, detailed_status
6121 )
6122 )
6123
6124 elif update_type == "OPERATE_VNF":
6125 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6126 "vnfInstanceId"
6127 ]
6128 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6129 "changeStateTo"
6130 ]
6131 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6132 "additionalParam"
6133 ]
6134 (result, detailed_status) = await self.rebuild_start_stop(
6135 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6136 )
6137 if result == "FAILED":
6138 nslcmop_operation_state = result
6139 error_description_nslcmop = detailed_status
6140 db_nslcmop_update["detailed-status"] = detailed_status
6141 if not nslcmop_operation_state:
6142 nslcmop_operation_state = "COMPLETED"
6143 self.logger.debug(
6144 logging_text
6145 + " task Done with result {} {}".format(
6146 nslcmop_operation_state, detailed_status
6147 )
6148 )
6149
6150 # If nslcmop_operation_state is None, so any operation is not failed.
6151 # All operations are executed in overall.
6152 if not nslcmop_operation_state:
6153 nslcmop_operation_state = "COMPLETED"
6154 db_nsr_update["operational-status"] = old_operational_status
6155
6156 except (DbException, LcmException, N2VCException, K8sException) as e:
6157 self.logger.error(logging_text + "Exit Exception {}".format(e))
6158 exc = e
6159 except asyncio.CancelledError:
6160 self.logger.error(
6161 logging_text + "Cancelled Exception while '{}'".format(step)
6162 )
6163 exc = "Operation was cancelled"
6164 except asyncio.TimeoutError:
6165 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6166 exc = "Timeout"
6167 except Exception as e:
6168 exc = traceback.format_exc()
6169 self.logger.critical(
6170 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6171 exc_info=True,
6172 )
6173 finally:
6174 if exc:
6175 db_nslcmop_update[
6176 "detailed-status"
6177 ] = (
6178 detailed_status
6179 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6180 nslcmop_operation_state = "FAILED"
6181 db_nsr_update["operational-status"] = old_operational_status
6182 if db_nsr:
6183 self._write_ns_status(
6184 nsr_id=nsr_id,
6185 ns_state=db_nsr["nsState"],
6186 current_operation="IDLE",
6187 current_operation_id=None,
6188 other_update=db_nsr_update,
6189 )
6190
6191 self._write_op_status(
6192 op_id=nslcmop_id,
6193 stage="",
6194 error_message=error_description_nslcmop,
6195 operation_state=nslcmop_operation_state,
6196 other_update=db_nslcmop_update,
6197 )
6198
6199 if nslcmop_operation_state:
6200 try:
6201 msg = {
6202 "nsr_id": nsr_id,
6203 "nslcmop_id": nslcmop_id,
6204 "operationState": nslcmop_operation_state,
6205 }
6206 if (
6207 change_type in ("vnf_terminated", "policy_updated")
6208 and member_vnf_index
6209 ):
6210 msg.update({"vnf_member_index": member_vnf_index})
6211 await self.msg.aiowrite("ns", change_type, msg)
6212 except Exception as e:
6213 self.logger.error(
6214 logging_text + "kafka_write notification Exception {}".format(e)
6215 )
6216 self.logger.debug(logging_text + "Exit")
6217 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6218 return nslcmop_operation_state, detailed_status
6219
6220 async def scale(self, nsr_id, nslcmop_id):
6221 # Try to lock HA task here
6222 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6223 if not task_is_locked_by_me:
6224 return
6225
6226 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6227 stage = ["", "", ""]
6228 tasks_dict_info = {}
6229 # ^ stage, step, VIM progress
6230 self.logger.debug(logging_text + "Enter")
6231 # get all needed from database
6232 db_nsr = None
6233 db_nslcmop_update = {}
6234 db_nsr_update = {}
6235 exc = None
6236 # in case of error, indicates what part of scale was failed to put nsr at error status
6237 scale_process = None
6238 old_operational_status = ""
6239 old_config_status = ""
6240 nsi_id = None
6241 try:
6242 # wait for any previous tasks in process
6243 step = "Waiting for previous operations to terminate"
6244 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6245 self._write_ns_status(
6246 nsr_id=nsr_id,
6247 ns_state=None,
6248 current_operation="SCALING",
6249 current_operation_id=nslcmop_id,
6250 )
6251
6252 step = "Getting nslcmop from database"
6253 self.logger.debug(
6254 step + " after having waited for previous tasks to be completed"
6255 )
6256 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6257
6258 step = "Getting nsr from database"
6259 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6260 old_operational_status = db_nsr["operational-status"]
6261 old_config_status = db_nsr["config-status"]
6262
6263 step = "Parsing scaling parameters"
6264 db_nsr_update["operational-status"] = "scaling"
6265 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6266 nsr_deployed = db_nsr["_admin"].get("deployed")
6267
6268 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6269 "scaleByStepData"
6270 ]["member-vnf-index"]
6271 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6272 "scaleByStepData"
6273 ]["scaling-group-descriptor"]
6274 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6275 # for backward compatibility
6276 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6277 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6278 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6279 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6280
6281 step = "Getting vnfr from database"
6282 db_vnfr = self.db.get_one(
6283 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6284 )
6285
6286 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6287
6288 step = "Getting vnfd from database"
6289 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6290
6291 base_folder = db_vnfd["_admin"]["storage"]
6292
6293 step = "Getting scaling-group-descriptor"
6294 scaling_descriptor = find_in_list(
6295 get_scaling_aspect(db_vnfd),
6296 lambda scale_desc: scale_desc["name"] == scaling_group,
6297 )
6298 if not scaling_descriptor:
6299 raise LcmException(
6300 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6301 "at vnfd:scaling-group-descriptor".format(scaling_group)
6302 )
6303
6304 step = "Sending scale order to VIM"
6305 # TODO check if ns is in a proper status
6306 nb_scale_op = 0
6307 if not db_nsr["_admin"].get("scaling-group"):
6308 self.update_db_2(
6309 "nsrs",
6310 nsr_id,
6311 {
6312 "_admin.scaling-group": [
6313 {"name": scaling_group, "nb-scale-op": 0}
6314 ]
6315 },
6316 )
6317 admin_scale_index = 0
6318 else:
6319 for admin_scale_index, admin_scale_info in enumerate(
6320 db_nsr["_admin"]["scaling-group"]
6321 ):
6322 if admin_scale_info["name"] == scaling_group:
6323 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6324 break
6325 else: # not found, set index one plus last element and add new entry with the name
6326 admin_scale_index += 1
6327 db_nsr_update[
6328 "_admin.scaling-group.{}.name".format(admin_scale_index)
6329 ] = scaling_group
6330
6331 vca_scaling_info = []
6332 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6333 if scaling_type == "SCALE_OUT":
6334 if "aspect-delta-details" not in scaling_descriptor:
6335 raise LcmException(
6336 "Aspect delta details not fount in scaling descriptor {}".format(
6337 scaling_descriptor["name"]
6338 )
6339 )
6340 # count if max-instance-count is reached
6341 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6342
6343 scaling_info["scaling_direction"] = "OUT"
6344 scaling_info["vdu-create"] = {}
6345 scaling_info["kdu-create"] = {}
6346 for delta in deltas:
6347 for vdu_delta in delta.get("vdu-delta", {}):
6348 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6349 # vdu_index also provides the number of instance of the targeted vdu
6350 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6351 cloud_init_text = self._get_vdu_cloud_init_content(
6352 vdud, db_vnfd
6353 )
6354 if cloud_init_text:
6355 additional_params = (
6356 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6357 or {}
6358 )
6359 cloud_init_list = []
6360
6361 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6362 max_instance_count = 10
6363 if vdu_profile and "max-number-of-instances" in vdu_profile:
6364 max_instance_count = vdu_profile.get(
6365 "max-number-of-instances", 10
6366 )
6367
6368 default_instance_num = get_number_of_instances(
6369 db_vnfd, vdud["id"]
6370 )
6371 instances_number = vdu_delta.get("number-of-instances", 1)
6372 nb_scale_op += instances_number
6373
6374 new_instance_count = nb_scale_op + default_instance_num
6375 # Control if new count is over max and vdu count is less than max.
6376 # Then assign new instance count
6377 if new_instance_count > max_instance_count > vdu_count:
6378 instances_number = new_instance_count - max_instance_count
6379 else:
6380 instances_number = instances_number
6381
6382 if new_instance_count > max_instance_count:
6383 raise LcmException(
6384 "reached the limit of {} (max-instance-count) "
6385 "scaling-out operations for the "
6386 "scaling-group-descriptor '{}'".format(
6387 nb_scale_op, scaling_group
6388 )
6389 )
6390 for x in range(vdu_delta.get("number-of-instances", 1)):
6391 if cloud_init_text:
6392 # TODO Information of its own ip is not available because db_vnfr is not updated.
6393 additional_params["OSM"] = get_osm_params(
6394 db_vnfr, vdu_delta["id"], vdu_index + x
6395 )
6396 cloud_init_list.append(
6397 self._parse_cloud_init(
6398 cloud_init_text,
6399 additional_params,
6400 db_vnfd["id"],
6401 vdud["id"],
6402 )
6403 )
6404 vca_scaling_info.append(
6405 {
6406 "osm_vdu_id": vdu_delta["id"],
6407 "member-vnf-index": vnf_index,
6408 "type": "create",
6409 "vdu_index": vdu_index + x,
6410 }
6411 )
6412 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6413 for kdu_delta in delta.get("kdu-resource-delta", {}):
6414 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6415 kdu_name = kdu_profile["kdu-name"]
6416 resource_name = kdu_profile.get("resource-name", "")
6417
6418 # Might have different kdus in the same delta
6419 # Should have list for each kdu
6420 if not scaling_info["kdu-create"].get(kdu_name, None):
6421 scaling_info["kdu-create"][kdu_name] = []
6422
6423 kdur = get_kdur(db_vnfr, kdu_name)
6424 if kdur.get("helm-chart"):
6425 k8s_cluster_type = "helm-chart-v3"
6426 self.logger.debug("kdur: {}".format(kdur))
6427 if (
6428 kdur.get("helm-version")
6429 and kdur.get("helm-version") == "v2"
6430 ):
6431 k8s_cluster_type = "helm-chart"
6432 elif kdur.get("juju-bundle"):
6433 k8s_cluster_type = "juju-bundle"
6434 else:
6435 raise LcmException(
6436 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6437 "juju-bundle. Maybe an old NBI version is running".format(
6438 db_vnfr["member-vnf-index-ref"], kdu_name
6439 )
6440 )
6441
6442 max_instance_count = 10
6443 if kdu_profile and "max-number-of-instances" in kdu_profile:
6444 max_instance_count = kdu_profile.get(
6445 "max-number-of-instances", 10
6446 )
6447
6448 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6449 deployed_kdu, _ = get_deployed_kdu(
6450 nsr_deployed, kdu_name, vnf_index
6451 )
6452 if deployed_kdu is None:
6453 raise LcmException(
6454 "KDU '{}' for vnf '{}' not deployed".format(
6455 kdu_name, vnf_index
6456 )
6457 )
6458 kdu_instance = deployed_kdu.get("kdu-instance")
6459 instance_num = await self.k8scluster_map[
6460 k8s_cluster_type
6461 ].get_scale_count(
6462 resource_name,
6463 kdu_instance,
6464 vca_id=vca_id,
6465 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6466 kdu_model=deployed_kdu.get("kdu-model"),
6467 )
6468 kdu_replica_count = instance_num + kdu_delta.get(
6469 "number-of-instances", 1
6470 )
6471
6472 # Control if new count is over max and instance_num is less than max.
6473 # Then assign max instance number to kdu replica count
6474 if kdu_replica_count > max_instance_count > instance_num:
6475 kdu_replica_count = max_instance_count
6476 if kdu_replica_count > max_instance_count:
6477 raise LcmException(
6478 "reached the limit of {} (max-instance-count) "
6479 "scaling-out operations for the "
6480 "scaling-group-descriptor '{}'".format(
6481 instance_num, scaling_group
6482 )
6483 )
6484
6485 for x in range(kdu_delta.get("number-of-instances", 1)):
6486 vca_scaling_info.append(
6487 {
6488 "osm_kdu_id": kdu_name,
6489 "member-vnf-index": vnf_index,
6490 "type": "create",
6491 "kdu_index": instance_num + x - 1,
6492 }
6493 )
6494 scaling_info["kdu-create"][kdu_name].append(
6495 {
6496 "member-vnf-index": vnf_index,
6497 "type": "create",
6498 "k8s-cluster-type": k8s_cluster_type,
6499 "resource-name": resource_name,
6500 "scale": kdu_replica_count,
6501 }
6502 )
6503 elif scaling_type == "SCALE_IN":
6504 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6505
6506 scaling_info["scaling_direction"] = "IN"
6507 scaling_info["vdu-delete"] = {}
6508 scaling_info["kdu-delete"] = {}
6509
6510 for delta in deltas:
6511 for vdu_delta in delta.get("vdu-delta", {}):
6512 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6513 min_instance_count = 0
6514 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6515 if vdu_profile and "min-number-of-instances" in vdu_profile:
6516 min_instance_count = vdu_profile["min-number-of-instances"]
6517
6518 default_instance_num = get_number_of_instances(
6519 db_vnfd, vdu_delta["id"]
6520 )
6521 instance_num = vdu_delta.get("number-of-instances", 1)
6522 nb_scale_op -= instance_num
6523
6524 new_instance_count = nb_scale_op + default_instance_num
6525
6526 if new_instance_count < min_instance_count < vdu_count:
6527 instances_number = min_instance_count - new_instance_count
6528 else:
6529 instances_number = instance_num
6530
6531 if new_instance_count < min_instance_count:
6532 raise LcmException(
6533 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6534 "scaling-group-descriptor '{}'".format(
6535 nb_scale_op, scaling_group
6536 )
6537 )
6538 for x in range(vdu_delta.get("number-of-instances", 1)):
6539 vca_scaling_info.append(
6540 {
6541 "osm_vdu_id": vdu_delta["id"],
6542 "member-vnf-index": vnf_index,
6543 "type": "delete",
6544 "vdu_index": vdu_index - 1 - x,
6545 }
6546 )
6547 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6548 for kdu_delta in delta.get("kdu-resource-delta", {}):
6549 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6550 kdu_name = kdu_profile["kdu-name"]
6551 resource_name = kdu_profile.get("resource-name", "")
6552
6553 if not scaling_info["kdu-delete"].get(kdu_name, None):
6554 scaling_info["kdu-delete"][kdu_name] = []
6555
6556 kdur = get_kdur(db_vnfr, kdu_name)
6557 if kdur.get("helm-chart"):
6558 k8s_cluster_type = "helm-chart-v3"
6559 self.logger.debug("kdur: {}".format(kdur))
6560 if (
6561 kdur.get("helm-version")
6562 and kdur.get("helm-version") == "v2"
6563 ):
6564 k8s_cluster_type = "helm-chart"
6565 elif kdur.get("juju-bundle"):
6566 k8s_cluster_type = "juju-bundle"
6567 else:
6568 raise LcmException(
6569 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6570 "juju-bundle. Maybe an old NBI version is running".format(
6571 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6572 )
6573 )
6574
6575 min_instance_count = 0
6576 if kdu_profile and "min-number-of-instances" in kdu_profile:
6577 min_instance_count = kdu_profile["min-number-of-instances"]
6578
6579 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6580 deployed_kdu, _ = get_deployed_kdu(
6581 nsr_deployed, kdu_name, vnf_index
6582 )
6583 if deployed_kdu is None:
6584 raise LcmException(
6585 "KDU '{}' for vnf '{}' not deployed".format(
6586 kdu_name, vnf_index
6587 )
6588 )
6589 kdu_instance = deployed_kdu.get("kdu-instance")
6590 instance_num = await self.k8scluster_map[
6591 k8s_cluster_type
6592 ].get_scale_count(
6593 resource_name,
6594 kdu_instance,
6595 vca_id=vca_id,
6596 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6597 kdu_model=deployed_kdu.get("kdu-model"),
6598 )
6599 kdu_replica_count = instance_num - kdu_delta.get(
6600 "number-of-instances", 1
6601 )
6602
6603 if kdu_replica_count < min_instance_count < instance_num:
6604 kdu_replica_count = min_instance_count
6605 if kdu_replica_count < min_instance_count:
6606 raise LcmException(
6607 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6608 "scaling-group-descriptor '{}'".format(
6609 instance_num, scaling_group
6610 )
6611 )
6612
6613 for x in range(kdu_delta.get("number-of-instances", 1)):
6614 vca_scaling_info.append(
6615 {
6616 "osm_kdu_id": kdu_name,
6617 "member-vnf-index": vnf_index,
6618 "type": "delete",
6619 "kdu_index": instance_num - x - 1,
6620 }
6621 )
6622 scaling_info["kdu-delete"][kdu_name].append(
6623 {
6624 "member-vnf-index": vnf_index,
6625 "type": "delete",
6626 "k8s-cluster-type": k8s_cluster_type,
6627 "resource-name": resource_name,
6628 "scale": kdu_replica_count,
6629 }
6630 )
6631
6632 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6633 vdu_delete = copy(scaling_info.get("vdu-delete"))
6634 if scaling_info["scaling_direction"] == "IN":
6635 for vdur in reversed(db_vnfr["vdur"]):
6636 if vdu_delete.get(vdur["vdu-id-ref"]):
6637 vdu_delete[vdur["vdu-id-ref"]] -= 1
6638 scaling_info["vdu"].append(
6639 {
6640 "name": vdur.get("name") or vdur.get("vdu-name"),
6641 "vdu_id": vdur["vdu-id-ref"],
6642 "interface": [],
6643 }
6644 )
6645 for interface in vdur["interfaces"]:
6646 scaling_info["vdu"][-1]["interface"].append(
6647 {
6648 "name": interface["name"],
6649 "ip_address": interface["ip-address"],
6650 "mac_address": interface.get("mac-address"),
6651 }
6652 )
6653 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6654
6655 # PRE-SCALE BEGIN
6656 step = "Executing pre-scale vnf-config-primitive"
6657 if scaling_descriptor.get("scaling-config-action"):
6658 for scaling_config_action in scaling_descriptor[
6659 "scaling-config-action"
6660 ]:
6661 if (
6662 scaling_config_action.get("trigger") == "pre-scale-in"
6663 and scaling_type == "SCALE_IN"
6664 ) or (
6665 scaling_config_action.get("trigger") == "pre-scale-out"
6666 and scaling_type == "SCALE_OUT"
6667 ):
6668 vnf_config_primitive = scaling_config_action[
6669 "vnf-config-primitive-name-ref"
6670 ]
6671 step = db_nslcmop_update[
6672 "detailed-status"
6673 ] = "executing pre-scale scaling-config-action '{}'".format(
6674 vnf_config_primitive
6675 )
6676
6677 # look for primitive
6678 for config_primitive in (
6679 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6680 ).get("config-primitive", ()):
6681 if config_primitive["name"] == vnf_config_primitive:
6682 break
6683 else:
6684 raise LcmException(
6685 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6686 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6687 "primitive".format(scaling_group, vnf_config_primitive)
6688 )
6689
6690 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6691 if db_vnfr.get("additionalParamsForVnf"):
6692 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6693
6694 scale_process = "VCA"
6695 db_nsr_update["config-status"] = "configuring pre-scaling"
6696 primitive_params = self._map_primitive_params(
6697 config_primitive, {}, vnfr_params
6698 )
6699
6700 # Pre-scale retry check: Check if this sub-operation has been executed before
6701 op_index = self._check_or_add_scale_suboperation(
6702 db_nslcmop,
6703 vnf_index,
6704 vnf_config_primitive,
6705 primitive_params,
6706 "PRE-SCALE",
6707 )
6708 if op_index == self.SUBOPERATION_STATUS_SKIP:
6709 # Skip sub-operation
6710 result = "COMPLETED"
6711 result_detail = "Done"
6712 self.logger.debug(
6713 logging_text
6714 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6715 vnf_config_primitive, result, result_detail
6716 )
6717 )
6718 else:
6719 if op_index == self.SUBOPERATION_STATUS_NEW:
6720 # New sub-operation: Get index of this sub-operation
6721 op_index = (
6722 len(db_nslcmop.get("_admin", {}).get("operations"))
6723 - 1
6724 )
6725 self.logger.debug(
6726 logging_text
6727 + "vnf_config_primitive={} New sub-operation".format(
6728 vnf_config_primitive
6729 )
6730 )
6731 else:
6732 # retry: Get registered params for this existing sub-operation
6733 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6734 op_index
6735 ]
6736 vnf_index = op.get("member_vnf_index")
6737 vnf_config_primitive = op.get("primitive")
6738 primitive_params = op.get("primitive_params")
6739 self.logger.debug(
6740 logging_text
6741 + "vnf_config_primitive={} Sub-operation retry".format(
6742 vnf_config_primitive
6743 )
6744 )
6745 # Execute the primitive, either with new (first-time) or registered (reintent) args
6746 ee_descriptor_id = config_primitive.get(
6747 "execution-environment-ref"
6748 )
6749 primitive_name = config_primitive.get(
6750 "execution-environment-primitive", vnf_config_primitive
6751 )
6752 ee_id, vca_type = self._look_for_deployed_vca(
6753 nsr_deployed["VCA"],
6754 member_vnf_index=vnf_index,
6755 vdu_id=None,
6756 vdu_count_index=None,
6757 ee_descriptor_id=ee_descriptor_id,
6758 )
6759 result, result_detail = await self._ns_execute_primitive(
6760 ee_id,
6761 primitive_name,
6762 primitive_params,
6763 vca_type=vca_type,
6764 vca_id=vca_id,
6765 )
6766 self.logger.debug(
6767 logging_text
6768 + "vnf_config_primitive={} Done with result {} {}".format(
6769 vnf_config_primitive, result, result_detail
6770 )
6771 )
6772 # Update operationState = COMPLETED | FAILED
6773 self._update_suboperation_status(
6774 db_nslcmop, op_index, result, result_detail
6775 )
6776
6777 if result == "FAILED":
6778 raise LcmException(result_detail)
6779 db_nsr_update["config-status"] = old_config_status
6780 scale_process = None
6781 # PRE-SCALE END
6782
6783 db_nsr_update[
6784 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6785 ] = nb_scale_op
6786 db_nsr_update[
6787 "_admin.scaling-group.{}.time".format(admin_scale_index)
6788 ] = time()
6789
6790 # SCALE-IN VCA - BEGIN
6791 if vca_scaling_info:
6792 step = db_nslcmop_update[
6793 "detailed-status"
6794 ] = "Deleting the execution environments"
6795 scale_process = "VCA"
6796 for vca_info in vca_scaling_info:
6797 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6798 member_vnf_index = str(vca_info["member-vnf-index"])
6799 self.logger.debug(
6800 logging_text + "vdu info: {}".format(vca_info)
6801 )
6802 if vca_info.get("osm_vdu_id"):
6803 vdu_id = vca_info["osm_vdu_id"]
6804 vdu_index = int(vca_info["vdu_index"])
6805 stage[
6806 1
6807 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6808 member_vnf_index, vdu_id, vdu_index
6809 )
6810 stage[2] = step = "Scaling in VCA"
6811 self._write_op_status(op_id=nslcmop_id, stage=stage)
6812 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6813 config_update = db_nsr["configurationStatus"]
6814 for vca_index, vca in enumerate(vca_update):
6815 if (
6816 (vca or vca.get("ee_id"))
6817 and vca["member-vnf-index"] == member_vnf_index
6818 and vca["vdu_count_index"] == vdu_index
6819 ):
6820 if vca.get("vdu_id"):
6821 config_descriptor = get_configuration(
6822 db_vnfd, vca.get("vdu_id")
6823 )
6824 elif vca.get("kdu_name"):
6825 config_descriptor = get_configuration(
6826 db_vnfd, vca.get("kdu_name")
6827 )
6828 else:
6829 config_descriptor = get_configuration(
6830 db_vnfd, db_vnfd["id"]
6831 )
6832 operation_params = (
6833 db_nslcmop.get("operationParams") or {}
6834 )
6835 exec_terminate_primitives = not operation_params.get(
6836 "skip_terminate_primitives"
6837 ) and vca.get("needed_terminate")
6838 task = asyncio.ensure_future(
6839 asyncio.wait_for(
6840 self.destroy_N2VC(
6841 logging_text,
6842 db_nslcmop,
6843 vca,
6844 config_descriptor,
6845 vca_index,
6846 destroy_ee=True,
6847 exec_primitives=exec_terminate_primitives,
6848 scaling_in=True,
6849 vca_id=vca_id,
6850 ),
6851 timeout=self.timeout.charm_delete,
6852 )
6853 )
6854 tasks_dict_info[task] = "Terminating VCA {}".format(
6855 vca.get("ee_id")
6856 )
6857 del vca_update[vca_index]
6858 del config_update[vca_index]
6859 # wait for pending tasks of terminate primitives
6860 if tasks_dict_info:
6861 self.logger.debug(
6862 logging_text
6863 + "Waiting for tasks {}".format(
6864 list(tasks_dict_info.keys())
6865 )
6866 )
6867 error_list = await self._wait_for_tasks(
6868 logging_text,
6869 tasks_dict_info,
6870 min(
6871 self.timeout.charm_delete, self.timeout.ns_terminate
6872 ),
6873 stage,
6874 nslcmop_id,
6875 )
6876 tasks_dict_info.clear()
6877 if error_list:
6878 raise LcmException("; ".join(error_list))
6879
6880 db_vca_and_config_update = {
6881 "_admin.deployed.VCA": vca_update,
6882 "configurationStatus": config_update,
6883 }
6884 self.update_db_2(
6885 "nsrs", db_nsr["_id"], db_vca_and_config_update
6886 )
6887 scale_process = None
6888 # SCALE-IN VCA - END
6889
6890 # SCALE RO - BEGIN
6891 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6892 scale_process = "RO"
6893 if self.ro_config.ng:
6894 await self._scale_ng_ro(
6895 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6896 )
6897 scaling_info.pop("vdu-create", None)
6898 scaling_info.pop("vdu-delete", None)
6899
6900 scale_process = None
6901 # SCALE RO - END
6902
6903 # SCALE KDU - BEGIN
6904 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6905 scale_process = "KDU"
6906 await self._scale_kdu(
6907 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6908 )
6909 scaling_info.pop("kdu-create", None)
6910 scaling_info.pop("kdu-delete", None)
6911
6912 scale_process = None
6913 # SCALE KDU - END
6914
6915 if db_nsr_update:
6916 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6917
6918 # SCALE-UP VCA - BEGIN
6919 if vca_scaling_info:
6920 step = db_nslcmop_update[
6921 "detailed-status"
6922 ] = "Creating new execution environments"
6923 scale_process = "VCA"
6924 for vca_info in vca_scaling_info:
6925 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6926 member_vnf_index = str(vca_info["member-vnf-index"])
6927 self.logger.debug(
6928 logging_text + "vdu info: {}".format(vca_info)
6929 )
6930 vnfd_id = db_vnfr["vnfd-ref"]
6931 if vca_info.get("osm_vdu_id"):
6932 vdu_index = int(vca_info["vdu_index"])
6933 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6934 if db_vnfr.get("additionalParamsForVnf"):
6935 deploy_params.update(
6936 parse_yaml_strings(
6937 db_vnfr["additionalParamsForVnf"].copy()
6938 )
6939 )
6940 descriptor_config = get_configuration(
6941 db_vnfd, db_vnfd["id"]
6942 )
6943 if descriptor_config:
6944 vdu_id = None
6945 vdu_name = None
6946 kdu_name = None
6947 kdu_index = None
6948 self._deploy_n2vc(
6949 logging_text=logging_text
6950 + "member_vnf_index={} ".format(member_vnf_index),
6951 db_nsr=db_nsr,
6952 db_vnfr=db_vnfr,
6953 nslcmop_id=nslcmop_id,
6954 nsr_id=nsr_id,
6955 nsi_id=nsi_id,
6956 vnfd_id=vnfd_id,
6957 vdu_id=vdu_id,
6958 kdu_name=kdu_name,
6959 kdu_index=kdu_index,
6960 member_vnf_index=member_vnf_index,
6961 vdu_index=vdu_index,
6962 vdu_name=vdu_name,
6963 deploy_params=deploy_params,
6964 descriptor_config=descriptor_config,
6965 base_folder=base_folder,
6966 task_instantiation_info=tasks_dict_info,
6967 stage=stage,
6968 )
6969 vdu_id = vca_info["osm_vdu_id"]
6970 vdur = find_in_list(
6971 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6972 )
6973 descriptor_config = get_configuration(db_vnfd, vdu_id)
6974 if vdur.get("additionalParams"):
6975 deploy_params_vdu = parse_yaml_strings(
6976 vdur["additionalParams"]
6977 )
6978 else:
6979 deploy_params_vdu = deploy_params
6980 deploy_params_vdu["OSM"] = get_osm_params(
6981 db_vnfr, vdu_id, vdu_count_index=vdu_index
6982 )
6983 if descriptor_config:
6984 vdu_name = None
6985 kdu_name = None
6986 kdu_index = None
6987 stage[
6988 1
6989 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6990 member_vnf_index, vdu_id, vdu_index
6991 )
6992 stage[2] = step = "Scaling out VCA"
6993 self._write_op_status(op_id=nslcmop_id, stage=stage)
6994 self._deploy_n2vc(
6995 logging_text=logging_text
6996 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6997 member_vnf_index, vdu_id, vdu_index
6998 ),
6999 db_nsr=db_nsr,
7000 db_vnfr=db_vnfr,
7001 nslcmop_id=nslcmop_id,
7002 nsr_id=nsr_id,
7003 nsi_id=nsi_id,
7004 vnfd_id=vnfd_id,
7005 vdu_id=vdu_id,
7006 kdu_name=kdu_name,
7007 member_vnf_index=member_vnf_index,
7008 vdu_index=vdu_index,
7009 kdu_index=kdu_index,
7010 vdu_name=vdu_name,
7011 deploy_params=deploy_params_vdu,
7012 descriptor_config=descriptor_config,
7013 base_folder=base_folder,
7014 task_instantiation_info=tasks_dict_info,
7015 stage=stage,
7016 )
7017 # SCALE-UP VCA - END
7018 scale_process = None
7019
7020 # POST-SCALE BEGIN
7021 # execute primitive service POST-SCALING
7022 step = "Executing post-scale vnf-config-primitive"
7023 if scaling_descriptor.get("scaling-config-action"):
7024 for scaling_config_action in scaling_descriptor[
7025 "scaling-config-action"
7026 ]:
7027 if (
7028 scaling_config_action.get("trigger") == "post-scale-in"
7029 and scaling_type == "SCALE_IN"
7030 ) or (
7031 scaling_config_action.get("trigger") == "post-scale-out"
7032 and scaling_type == "SCALE_OUT"
7033 ):
7034 vnf_config_primitive = scaling_config_action[
7035 "vnf-config-primitive-name-ref"
7036 ]
7037 step = db_nslcmop_update[
7038 "detailed-status"
7039 ] = "executing post-scale scaling-config-action '{}'".format(
7040 vnf_config_primitive
7041 )
7042
7043 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7044 if db_vnfr.get("additionalParamsForVnf"):
7045 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7046
7047 # look for primitive
7048 for config_primitive in (
7049 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7050 ).get("config-primitive", ()):
7051 if config_primitive["name"] == vnf_config_primitive:
7052 break
7053 else:
7054 raise LcmException(
7055 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7056 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7057 "config-primitive".format(
7058 scaling_group, vnf_config_primitive
7059 )
7060 )
7061 scale_process = "VCA"
7062 db_nsr_update["config-status"] = "configuring post-scaling"
7063 primitive_params = self._map_primitive_params(
7064 config_primitive, {}, vnfr_params
7065 )
7066
7067 # Post-scale retry check: Check if this sub-operation has been executed before
7068 op_index = self._check_or_add_scale_suboperation(
7069 db_nslcmop,
7070 vnf_index,
7071 vnf_config_primitive,
7072 primitive_params,
7073 "POST-SCALE",
7074 )
7075 if op_index == self.SUBOPERATION_STATUS_SKIP:
7076 # Skip sub-operation
7077 result = "COMPLETED"
7078 result_detail = "Done"
7079 self.logger.debug(
7080 logging_text
7081 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7082 vnf_config_primitive, result, result_detail
7083 )
7084 )
7085 else:
7086 if op_index == self.SUBOPERATION_STATUS_NEW:
7087 # New sub-operation: Get index of this sub-operation
7088 op_index = (
7089 len(db_nslcmop.get("_admin", {}).get("operations"))
7090 - 1
7091 )
7092 self.logger.debug(
7093 logging_text
7094 + "vnf_config_primitive={} New sub-operation".format(
7095 vnf_config_primitive
7096 )
7097 )
7098 else:
7099 # retry: Get registered params for this existing sub-operation
7100 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7101 op_index
7102 ]
7103 vnf_index = op.get("member_vnf_index")
7104 vnf_config_primitive = op.get("primitive")
7105 primitive_params = op.get("primitive_params")
7106 self.logger.debug(
7107 logging_text
7108 + "vnf_config_primitive={} Sub-operation retry".format(
7109 vnf_config_primitive
7110 )
7111 )
7112 # Execute the primitive, either with new (first-time) or registered (reintent) args
7113 ee_descriptor_id = config_primitive.get(
7114 "execution-environment-ref"
7115 )
7116 primitive_name = config_primitive.get(
7117 "execution-environment-primitive", vnf_config_primitive
7118 )
7119 ee_id, vca_type = self._look_for_deployed_vca(
7120 nsr_deployed["VCA"],
7121 member_vnf_index=vnf_index,
7122 vdu_id=None,
7123 vdu_count_index=None,
7124 ee_descriptor_id=ee_descriptor_id,
7125 )
7126 result, result_detail = await self._ns_execute_primitive(
7127 ee_id,
7128 primitive_name,
7129 primitive_params,
7130 vca_type=vca_type,
7131 vca_id=vca_id,
7132 )
7133 self.logger.debug(
7134 logging_text
7135 + "vnf_config_primitive={} Done with result {} {}".format(
7136 vnf_config_primitive, result, result_detail
7137 )
7138 )
7139 # Update operationState = COMPLETED | FAILED
7140 self._update_suboperation_status(
7141 db_nslcmop, op_index, result, result_detail
7142 )
7143
7144 if result == "FAILED":
7145 raise LcmException(result_detail)
7146 db_nsr_update["config-status"] = old_config_status
7147 scale_process = None
7148 # POST-SCALE END
7149
7150 db_nsr_update[
7151 "detailed-status"
7152 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7153 db_nsr_update["operational-status"] = (
7154 "running"
7155 if old_operational_status == "failed"
7156 else old_operational_status
7157 )
7158 db_nsr_update["config-status"] = old_config_status
7159 return
7160 except (
7161 ROclient.ROClientException,
7162 DbException,
7163 LcmException,
7164 NgRoException,
7165 ) as e:
7166 self.logger.error(logging_text + "Exit Exception {}".format(e))
7167 exc = e
7168 except asyncio.CancelledError:
7169 self.logger.error(
7170 logging_text + "Cancelled Exception while '{}'".format(step)
7171 )
7172 exc = "Operation was cancelled"
7173 except Exception as e:
7174 exc = traceback.format_exc()
7175 self.logger.critical(
7176 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7177 exc_info=True,
7178 )
7179 finally:
7180 self._write_ns_status(
7181 nsr_id=nsr_id,
7182 ns_state=None,
7183 current_operation="IDLE",
7184 current_operation_id=None,
7185 )
7186 if tasks_dict_info:
7187 stage[1] = "Waiting for instantiate pending tasks."
7188 self.logger.debug(logging_text + stage[1])
7189 exc = await self._wait_for_tasks(
7190 logging_text,
7191 tasks_dict_info,
7192 self.timeout.ns_deploy,
7193 stage,
7194 nslcmop_id,
7195 nsr_id=nsr_id,
7196 )
7197 if exc:
7198 db_nslcmop_update[
7199 "detailed-status"
7200 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7201 nslcmop_operation_state = "FAILED"
7202 if db_nsr:
7203 db_nsr_update["operational-status"] = old_operational_status
7204 db_nsr_update["config-status"] = old_config_status
7205 db_nsr_update["detailed-status"] = ""
7206 if scale_process:
7207 if "VCA" in scale_process:
7208 db_nsr_update["config-status"] = "failed"
7209 if "RO" in scale_process:
7210 db_nsr_update["operational-status"] = "failed"
7211 db_nsr_update[
7212 "detailed-status"
7213 ] = "FAILED scaling nslcmop={} {}: {}".format(
7214 nslcmop_id, step, exc
7215 )
7216 else:
7217 error_description_nslcmop = None
7218 nslcmop_operation_state = "COMPLETED"
7219 db_nslcmop_update["detailed-status"] = "Done"
7220
7221 self._write_op_status(
7222 op_id=nslcmop_id,
7223 stage="",
7224 error_message=error_description_nslcmop,
7225 operation_state=nslcmop_operation_state,
7226 other_update=db_nslcmop_update,
7227 )
7228 if db_nsr:
7229 self._write_ns_status(
7230 nsr_id=nsr_id,
7231 ns_state=None,
7232 current_operation="IDLE",
7233 current_operation_id=None,
7234 other_update=db_nsr_update,
7235 )
7236
7237 if nslcmop_operation_state:
7238 try:
7239 msg = {
7240 "nsr_id": nsr_id,
7241 "nslcmop_id": nslcmop_id,
7242 "operationState": nslcmop_operation_state,
7243 }
7244 await self.msg.aiowrite("ns", "scaled", msg)
7245 except Exception as e:
7246 self.logger.error(
7247 logging_text + "kafka_write notification Exception {}".format(e)
7248 )
7249 self.logger.debug(logging_text + "Exit")
7250 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7251
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale deployed KDUs according to the computed scaling info.

        For every KDU listed in ``scaling_info`` ("kdu-create" or "kdu-delete"),
        optionally runs the descriptor's terminate/initial config primitives
        (only when no juju execution environment handles them) and then calls
        the k8s cluster connector's ``scale`` operation.

        :param logging_text: prefix for every log line
        :param nsr_id: NS record id, used to build the db status path
        :param nsr_deployed: content of nsr "_admin.deployed", to locate the KDU
        :param db_vnfd: vnfd content, to look up KDU configuration primitives
        :param vca_id: VCA id passed through to the k8s connector
        :param scaling_info: dict with "kdu-create" and/or "kdu-delete" maps of
            kdu_name -> list of per-instance scaling entries
        """
        # Only one of the two keys is expected to be present per operation.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # Locate the deployed KDU record and its index inside
                # _admin.deployed.K8s for db status updates.
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # Where the k8s connector writes progress/status in the nsr.
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # Scale-in: run terminate-config-primitives first (in "seq"
                # order), but only when they are not delegated to a juju EE.
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # Outer wait_for guards against a hung connector;
                            # the inner call also gets a total_timeout.
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # Actual scale operation (both scale-in and scale-out paths).
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                # Scale-out: run initial-config-primitives afterwards, again
                # only when not handled by a juju EE.
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                # NOTE(review): hard-coded 600s here, unlike the
                                # terminate path which uses self.timeout.primitive
                                # — confirm whether this should use the configured
                                # timeout as well.
                                timeout=600,
                            )
7361
7362 async def _scale_ng_ro(
7363 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7364 ):
7365 nsr_id = db_nslcmop["nsInstanceId"]
7366 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7367 db_vnfrs = {}
7368
7369 # read from db: vnfd's for every vnf
7370 db_vnfds = []
7371
7372 # for each vnf in ns, read vnfd
7373 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7374 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7375 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7376 # if we haven't this vnfd, read it from db
7377 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7378 # read from db
7379 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7380 db_vnfds.append(vnfd)
7381 n2vc_key = self.n2vc.get_public_key()
7382 n2vc_key_list = [n2vc_key]
7383 self.scale_vnfr(
7384 db_vnfr,
7385 vdu_scaling_info.get("vdu-create"),
7386 vdu_scaling_info.get("vdu-delete"),
7387 mark_delete=True,
7388 )
7389 # db_vnfr has been updated, update db_vnfrs to use it
7390 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7391 await self._instantiate_ng_ro(
7392 logging_text,
7393 nsr_id,
7394 db_nsd,
7395 db_nsr,
7396 db_nslcmop,
7397 db_vnfrs,
7398 db_vnfds,
7399 n2vc_key_list,
7400 stage=stage,
7401 start_deploy=time(),
7402 timeout_ns_deploy=self.timeout.ns_deploy,
7403 )
7404 if vdu_scaling_info.get("vdu-delete"):
7405 self.scale_vnfr(
7406 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7407 )
7408
7409 async def extract_prometheus_scrape_jobs(
7410 self,
7411 ee_id: str,
7412 artifact_path: str,
7413 ee_config_descriptor: dict,
7414 vnfr_id: str,
7415 nsr_id: str,
7416 target_ip: str,
7417 element_type: str,
7418 vnf_member_index: str = "",
7419 vdu_id: str = "",
7420 vdu_index: int = None,
7421 kdu_name: str = "",
7422 kdu_index: int = None,
7423 ) -> dict:
7424 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7425 This method will wait until the corresponding VDU or KDU is fully instantiated
7426
7427 Args:
7428 ee_id (str): Execution Environment ID
7429 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7430 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7431 vnfr_id (str): VNFR ID where this EE applies
7432 nsr_id (str): NSR ID where this EE applies
7433 target_ip (str): VDU/KDU instance IP address
7434 element_type (str): NS or VNF or VDU or KDU
7435 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7436 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7437 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7438 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7439 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7440
7441 Raises:
7442 LcmException: When the VDU or KDU instance was not found in an hour
7443
7444 Returns:
7445 _type_: Prometheus jobs
7446 """
7447 # default the vdur and kdur names to an empty string, to avoid any later
7448 # problem with Prometheus when the element type is not VDU or KDU
7449 vdur_name = ""
7450 kdur_name = ""
7451
7452 # look if exist a file called 'prometheus*.j2' and
7453 artifact_content = self.fs.dir_ls(artifact_path)
7454 job_file = next(
7455 (
7456 f
7457 for f in artifact_content
7458 if f.startswith("prometheus") and f.endswith(".j2")
7459 ),
7460 None,
7461 )
7462 if not job_file:
7463 return
7464 self.logger.debug("Artifact path{}".format(artifact_path))
7465 self.logger.debug("job file{}".format(job_file))
7466 with self.fs.file_open((artifact_path, job_file), "r") as f:
7467 job_data = f.read()
7468
7469 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7470 if element_type in ("VDU", "KDU"):
7471 for _ in range(360):
7472 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7473 if vdu_id and vdu_index is not None:
7474 vdur = next(
7475 (
7476 x
7477 for x in get_iterable(db_vnfr, "vdur")
7478 if (
7479 x.get("vdu-id-ref") == vdu_id
7480 and x.get("count-index") == vdu_index
7481 )
7482 ),
7483 {},
7484 )
7485 if vdur.get("name"):
7486 vdur_name = vdur.get("name")
7487 break
7488 if kdu_name and kdu_index is not None:
7489 kdur = next(
7490 (
7491 x
7492 for x in get_iterable(db_vnfr, "kdur")
7493 if (
7494 x.get("kdu-name") == kdu_name
7495 and x.get("count-index") == kdu_index
7496 )
7497 ),
7498 {},
7499 )
7500 if kdur.get("name"):
7501 kdur_name = kdur.get("name")
7502 break
7503
7504 await asyncio.sleep(10)
7505 else:
7506 if vdu_id and vdu_index is not None:
7507 raise LcmException(
7508 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7509 )
7510 if kdu_name and kdu_index is not None:
7511 raise LcmException(
7512 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7513 )
7514
7515 if ee_id is not None:
7516 _, namespace, helm_id = get_ee_id_parts(
7517 ee_id
7518 ) # get namespace and EE gRPC service name
7519 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7520 host_port = "80"
7521 vnfr_id = vnfr_id.replace("-", "")
7522 variables = {
7523 "JOB_NAME": vnfr_id,
7524 "TARGET_IP": target_ip,
7525 "EXPORTER_POD_IP": host_name,
7526 "EXPORTER_POD_PORT": host_port,
7527 "NSR_ID": nsr_id,
7528 "VNF_MEMBER_INDEX": vnf_member_index,
7529 "VDUR_NAME": vdur_name,
7530 "KDUR_NAME": kdur_name,
7531 "ELEMENT_TYPE": element_type,
7532 }
7533 else:
7534 metric_path = ee_config_descriptor["metric-path"]
7535 target_port = ee_config_descriptor["metric-port"]
7536 vnfr_id = vnfr_id.replace("-", "")
7537 variables = {
7538 "JOB_NAME": vnfr_id,
7539 "TARGET_IP": target_ip,
7540 "TARGET_PORT": target_port,
7541 "METRIC_PATH": metric_path,
7542 }
7543
7544 job_list = parse_job(job_data, variables)
7545 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7546 for job in job_list:
7547 if (
7548 not isinstance(job.get("job_name"), str)
7549 or vnfr_id not in job["job_name"]
7550 ):
7551 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7552 job["nsr_id"] = nsr_id
7553 job["vnfr_id"] = vnfr_id
7554 return job_list
7555
7556 async def rebuild_start_stop(
7557 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7558 ):
7559 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7560 self.logger.info(logging_text + "Enter")
7561 stage = ["Preparing the environment", ""]
7562 # database nsrs record
7563 db_nsr_update = {}
7564 vdu_vim_name = None
7565 vim_vm_id = None
7566 # in case of error, indicates what part of scale was failed to put nsr at error status
7567 start_deploy = time()
7568 try:
7569 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7570 vim_account_id = db_vnfr.get("vim-account-id")
7571 vim_info_key = "vim:" + vim_account_id
7572 vdu_id = additional_param["vdu_id"]
7573 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7574 vdur = find_in_list(
7575 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7576 )
7577 if vdur:
7578 vdu_vim_name = vdur["name"]
7579 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7580 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7581 else:
7582 raise LcmException("Target vdu is not found")
7583 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7584 # wait for any previous tasks in process
7585 stage[1] = "Waiting for previous operations to terminate"
7586 self.logger.info(stage[1])
7587 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7588
7589 stage[1] = "Reading from database."
7590 self.logger.info(stage[1])
7591 self._write_ns_status(
7592 nsr_id=nsr_id,
7593 ns_state=None,
7594 current_operation=operation_type.upper(),
7595 current_operation_id=nslcmop_id,
7596 )
7597 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7598
7599 # read from db: ns
7600 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7601 db_nsr_update["operational-status"] = operation_type
7602 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7603 # Payload for RO
7604 desc = {
7605 operation_type: {
7606 "vim_vm_id": vim_vm_id,
7607 "vnf_id": vnf_id,
7608 "vdu_index": additional_param["count-index"],
7609 "vdu_id": vdur["id"],
7610 "target_vim": target_vim,
7611 "vim_account_id": vim_account_id,
7612 }
7613 }
7614 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7615 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7616 self.logger.info("ro nsr id: {}".format(nsr_id))
7617 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7618 self.logger.info("response from RO: {}".format(result_dict))
7619 action_id = result_dict["action_id"]
7620 await self._wait_ng_ro(
7621 nsr_id,
7622 action_id,
7623 nslcmop_id,
7624 start_deploy,
7625 self.timeout.operate,
7626 None,
7627 "start_stop_rebuild",
7628 )
7629 return "COMPLETED", "Done"
7630 except (ROclient.ROClientException, DbException, LcmException) as e:
7631 self.logger.error("Exit Exception {}".format(e))
7632 exc = e
7633 except asyncio.CancelledError:
7634 self.logger.error("Cancelled Exception while '{}'".format(stage))
7635 exc = "Operation was cancelled"
7636 except Exception as e:
7637 exc = traceback.format_exc()
7638 self.logger.critical(
7639 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7640 )
7641 return "FAILED", "Error in operate VNF {}".format(exc)
7642
7643 async def migrate(self, nsr_id, nslcmop_id):
7644 """
7645 Migrate VNFs and VDUs instances in a NS
7646
7647 :param: nsr_id: NS Instance ID
7648 :param: nslcmop_id: nslcmop ID of migrate
7649
7650 """
7651 # Try to lock HA task here
7652 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7653 if not task_is_locked_by_me:
7654 return
7655 logging_text = "Task ns={} migrate ".format(nsr_id)
7656 self.logger.debug(logging_text + "Enter")
7657 # get all needed from database
7658 db_nslcmop = None
7659 db_nslcmop_update = {}
7660 nslcmop_operation_state = None
7661 db_nsr_update = {}
7662 target = {}
7663 exc = None
7664 # in case of error, indicates what part of scale was failed to put nsr at error status
7665 start_deploy = time()
7666
7667 try:
7668 # wait for any previous tasks in process
7669 step = "Waiting for previous operations to terminate"
7670 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7671
7672 self._write_ns_status(
7673 nsr_id=nsr_id,
7674 ns_state=None,
7675 current_operation="MIGRATING",
7676 current_operation_id=nslcmop_id,
7677 )
7678 step = "Getting nslcmop from database"
7679 self.logger.debug(
7680 step + " after having waited for previous tasks to be completed"
7681 )
7682 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7683 migrate_params = db_nslcmop.get("operationParams")
7684
7685 target = {}
7686 target.update(migrate_params)
7687 desc = await self.RO.migrate(nsr_id, target)
7688 self.logger.debug("RO return > {}".format(desc))
7689 action_id = desc["action_id"]
7690 await self._wait_ng_ro(
7691 nsr_id,
7692 action_id,
7693 nslcmop_id,
7694 start_deploy,
7695 self.timeout.migrate,
7696 operation="migrate",
7697 )
7698 except (ROclient.ROClientException, DbException, LcmException) as e:
7699 self.logger.error("Exit Exception {}".format(e))
7700 exc = e
7701 except asyncio.CancelledError:
7702 self.logger.error("Cancelled Exception while '{}'".format(step))
7703 exc = "Operation was cancelled"
7704 except Exception as e:
7705 exc = traceback.format_exc()
7706 self.logger.critical(
7707 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7708 )
7709 finally:
7710 self._write_ns_status(
7711 nsr_id=nsr_id,
7712 ns_state=None,
7713 current_operation="IDLE",
7714 current_operation_id=None,
7715 )
7716 if exc:
7717 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7718 nslcmop_operation_state = "FAILED"
7719 else:
7720 nslcmop_operation_state = "COMPLETED"
7721 db_nslcmop_update["detailed-status"] = "Done"
7722 db_nsr_update["detailed-status"] = "Done"
7723
7724 self._write_op_status(
7725 op_id=nslcmop_id,
7726 stage="",
7727 error_message="",
7728 operation_state=nslcmop_operation_state,
7729 other_update=db_nslcmop_update,
7730 )
7731 if nslcmop_operation_state:
7732 try:
7733 msg = {
7734 "nsr_id": nsr_id,
7735 "nslcmop_id": nslcmop_id,
7736 "operationState": nslcmop_operation_state,
7737 }
7738 await self.msg.aiowrite("ns", "migrated", msg)
7739 except Exception as e:
7740 self.logger.error(
7741 logging_text + "kafka_write notification Exception {}".format(e)
7742 )
7743 self.logger.debug(logging_text + "Exit")
7744 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7745
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Sends the heal order to the VIM through RO (heal_RO) and then
        re-deploys the N2VC execution environments (_heal_n2vc) for each
        VNF/VDU listed in the operation's healVnfData.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # remember previous statuses so they can be restored on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # No explicit vdu list: heal every existing vdur of
                        # this VNF, inheriting the vnf-level run-day1 flag.
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf.get(
                            "additionalParams", {}
                        ).get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance stays None when no
                            # vdur matches, which would raise AttributeError on
                            # the .get() below — confirm a match is guaranteed.
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the N2VC tasks scheduled by _heal_n2vc before closing
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-heal statuses, then flag the failed part
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    # classify failure: pending/failed N2VC task -> config
                    # status, otherwise RO -> operational status
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify result through kafka (best effort)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8031
8032 async def heal_RO(
8033 self,
8034 logging_text,
8035 nsr_id,
8036 db_nslcmop,
8037 stage,
8038 ):
8039 """
8040 Heal at RO
8041 :param logging_text: preffix text to use at logging
8042 :param nsr_id: nsr identity
8043 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8044 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8045 :return: None or exception
8046 """
8047
8048 def get_vim_account(vim_account_id):
8049 nonlocal db_vims
8050 if vim_account_id in db_vims:
8051 return db_vims[vim_account_id]
8052 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8053 db_vims[vim_account_id] = db_vim
8054 return db_vim
8055
8056 try:
8057 start_heal = time()
8058 ns_params = db_nslcmop.get("operationParams")
8059 if ns_params and ns_params.get("timeout_ns_heal"):
8060 timeout_ns_heal = ns_params["timeout_ns_heal"]
8061 else:
8062 timeout_ns_heal = self.timeout.ns_heal
8063
8064 db_vims = {}
8065
8066 nslcmop_id = db_nslcmop["_id"]
8067 target = {
8068 "action_id": nslcmop_id,
8069 }
8070 self.logger.warning(
8071 "db_nslcmop={} and timeout_ns_heal={}".format(
8072 db_nslcmop, timeout_ns_heal
8073 )
8074 )
8075 target.update(db_nslcmop.get("operationParams", {}))
8076
8077 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8078 desc = await self.RO.recreate(nsr_id, target)
8079 self.logger.debug("RO return > {}".format(desc))
8080 action_id = desc["action_id"]
8081 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8082 await self._wait_ng_ro(
8083 nsr_id,
8084 action_id,
8085 nslcmop_id,
8086 start_heal,
8087 timeout_ns_heal,
8088 stage,
8089 operation="healing",
8090 )
8091
8092 # Updating NSR
8093 db_nsr_update = {
8094 "_admin.deployed.RO.operational-status": "running",
8095 "detailed-status": " ".join(stage),
8096 }
8097 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8098 self._write_op_status(nslcmop_id, stage)
8099 self.logger.debug(
8100 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8101 )
8102
8103 except Exception as e:
8104 stage[2] = "ERROR healing at VIM"
8105 # self.set_vnfr_at_error(db_vnfrs, str(e))
8106 self.logger.error(
8107 "Error healing at VIM {}".format(e),
8108 exc_info=not isinstance(
8109 e,
8110 (
8111 ROclient.ROClientException,
8112 LcmException,
8113 DbException,
8114 NgRoException,
8115 ),
8116 ),
8117 )
8118 raise
8119
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Launch heal_N2VC as an asyncio task for every execution environment declared
        in ``descriptor_config`` and register each task in ``task_instantiation_info``.

        The VCA record for each execution environment is looked up in
        <nsrs>._admin.deployed.VCA; if not found, a new entry is created and persisted
        both at database and in the in-memory ``db_nsr``.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Normalize the descriptor into a flat list of execution-environment items
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type/name from the execution-environment item:
            # juju charms (proxy/native/k8s-proxy) or helm charts (v2/v3)
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # Try to reuse an existing deployed-VCA record matching this element;
            # the for/else falls through to creation when no match is found
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # append after the last existing record (loop left vca_index at the end)
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8281
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Re-attach the VCA execution environment of one healed element (NS, VNF, VDU
        or KDU) and, when the operation requests it (run-day1), re-run the element's
        Day-1 (initial-config) primitives.

        For native charms a new execution environment is registered against the
        healed VM; for proxy charms / helm the existing EE is reused and only the
        SSH key re-injection and primitives are performed.

        :raises LcmException: wrapping any failure, after marking the configuration
            status as BROKEN.
        """
        nsr_id = db_nsr["_id"]
        # dotted DB prefix for this VCA entry inside the nsrs record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type / namespace depending on what is being healed
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the special "config" primitive (if present) provides the charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8690
8691 async def _wait_heal_ro(
8692 self,
8693 nsr_id,
8694 timeout=600,
8695 ):
8696 start_time = time()
8697 while time() <= start_time + timeout:
8698 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8699 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8700 "operational-status"
8701 ]
8702 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8703 if operational_status_ro != "healing":
8704 break
8705 await asyncio.sleep(15)
8706 else: # timeout_ns_deploy
8707 raise NgRoException("Timeout waiting ns to deploy")
8708
8709 async def vertical_scale(self, nsr_id, nslcmop_id):
8710 """
8711 Vertical Scale the VDUs in a NS
8712
8713 :param: nsr_id: NS Instance ID
8714 :param: nslcmop_id: nslcmop ID of migrate
8715
8716 """
8717 # Try to lock HA task here
8718 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8719 if not task_is_locked_by_me:
8720 return
8721 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8722 self.logger.debug(logging_text + "Enter")
8723 # get all needed from database
8724 db_nslcmop = None
8725 db_nslcmop_update = {}
8726 nslcmop_operation_state = None
8727 db_nsr_update = {}
8728 target = {}
8729 exc = None
8730 # in case of error, indicates what part of scale was failed to put nsr at error status
8731 start_deploy = time()
8732
8733 try:
8734 # wait for any previous tasks in process
8735 step = "Waiting for previous operations to terminate"
8736 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8737
8738 self._write_ns_status(
8739 nsr_id=nsr_id,
8740 ns_state=None,
8741 current_operation="VerticalScale",
8742 current_operation_id=nslcmop_id,
8743 )
8744 step = "Getting nslcmop from database"
8745 self.logger.debug(
8746 step + " after having waited for previous tasks to be completed"
8747 )
8748 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8749 operationParams = db_nslcmop.get("operationParams")
8750 target = {}
8751 target.update(operationParams)
8752 desc = await self.RO.vertical_scale(nsr_id, target)
8753 self.logger.debug("RO return > {}".format(desc))
8754 action_id = desc["action_id"]
8755 await self._wait_ng_ro(
8756 nsr_id,
8757 action_id,
8758 nslcmop_id,
8759 start_deploy,
8760 self.timeout.verticalscale,
8761 operation="verticalscale",
8762 )
8763 except (ROclient.ROClientException, DbException, LcmException) as e:
8764 self.logger.error("Exit Exception {}".format(e))
8765 exc = e
8766 except asyncio.CancelledError:
8767 self.logger.error("Cancelled Exception while '{}'".format(step))
8768 exc = "Operation was cancelled"
8769 except Exception as e:
8770 exc = traceback.format_exc()
8771 self.logger.critical(
8772 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8773 )
8774 finally:
8775 self._write_ns_status(
8776 nsr_id=nsr_id,
8777 ns_state=None,
8778 current_operation="IDLE",
8779 current_operation_id=None,
8780 )
8781 if exc:
8782 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8783 nslcmop_operation_state = "FAILED"
8784 else:
8785 nslcmop_operation_state = "COMPLETED"
8786 db_nslcmop_update["detailed-status"] = "Done"
8787 db_nsr_update["detailed-status"] = "Done"
8788
8789 self._write_op_status(
8790 op_id=nslcmop_id,
8791 stage="",
8792 error_message="",
8793 operation_state=nslcmop_operation_state,
8794 other_update=db_nslcmop_update,
8795 )
8796 if nslcmop_operation_state:
8797 try:
8798 msg = {
8799 "nsr_id": nsr_id,
8800 "nslcmop_id": nslcmop_id,
8801 "operationState": nslcmop_operation_state,
8802 }
8803 await self.msg.aiowrite("ns", "verticalscaled", msg)
8804 except Exception as e:
8805 self.logger.error(
8806 logging_text + "kafka_write notification Exception {}".format(e)
8807 )
8808 self.logger.debug(logging_text + "Exit")
8809 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")