Bug 2042 fixed
osm/LCM.git: osm_lcm/ns.py
# -*- coding: utf-8 -*-

##
# Copyright 2018 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

import asyncio
import shutil
from typing import Any, Dict, List
import yaml
import logging
import logging.handlers
import traceback
import ipaddress
import json
from jinja2 import (
    Environment,
    TemplateError,
    TemplateNotFound,
    StrictUndefined,
    UndefinedError,
    select_autoescape,
)

from osm_lcm import ROclient
from osm_lcm.data_utils.lcm_config import LcmCfg
from osm_lcm.data_utils.nsr import (
    get_deployed_kdu,
    get_deployed_vca,
    get_deployed_vca_list,
    get_nsd,
)
from osm_lcm.data_utils.vca import (
    DeployedComponent,
    DeployedK8sResource,
    DeployedVCA,
    EELevel,
    Relation,
    EERelation,
    safe_get_ee_relation,
)
from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import (
    LcmException,
    LcmBase,
    deep_get,
    get_iterable,
    populate_dict,
    check_juju_bundle_existence,
    get_charm_artifact_path,
    get_ee_id_parts,
    vld_to_ro_ip_profile,
)
from osm_lcm.data_utils.nsd import (
    get_ns_configuration_relation_list,
    get_vnf_profile,
    get_vnf_profiles,
)
from osm_lcm.data_utils.vnfd import (
    get_kdu,
    get_kdu_services,
    get_relation_list,
    get_vdu_list,
    get_vdu_profile,
    get_ee_sorted_initial_config_primitive_list,
    get_ee_sorted_terminate_config_primitive_list,
    get_kdu_list,
    get_virtual_link_profiles,
    get_vdu,
    get_configuration,
    get_vdu_index,
    get_scaling_aspect,
    get_number_of_instances,
    get_juju_ee_ref,
    get_kdu_resource_profile,
    find_software_version,
    check_helm_ee_in_ns,
)
from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.data_utils.vnfr import (
    get_osm_params,
    get_vdur_index,
    get_kdur,
    get_volumes_from_instantiation_params,
)
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.definitions import RelationEndpoint
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector

from osm_common.dbbase import DbException
from osm_common.fsbase import FsException

from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
from osm_lcm.data_utils.wim import (
    get_sdn_ports,
    get_target_wim_attrs,
    select_feasible_wim_account,
)

from n2vc.n2vc_juju_conn import N2VCJujuConnector
from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException

from osm_lcm.lcm_helm_conn import LCMHelmConn
from osm_lcm.osm_config import OsmConfigBuilder
from osm_lcm.prometheus import parse_job

from copy import copy, deepcopy
from time import time
from uuid import uuid4

from random import SystemRandom

__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"


class NsLcm(LcmBase):
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    EE_TLS_NAME = "ee-tls"
    task_name_deploy_vca = "Deploying VCA"
    rel_operation_types = {
        "GE": ">=",
        "LE": "<=",
        "GT": ">",
        "LT": "<",
        "EQ": "==",
        "NE": "!=",
    }
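
    # Illustrative note (sketch, not part of the upstream module):
    # rel_operation_types maps relation operator codes used in descriptors to
    # Python comparison operators, e.g. rel_operation_types["GE"] yields ">=".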

    def __init__(self, msg, lcm_tasks, config: LcmCfg):
        """
        Init: connect to database, filesystem storage, and messaging
        :param config: LcmCfg object with the LCM configuration (timeouts, RO and VCA settings)
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        self.k8scluster_map = {
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
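        # Illustrative dispatch (sketch): _wait_ng_ro() polls RO through this
        # map, so for example
        #     desc_status = await self.op_status_map["instantiation"](nsr_id, action_id)
        # is equivalent to awaiting self.RO.status(nsr_id, action_id), while
        # "healing" is routed to self.RO.recreate_status instead.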

    @staticmethod
    def increment_ip_mac(ip_mac, vm_index=1):
        if not isinstance(ip_mac, str):
            return ip_mac
        try:
            next_ipv6 = None
            next_ipv4 = None
            dual_ip = ip_mac.split(";")
            if len(dual_ip) == 2:
                for ip in dual_ip:
                    if ipaddress.ip_address(ip).version == 6:
                        ipv6 = ipaddress.IPv6Address(ip)
                        next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
                    elif ipaddress.ip_address(ip).version == 4:
                        ipv4 = ipaddress.IPv4Address(ip)
                        next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
                return [next_ipv4, next_ipv6]
            # try with ipv4: look for the last dot
            i = ip_mac.rfind(".")
            if i > 0:
                i += 1
                return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
            # try with ipv6 or mac: look for the last colon and operate in hex
            i = ip_mac.rfind(":")
            if i > 0:
                i += 1
                # format in hex; len can be 2 for a mac or up to 4 for ipv6
                return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
                    ip_mac[:i], int(ip_mac[i:], 16) + vm_index
                )
        except Exception:
            pass
        return None
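
    # Illustrative examples for increment_ip_mac (sketch, not upstream code):
    #   increment_ip_mac("10.0.0.5", vm_index=2)    -> "10.0.0.7"
    #   increment_ip_mac("ab:cd:ef:00:00:0a")       -> "ab:cd:ef:00:00:0b" (hex arithmetic)
    #   increment_ip_mac("2001:db8::5")             -> "2001:db8::6"
    #   increment_ip_mac("10.0.0.5;2001:db8::5")    -> ["10.0.0.6", "2001:db8::6"]
    # Note that the dual-stack branch always increments by 1 (vm_index is not
    # used there) and that any parsing error returns None.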

    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # First, we need to verify if the current vcaStatus is null, because if that is the case,
            # MongoDB will not be able to create the fields used within the update key in the database
            if not nsr.get("vcaStatus"):
                # Write an empty dictionary to the vcaStatus field if its value is null
                self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})

            # Get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # Update the vcaStatus
            db_key = f"vcaStatus.{nsr_id}.VNF"
            db_dict = dict()

            db_dict[db_key] = status_dict[nsr_id]
            await self.n2vc.update_vca_status(db_dict[db_key], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # use dotted paths so that update_db_2 updates the nested field
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict[
                        "configurationStatus.{}.status".format(vca_index)
                    ] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict[
                        "configurationStatus.{}.status".format(vca_index)
                    ] = "BROKEN"
            except Exception as e:
                # do not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY', check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED', check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warning(
                "Error updating NS state for ns={}: {}".format(nsr_id, e)
            )

    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param vca_id: VCA identifier
        :param cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # First, we need to verify if the current vcaStatus is null, because if that is the case,
            # MongoDB will not be able to create the fields used within the update key in the database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            if not nsr.get("vcaStatus"):
                # Write an empty dictionary to the vcaStatus field if its value is null
                self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})

            # Update the vcaStatus
            db_key = f"vcaStatus.{nsr_id}.KNF"
            db_dict = dict()

            db_dict[db_key] = vca_status

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict[db_key],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warning(
                "Error updating NS state for ns={}: {}".format(nsr_id, e)
            )

    @staticmethod
    def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
        try:
            env = Environment(
                undefined=StrictUndefined,
                autoescape=select_autoescape(default_for_string=True, default=True),
            )
            template = env.from_string(cloud_init_text)
            return template.render(additional_params or {})
        except UndefinedError as e:
            raise LcmException(
                "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
                "file, must be provided in the instantiation parameters inside the "
                "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
            )
        except (TemplateError, TemplateNotFound) as e:
            raise LcmException(
                "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                    vnfd_id, vdu_id, e
                )
            )
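
    # Minimal sketch of the Jinja2 behaviour relied on above (variable names
    # are illustrative): with StrictUndefined, rendering the template
    # "hostname: {{ host }}" without providing "host" raises UndefinedError,
    # which _parse_cloud_init converts into an LcmException asking for the
    # variable in the 'additionalParamsForVnf/Vdu' block; rendering with
    # {"host": "vm1"} yields "hostname: vm1".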

    def _get_vdu_cloud_init_content(self, vdu, vnfd):
        cloud_init_content = cloud_init_file = None
        try:
            if vdu.get("cloud-init-file"):
                base_folder = vnfd["_admin"]["storage"]
                if base_folder["pkg-dir"]:
                    cloud_init_file = "{}/{}/cloud_init/{}".format(
                        base_folder["folder"],
                        base_folder["pkg-dir"],
                        vdu["cloud-init-file"],
                    )
                else:
                    cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                        base_folder["folder"],
                        vdu["cloud-init-file"],
                    )
                with self.fs.file_open(cloud_init_file, "r") as ci_file:
                    cloud_init_content = ci_file.read()
            elif vdu.get("cloud-init"):
                cloud_init_content = vdu["cloud-init"]

            return cloud_init_content
        except FsException as e:
            raise LcmException(
                "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                    vnfd["id"], vdu["id"], cloud_init_file, e
                )
            )

    def _get_vdu_additional_params(self, db_vnfr, vdu_id):
        vdur = next(
            (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
        )
        additional_params = vdur.get("additionalParams")
        return parse_yaml_strings(additional_params)

    @staticmethod
    def ip_profile_2_RO(ip_profile):
        RO_ip_profile = deepcopy(ip_profile)
        if "dns-server" in RO_ip_profile:
            if isinstance(RO_ip_profile["dns-server"], list):
                RO_ip_profile["dns-address"] = []
                for ds in RO_ip_profile.pop("dns-server"):
                    RO_ip_profile["dns-address"].append(ds["address"])
            else:
                RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
        if RO_ip_profile.get("ip-version") == "ipv4":
            RO_ip_profile["ip-version"] = "IPv4"
        if RO_ip_profile.get("ip-version") == "ipv6":
            RO_ip_profile["ip-version"] = "IPv6"
        if "dhcp-params" in RO_ip_profile:
            RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
        return RO_ip_profile
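
    # Illustrative transformation performed by ip_profile_2_RO (sketch):
    #   {"ip-version": "ipv4",
    #    "dns-server": [{"address": "8.8.8.8"}],
    #    "dhcp-params": {"enabled": True}}
    # becomes
    #   {"ip-version": "IPv4",
    #    "dns-address": ["8.8.8.8"],
    #    "dhcp": {"enabled": True}}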

    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete the template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            # only the first vdu can be management of the vnf
                            iface.pop("mgmt_vnf", None)
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0! Creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify the passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]

    def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
        """
        Updates database nsr with the RO info for the created vld
        :param ns_update_nsr: dictionary to be filled with the updated info
        :param db_nsr: content of db_nsr. This is also modified
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """

        for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
            for net_RO in get_iterable(nsr_desc_RO, "nets"):
                if vld["id"] != net_RO.get("ns_net_osm_id"):
                    continue
                vld["vim-id"] = net_RO.get("vim_net_id")
                vld["name"] = net_RO.get("vim_name")
                vld["status"] = net_RO.get("status")
                vld["status-detailed"] = net_RO.get("error_msg")
                ns_update_nsr["vld.{}".format(vld_index)] = vld
                break
            else:
                raise LcmException(
                    "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
                )

    def set_vnfr_at_error(self, db_vnfrs, error_text):
        try:
            for db_vnfr in db_vnfrs.values():
                vnfr_update = {"status": "ERROR"}
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    if "status" not in vdur:
                        vdur["status"] = "ERROR"
                        vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                        if error_text:
                            vdur["status-detailed"] = str(error_text)
                            vnfr_update[
                                "vdur.{}.status-detailed".format(vdu_index)
                            ] = "ERROR"
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        except DbException as e:
            self.logger.error("Cannot update vnf. {}".format(e))

    def _get_ns_config_info(self, nsr_id):
        """
        Generates a mapping between vnf,vdu elements and the N2VC id
        :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
        :return: a dictionary with {osm-config-mapping: {}} where its element contains:
            "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
            "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
        """
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        mapping = {}
        ns_config_info = {"osm-config-mapping": mapping}
        for vca in vca_deployed_list:
            if not vca["member-vnf-index"]:
                continue
            if not vca["vdu_id"]:
                mapping[vca["member-vnf-index"]] = vca["application"]
            else:
                mapping[
                    "{}.{}.{}".format(
                        vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
                    )
                ] = vca["application"]
        return ns_config_info
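
    # Illustrative result of _get_ns_config_info (sketch with made-up names):
    # for one VNF-level charm and one VDU-level charm it would return
    #   {"osm-config-mapping": {"1": "app-vnf-1",
    #                           "1.mgmtVM.0": "app-vnf-1-vdu-mgmtvm"}}
    # where keys follow "<member-vnf-index>[.<vdu_id>.<vdu count index>]" and
    # values are the deployed N2VC application names.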

    async def _instantiate_ng_ro(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
        start_deploy,
        timeout_ns_deploy,
    ):
        db_vims = {}

        def get_vim_account(vim_account_id):
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        # modify target_vld info with instantiation parameters
        def parse_vld_instantiation_params(
            target_vim, target_vld, vld_params, target_sdn
        ):
            if vld_params.get("ip-profile"):
                target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
                    vld_params["ip-profile"]
                )
            if vld_params.get("provider-network"):
                target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                    "provider-network"
                ]
                if "sdn-ports" in vld_params["provider-network"] and target_sdn:
                    target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                        "provider-network"
                    ]["sdn-ports"]

            # check if WIM is needed; if so, choose a feasible WIM able to connect the VIMs;
            # if wim_account_id is specified in vld_params, validate that it is feasible
            wim_account_id, db_wim = select_feasible_wim_account(
                db_nsr, db_vnfrs, target_vld, vld_params, self.logger
            )

            if wim_account_id:
                # WIM is needed and a feasible one was found, populate WIM target and SDN ports
                self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
                # update vld_params with the correct WIM account Id
                vld_params["wimAccountId"] = wim_account_id

                target_wim = "wim:{}".format(wim_account_id)
                target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
                sdn_ports = get_sdn_ports(vld_params, db_wim)
                if len(sdn_ports) > 0:
                    target_vld["vim_info"][target_wim] = target_wim_attrs
                    target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports

                self.logger.debug(
                    "Target VLD with WIM data: {:s}".format(str(target_vld))
                )

            for param in ("vim-network-name", "vim-network-id"):
                if vld_params.get(param):
                    if isinstance(vld_params[param], dict):
                        for vim, vim_net in vld_params[param].items():
                            other_target_vim = "vim:" + vim
                            populate_dict(
                                target_vld["vim_info"],
                                (other_target_vim, param.replace("-", "_")),
                                vim_net,
                            )
                    else:  # isinstance str
                        target_vld["vim_info"][target_vim][
                            param.replace("-", "_")
                        ] = vld_params[param]
            if vld_params.get("common_id"):
                target_vld["common_id"] = vld_params.get("common_id")

        # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
        def update_ns_vld_target(target, ns_params):
            for vnf_params in ns_params.get("vnf", ()):
                if vnf_params.get("vimAccountId"):
                    target_vnf = next(
                        (
                            vnfr
                            for vnfr in db_vnfrs.values()
                            if vnf_params["member-vnf-index"]
                            == vnfr["member-vnf-index-ref"]
                        ),
                        None,
                    )
                    vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
                    if not vdur:
                        continue
                    for a_index, a_vld in enumerate(target["ns"]["vld"]):
                        target_vld = find_in_list(
                            get_iterable(vdur, "interfaces"),
                            lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                        )

                        vld_params = find_in_list(
                            get_iterable(ns_params, "vld"),
                            lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
                        )
                        if target_vld:
                            if vnf_params.get("vimAccountId") not in a_vld.get(
                                "vim_info", {}
                            ):
                                target_vim_network_list = [
                                    v for _, v in a_vld.get("vim_info").items()
                                ]
                                target_vim_network_name = next(
                                    (
                                        item.get("vim_network_name", "")
                                        for item in target_vim_network_list
                                    ),
                                    "",
                                )

                                target["ns"]["vld"][a_index].get("vim_info").update(
                                    {
                                        "vim:{}".format(vnf_params["vimAccountId"]): {
                                            "vim_network_name": target_vim_network_name,
                                        }
                                    }
                                )

                                if vld_params:
                                    for param in ("vim-network-name", "vim-network-id"):
                                        if vld_params.get(param) and isinstance(
                                            vld_params[param], dict
                                        ):
                                            for vim, vim_net in vld_params[
                                                param
                                            ].items():
                                                other_target_vim = "vim:" + vim
                                                populate_dict(
                                                    target["ns"]["vld"][a_index].get(
                                                        "vim_info"
                                                    ),
                                                    (
                                                        other_target_vim,
                                                        param.replace("-", "_"),
                                                    ),
                                                    vim_net,
                                                )

        nslcmop_id = db_nslcmop["_id"]
        target = {
            "name": db_nsr["name"],
            "ns": {"vld": []},
            "vnf": [],
            "image": deepcopy(db_nsr["image"]),
            "flavor": deepcopy(db_nsr["flavor"]),
            "action_id": nslcmop_id,
            "cloud_init_content": {},
        }
        for image in target["image"]:
            image["vim_info"] = {}
        for flavor in target["flavor"]:
            flavor["vim_info"] = {}
        if db_nsr.get("shared-volumes"):
            target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
            for shared_volumes in target["shared-volumes"]:
                shared_volumes["vim_info"] = {}
        if db_nsr.get("affinity-or-anti-affinity-group"):
            target["affinity-or-anti-affinity-group"] = deepcopy(
                db_nsr["affinity-or-anti-affinity-group"]
            )
            for affinity_or_anti_affinity_group in target[
                "affinity-or-anti-affinity-group"
            ]:
                affinity_or_anti_affinity_group["vim_info"] = {}

        if db_nslcmop.get("lcmOperationType") != "instantiate":
            # get parameters of instantiation:
            db_nslcmop_instantiate = self.db.get_list(
                "nslcmops",
                {
                    "nsInstanceId": db_nslcmop["nsInstanceId"],
                    "lcmOperationType": "instantiate",
                },
            )[-1]
            ns_params = db_nslcmop_instantiate.get("operationParams")
        else:
            ns_params = db_nslcmop.get("operationParams")
        ssh_keys_instantiation = ns_params.get("ssh_keys") or []
        ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])

        cp2target = {}
        for vld_index, vld in enumerate(db_nsr.get("vld")):
            target_vim = "vim:{}".format(ns_params["vimAccountId"])
            target_vld = {
                "id": vld["id"],
                "name": vld["name"],
                "mgmt-network": vld.get("mgmt-network", False),
                "type": vld.get("type"),
                "vim_info": {
                    target_vim: {
                        "vim_network_name": vld.get("vim-network-name"),
                        "vim_account_id": ns_params["vimAccountId"],
                    }
                },
            }
            # check if this network needs SDN assist
            if vld.get("pci-interfaces"):
                db_vim = get_vim_account(ns_params["vimAccountId"])
                if vim_config := db_vim.get("config"):
                    if sdnc_id := vim_config.get("sdn-controller"):
                        sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        target_vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

            nsd_vnf_profiles = get_vnf_profiles(nsd)
            for nsd_vnf_profile in nsd_vnf_profiles:
                for cp in nsd_vnf_profile["virtual-link-connectivity"]:
                    if cp["virtual-link-profile-id"] == vld["id"]:
                        cp2target[
                            "member_vnf:{}.{}".format(
                                cp["constituent-cpd-id"][0][
                                    "constituent-base-element-id"
                                ],
                                cp["constituent-cpd-id"][0]["constituent-cpd-id"],
                            )
                        ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)

            # check at nsd descriptor, if there is an ip-profile
            vld_params = {}
            nsd_vlp = find_in_list(
                get_virtual_link_profiles(nsd),
                lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
                == vld["id"],
            )
            if (
                nsd_vlp
                and nsd_vlp.get("virtual-link-protocol-data")
                and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
            ):
                vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
                    "l3-protocol-data"
                ]

            # update vld_params with instantiation params
            vld_instantiation_params = find_in_list(
                get_iterable(ns_params, "vld"),
                lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
            )
            if vld_instantiation_params:
                vld_params.update(vld_instantiation_params)
            parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
            target["ns"]["vld"].append(target_vld)
        # Update the target ns_vld if the vnf vim_account is overridden by instantiation params
        update_ns_vld_target(target, ns_params)

        for vnfr in db_vnfrs.values():
            vnfd = find_in_list(
                db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
            )
            vnf_params = find_in_list(
                get_iterable(ns_params, "vnf"),
                lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
            )
            target_vnf = deepcopy(vnfr)
            target_vim = "vim:{}".format(vnfr["vim-account-id"])
            for vld in target_vnf.get("vld", ()):
                # check if connected to a ns.vld, to fill target
                vnf_cp = find_in_list(
                    vnfd.get("int-virtual-link-desc", ()),
                    lambda cpd: cpd.get("id") == vld["id"],
                )
                if vnf_cp:
                    ns_cp = "member_vnf:{}.{}".format(
                        vnfr["member-vnf-index-ref"], vnf_cp["id"]
                    )
                    if cp2target.get(ns_cp):
                        vld["target"] = cp2target[ns_cp]

                vld["vim_info"] = {
                    target_vim: {"vim_network_name": vld.get("vim-network-name")}
                }
                # check if this network needs SDN assist
                target_sdn = None
                if vld.get("pci-interfaces"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    sdnc_id = db_vim["config"].get("sdn-controller")
                    if sdnc_id:
                        sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

                # check at vnfd descriptor, if there is an ip-profile
                vld_params = {}
                vnfd_vlp = find_in_list(
                    get_virtual_link_profiles(vnfd),
                    lambda a_link_profile: a_link_profile["id"] == vld["id"],
                )
                if (
                    vnfd_vlp
                    and vnfd_vlp.get("virtual-link-protocol-data")
                    and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                ):
                    vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
                        "l3-protocol-data"
                    ]
                # update vld_params with instantiation params
                if vnf_params:
                    vld_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "internal-vld"),
                        lambda i_vld: i_vld["name"] == vld["id"],
                    )
                    if vld_instantiation_params:
                        vld_params.update(vld_instantiation_params)
                parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)

            vdur_list = []
            for vdur in target_vnf.get("vdur", ()):
                if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
                    continue  # This vdu must not be created
                vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}

                self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))

                if ssh_keys_all:
                    vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
                    vnf_configuration = get_configuration(vnfd, vnfd["id"])
                    if (
                        vdu_configuration
                        and vdu_configuration.get("config-access")
                        and vdu_configuration.get("config-access").get("ssh-access")
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vdu_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif (
                        vnf_configuration
                        and vnf_configuration.get("config-access")
                        and vnf_configuration.get("config-access").get("ssh-access")
                        and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vnf_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif ssh_keys_instantiation and find_in_list(
                        vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
                    ):
                        vdur["ssh-keys"] = ssh_keys_instantiation

                self.logger.debug("NS > vdur > {}".format(vdur))

                vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
                # cloud-init
                if vdud.get("cloud-init-file"):
                    vdur["cloud-init"] = "{}:file:{}".format(
                        vnfd["_id"], vdud.get("cloud-init-file")
                    )
                    # read the file and put its content at target.cloud_init_content,
                    # so that ng_ro does not need to use the shared package system
                    if vdur["cloud-init"] not in target["cloud_init_content"]:
                        base_folder = vnfd["_admin"]["storage"]
                        if base_folder["pkg-dir"]:
                            cloud_init_file = "{}/{}/cloud_init/{}".format(
                                base_folder["folder"],
                                base_folder["pkg-dir"],
                                vdud.get("cloud-init-file"),
                            )
                        else:
                            cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                                base_folder["folder"],
                                vdud.get("cloud-init-file"),
                            )
                        with self.fs.file_open(cloud_init_file, "r") as ci_file:
                            target["cloud_init_content"][
                                vdur["cloud-init"]
                            ] = ci_file.read()
                elif vdud.get("cloud-init"):
                    vdur["cloud-init"] = "{}:vdu:{}".format(
                        vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
                    )
                    # put the content at target.cloud_init_content,
                    # so that ng_ro does not need to read the vnfd descriptor
                    target["cloud_init_content"][vdur["cloud-init"]] = vdud[
                        "cloud-init"
                    ]
                vdur["additionalParams"] = vdur.get("additionalParams") or {}
                deploy_params_vdu = self._format_additional_params(
                    vdur.get("additionalParams") or {}
                )
                deploy_params_vdu["OSM"] = get_osm_params(
                    vnfr, vdur["vdu-id-ref"], vdur["count-index"]
                )
                vdur["additionalParams"] = deploy_params_vdu

                # flavor
                ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                if target_vim not in ns_flavor["vim_info"]:
                    ns_flavor["vim_info"][target_vim] = {}

                # deal with images
                # in case alternative images are provided we must check if they should be applied
                # for the vim_type, modify the vim_type taking it into account
                ns_image_id = int(vdur["ns-image-id"])
                if vdur.get("alt-image-ids"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    vim_type = db_vim["vim_type"]
                    for alt_image_id in vdur.get("alt-image-ids"):
                        ns_alt_image = target["image"][int(alt_image_id)]
                        if vim_type == ns_alt_image.get("vim-type"):
                            # must use the alternative image
                            self.logger.debug(
                                "use alternative image id: {}".format(alt_image_id)
                            )
                            ns_image_id = alt_image_id
                            vdur["ns-image-id"] = ns_image_id
                            break
                ns_image = target["image"][int(ns_image_id)]
                if target_vim not in ns_image["vim_info"]:
                    ns_image["vim_info"][target_vim] = {}

                # Affinity groups
                if vdur.get("affinity-or-anti-affinity-group-id"):
                    for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
                        ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
                        if target_vim not in ns_ags["vim_info"]:
                            ns_ags["vim_info"][target_vim] = {}

                # shared-volumes
                if vdur.get("shared-volumes-id"):
                    for sv_id in vdur["shared-volumes-id"]:
                        ns_sv = find_in_list(
                            target["shared-volumes"], lambda sv: sv_id in sv["id"]
                        )
                        if ns_sv:
                            ns_sv["vim_info"][target_vim] = {}

                vdur["vim_info"] = {target_vim: {}}
                # instantiation parameters
                if vnf_params:
                    vdu_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "vdu"),
                        lambda i_vdu: i_vdu["id"] == vdud["id"],
                    )
                    if vdu_instantiation_params:
                        # Parse the vdu_volumes from the instantiation params
                        vdu_volumes = get_volumes_from_instantiation_params(
                            vdu_instantiation_params, vdud
                        )
                        vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
                        vdur["additionalParams"]["OSM"][
                            "vim_flavor_id"
                        ] = vdu_instantiation_params.get("vim-flavor-id")
                vdur_list.append(vdur)
            target_vnf["vdur"] = vdur_list
            target["vnf"].append(target_vnf)

        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
        desc = await self.RO.deploy(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id,
            action_id,
            nslcmop_id,
            start_deploy,
            timeout_ns_deploy,
            stage,
            operation="instantiation",
        )

        # Updating NSR
        db_nsr_update = {
            "_admin.deployed.RO.operational-status": "running",
            "detailed-status": " ".join(stage),
        }
        # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(
            logging_text + "ns deployed at RO. RO_id={}".format(action_id)
        )
        return

    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15)
        else:  # timeout_ns_deploy
            raise NgRoException("Timeout waiting for ns to deploy")
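
    # Polling sketch (illustrative): the loop above polls RO every 15 s until
    # the operation reports DONE or FAILED, or until "timeout" seconds have
    # elapsed; the while/else construct runs the else branch (raising the
    # timeout NgRoException) only when the loop condition becomes false,
    # i.e. when no break happened.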

    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return

    async def instantiate_RO(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
    ):
        """
        Instantiate at RO
        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param nsd: database content of ns descriptor
        :param db_nsr: database content of ns record
        :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
        :param db_vnfrs:
        :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
        :return: None or exception
        """
        try:
            start_deploy = time()
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.ns_deploy

            # Check for and optionally request placement optimization. Database will be updated if placement activated
            stage[2] = "Waiting for Placement."
            if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
                # in case of placement change, set ns_params["vimAccountId"] if it is not present at any vnfr
                for vnfr in db_vnfrs.values():
                    if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
                        break
                else:
                    ns_params["vimAccountId"] = vnfr["vim-account-id"]

            return await self._instantiate_ng_ro(
                logging_text,
                nsr_id,
                nsd,
                db_nsr,
                db_nslcmop,
                db_vnfrs,
                db_vnfds,
                n2vc_key_list,
                stage,
                start_deploy,
                timeout_ns_deploy,
            )
        except Exception as e:
            stage[2] = "ERROR deploying at VIM"
            self.set_vnfr_at_error(db_vnfrs, str(e))
            self.logger.error(
                "Error deploying at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise

    async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
        """
        Wait for kdu to be up, get ip address
        :param logging_text: prefix to use for logging
        :param nsr_id:
        :param vnfr_id:
        :param kdu_name:
        :return: IP address, K8s services
        """

        # self.logger.debug(logging_text + "Starting wait_kdu_up")
        nb_tries = 0

        while nb_tries < 360:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
            kdur = next(
                (
                    x
                    for x in get_iterable(db_vnfr, "kdur")
                    if x.get("kdu-name") == kdu_name
                ),
                None,
            )
            if not kdur:
                raise LcmException(
                    "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
                )
            if kdur.get("status"):
                if kdur["status"] in ("READY", "ENABLED"):
                    return kdur.get("ip-address"), kdur.get("services")
                else:
                    raise LcmException(
                        "target KDU={} is in error state".format(kdu_name)
                    )

            await asyncio.sleep(10)
            nb_tries += 1
        raise LcmException(
            "Timeout waiting for KDU={} to be instantiated".format(kdu_name)
        )
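
    # Note (illustrative): the loop above polls the vnfr every 10 s up to 360
    # times, i.e. wait_kdu_up gives a KDU roughly one hour to reach the
    # READY/ENABLED state before timing out.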

    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for the VM ip address at RO, and optionally, insert a public key in the virtual machine
        :param logging_text: prefix to use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id:
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        target_vdu_id = None
        ro_retries = 0

        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    self.logger.error(logging_text + "Cannot inject ssh-key to a PDU")
                    return ip_address
                try:
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                break

        return ip_address

    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have finished: the NS waits for its VNFs and VDUs; VNFs wait for their VDUs
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")

    def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
        vca_id = None
        if db_vnfr:
            vca_id = deep_get(db_vnfr, ("vca-id",))
        elif db_nsr:
            vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
            vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
        return vca_id
1564
1565 async def instantiate_N2VC(
1566 self,
1567 logging_text,
1568 vca_index,
1569 nsi_id,
1570 db_nsr,
1571 db_vnfr,
1572 vdu_id,
1573 kdu_name,
1574 vdu_index,
1575 kdu_index,
1576 config_descriptor,
1577 deploy_params,
1578 base_folder,
1579 nslcmop_id,
1580 stage,
1581 vca_type,
1582 vca_name,
1583 ee_config_descriptor,
1584 ):
1585 nsr_id = db_nsr["_id"]
1586 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1587 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1588 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1589 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1590 db_dict = {
1591 "collection": "nsrs",
1592 "filter": {"_id": nsr_id},
1593 "path": db_update_entry,
1594 }
1595 step = ""
1596 try:
1597 element_type = "NS"
1598 element_under_configuration = nsr_id
1599
1600 vnfr_id = None
1601 if db_vnfr:
1602 vnfr_id = db_vnfr["_id"]
1603 osm_config["osm"]["vnf_id"] = vnfr_id
1604
1605 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1606
1607 if vca_type == "native_charm":
1608 index_number = 0
1609 else:
1610 index_number = vdu_index or 0
1611
1612 if vnfr_id:
1613 element_type = "VNF"
1614 element_under_configuration = vnfr_id
1615 namespace += ".{}-{}".format(vnfr_id, index_number)
1616 if vdu_id:
1617 namespace += ".{}-{}".format(vdu_id, index_number)
1618 element_type = "VDU"
1619 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1620 osm_config["osm"]["vdu_id"] = vdu_id
1621 elif kdu_name:
1622 namespace += ".{}".format(kdu_name)
1623 element_type = "KDU"
1624 element_under_configuration = kdu_name
1625 osm_config["osm"]["kdu_name"] = kdu_name
1626
1627 # Get artifact path
1628 if base_folder["pkg-dir"]:
1629 artifact_path = "{}/{}/{}/{}".format(
1630 base_folder["folder"],
1631 base_folder["pkg-dir"],
1632 "charms"
1633 if vca_type
1634 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1635 else "helm-charts",
1636 vca_name,
1637 )
1638 else:
1639 artifact_path = "{}/Scripts/{}/{}/".format(
1640 base_folder["folder"],
1641 "charms"
1642 if vca_type
1643 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1644 else "helm-charts",
1645 vca_name,
1646 )
1647
1648 self.logger.debug("Artifact path > {}".format(artifact_path))
1649
1650 # get initial_config_primitive_list that applies to this element
1651 initial_config_primitive_list = config_descriptor.get(
1652 "initial-config-primitive"
1653 )
1654
1655 self.logger.debug(
1656 "Initial config primitive list > {}".format(
1657 initial_config_primitive_list
1658 )
1659 )
1660
1661 # add config if not present for NS charm
1662 ee_descriptor_id = ee_config_descriptor.get("id")
1663 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1664 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1665 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1666 )
1667
1668 self.logger.debug(
1669 "Initial config primitive list #2 > {}".format(
1670 initial_config_primitive_list
1671 )
1672 )
1673 # n2vc_redesign STEP 3.1
1674 # find old ee_id if exists
1675 ee_id = vca_deployed.get("ee_id")
1676
1677 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1678 # create or register execution environment in VCA
1679 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
1680 self._write_configuration_status(
1681 nsr_id=nsr_id,
1682 vca_index=vca_index,
1683 status="CREATING",
1684 element_under_configuration=element_under_configuration,
1685 element_type=element_type,
1686 )
1687
1688 step = "create execution environment"
1689 self.logger.debug(logging_text + step)
1690
1691 ee_id = None
1692 credentials = None
1693 if vca_type == "k8s_proxy_charm":
1694 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1695 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1696 namespace=namespace,
1697 artifact_path=artifact_path,
1698 db_dict=db_dict,
1699 vca_id=vca_id,
1700 )
1701 elif vca_type == "helm-v3":
1702 ee_id, credentials = await self.vca_map[
1703 vca_type
1704 ].create_execution_environment(
1705 namespace=nsr_id,
1706 reuse_ee_id=ee_id,
1707 db_dict=db_dict,
1708 config=osm_config,
1709 artifact_path=artifact_path,
1710 chart_model=vca_name,
1711 vca_type=vca_type,
1712 )
1713 else:
1714 ee_id, credentials = await self.vca_map[
1715 vca_type
1716 ].create_execution_environment(
1717 namespace=namespace,
1718 reuse_ee_id=ee_id,
1719 db_dict=db_dict,
1720 vca_id=vca_id,
1721 )
1722
1723 elif vca_type == "native_charm":
1724 step = "Waiting to VM being up and getting IP address"
1725 self.logger.debug(logging_text + step)
1726 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1727 logging_text,
1728 nsr_id,
1729 vnfr_id,
1730 vdu_id,
1731 vdu_index,
1732 user=None,
1733 pub_key=None,
1734 )
1735 credentials = {"hostname": rw_mgmt_ip}
1736 # get username
1737 username = deep_get(
1738 config_descriptor, ("config-access", "ssh-access", "default-user")
1739 )
1740 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1741 # merged. Meanwhile let's get username from initial-config-primitive
1742 if not username and initial_config_primitive_list:
1743 for config_primitive in initial_config_primitive_list:
1744 for param in config_primitive.get("parameter", ()):
1745 if param["name"] == "ssh-username":
1746 username = param["value"]
1747 break
1748 if not username:
1749 raise LcmException(
1750 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1751 "'config-access.ssh-access.default-user'"
1752 )
1753 credentials["username"] = username
1754 # n2vc_redesign STEP 3.2
1755
1756 self._write_configuration_status(
1757 nsr_id=nsr_id,
1758 vca_index=vca_index,
1759 status="REGISTERING",
1760 element_under_configuration=element_under_configuration,
1761 element_type=element_type,
1762 )
1763
1764 step = "register execution environment {}".format(credentials)
1765 self.logger.debug(logging_text + step)
1766 ee_id = await self.vca_map[vca_type].register_execution_environment(
1767 credentials=credentials,
1768 namespace=namespace,
1769 db_dict=db_dict,
1770 vca_id=vca_id,
1771 )
1772
1773 # for compatibility with MON/POL modules, the need model and application name at database
1774 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1775 ee_id_parts = ee_id.split(".")
1776 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1777 if len(ee_id_parts) >= 2:
1778 model_name = ee_id_parts[0]
1779 application_name = ee_id_parts[1]
1780 db_nsr_update[db_update_entry + "model"] = model_name
1781 db_nsr_update[db_update_entry + "application"] = application_name
1782
1783 # n2vc_redesign STEP 3.3
1784 step = "Install configuration Software"
1785
1786 self._write_configuration_status(
1787 nsr_id=nsr_id,
1788 vca_index=vca_index,
1789 status="INSTALLING SW",
1790 element_under_configuration=element_under_configuration,
1791 element_type=element_type,
1792 other_update=db_nsr_update,
1793 )
1794
1795 # TODO check if already done
1796 self.logger.debug(logging_text + step)
1797 config = None
1798 if vca_type == "native_charm":
1799 config_primitive = next(
1800 (p for p in initial_config_primitive_list if p["name"] == "config"),
1801 None,
1802 )
1803 if config_primitive:
1804 config = self._map_primitive_params(
1805 config_primitive, {}, deploy_params
1806 )
1807 num_units = 1
1808 if vca_type == "lxc_proxy_charm":
1809 if element_type == "NS":
1810 num_units = db_nsr.get("config-units") or 1
1811 elif element_type == "VNF":
1812 num_units = db_vnfr.get("config-units") or 1
1813 elif element_type == "VDU":
1814 for v in db_vnfr["vdur"]:
1815 if vdu_id == v["vdu-id-ref"]:
1816 num_units = v.get("config-units") or 1
1817 break
1818 if vca_type != "k8s_proxy_charm":
1819 await self.vca_map[vca_type].install_configuration_sw(
1820 ee_id=ee_id,
1821 artifact_path=artifact_path,
1822 db_dict=db_dict,
1823 config=config,
1824 num_units=num_units,
1825 vca_id=vca_id,
1826 vca_type=vca_type,
1827 )
1828
1829 # write in db flag of configuration_sw already installed
1830 self.update_db_2(
1831 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1832 )
1833
1834 # add relations for this VCA (wait for other peers related with this VCA)
1835 is_relation_added = await self._add_vca_relations(
1836 logging_text=logging_text,
1837 nsr_id=nsr_id,
1838 vca_type=vca_type,
1839 vca_index=vca_index,
1840 )
1841
1842 if not is_relation_added:
1843 raise LcmException("Relations could not be added to VCA.")
1844
1845 # if SSH access is required, get the execution environment SSH public key
1846 # for native charms we have already waited for the VM to be up
1847 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
1848 pub_key = None
1849 user = None
1850 # self.logger.debug("get ssh key block")
1851 if deep_get(
1852 config_descriptor, ("config-access", "ssh-access", "required")
1853 ):
1854 # self.logger.debug("ssh key needed")
1855 # Needed to inject an ssh key
1856 user = deep_get(
1857 config_descriptor,
1858 ("config-access", "ssh-access", "default-user"),
1859 )
1860 step = "Install configuration Software, getting public ssh key"
1861 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1862 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1863 )
1864
1865 step = "Insert public key into VM user={} ssh_key={}".format(
1866 user, pub_key
1867 )
1868 else:
1869 # self.logger.debug("no need to get ssh key")
1870 step = "Waiting to VM being up and getting IP address"
1871 self.logger.debug(logging_text + step)
1872
1873 # default rw_mgmt_ip to None, so the variable is always defined
1874 rw_mgmt_ip = None
1875
1876 # n2vc_redesign STEP 5.1
1877 # wait for RO (ip-address) Insert pub_key into VM
1878 if vnfr_id:
1879 if kdu_name:
1880 rw_mgmt_ip, services = await self.wait_kdu_up(
1881 logging_text, nsr_id, vnfr_id, kdu_name
1882 )
1883 vnfd = self.db.get_one(
1884 "vnfds_revisions",
1885 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
1886 )
1887 kdu = get_kdu(vnfd, kdu_name)
1888 kdu_services = [
1889 service["name"] for service in get_kdu_services(kdu)
1890 ]
1891 exposed_services = []
1892 for service in services:
1893 if any(s in service["name"] for s in kdu_services):
1894 exposed_services.append(service)
1895 await self.vca_map[vca_type].exec_primitive(
1896 ee_id=ee_id,
1897 primitive_name="config",
1898 params_dict={
1899 "osm-config": json.dumps(
1900 OsmConfigBuilder(
1901 k8s={"services": exposed_services}
1902 ).build()
1903 )
1904 },
1905 vca_id=vca_id,
1906 )
1907
1908 # This check is needed to avoid trying to add a public key to a VM when the
1909 # VNF is a KNF. In the edge case where the user creates a VCA for a KNF and not
1910 # for its KDUs, the previous check gives False and the code reaches this block,
1911 # so it must verify whether the VNF is actually a VNF or a KNF.
1913 elif db_vnfr.get("vdur"):
1914 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1915 logging_text,
1916 nsr_id,
1917 vnfr_id,
1918 vdu_id,
1919 vdu_index,
1920 user=user,
1921 pub_key=pub_key,
1922 )
1923
1924 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
1925
1926 # store rw_mgmt_ip in deploy params for later replacement
1927 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1928
1929 # n2vc_redesign STEP 6 Execute initial config primitive
1930 step = "execute initial config primitive"
1931
1932 # wait for dependent primitives execution (NS -> VNF -> VDU)
1933 if initial_config_primitive_list:
1934 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1935
1936 # stage, depending on element type: vdu, kdu, vnf or ns
1937 my_vca = vca_deployed_list[vca_index]
1938 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1939 # VDU or KDU
1940 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
1941 elif my_vca.get("member-vnf-index"):
1942 # VNF
1943 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
1944 else:
1945 # NS
1946 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
1947
1948 self._write_configuration_status(
1949 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
1950 )
1951
1952 self._write_op_status(op_id=nslcmop_id, stage=stage)
1953
1954 check_if_terminated_needed = True
1955 for initial_config_primitive in initial_config_primitive_list:
1956 # add NS config info to the deploy params if this is an NS execution environment
1957 if not vca_deployed["member-vnf-index"]:
1958 deploy_params["ns_config_info"] = json.dumps(
1959 self._get_ns_config_info(nsr_id)
1960 )
1961 # TODO check if already done
1962 primitive_params_ = self._map_primitive_params(
1963 initial_config_primitive, {}, deploy_params
1964 )
1965
1966 step = "execute primitive '{}' params '{}'".format(
1967 initial_config_primitive["name"], primitive_params_
1968 )
1969 self.logger.debug(logging_text + step)
1970 await self.vca_map[vca_type].exec_primitive(
1971 ee_id=ee_id,
1972 primitive_name=initial_config_primitive["name"],
1973 params_dict=primitive_params_,
1974 db_dict=db_dict,
1975 vca_id=vca_id,
1976 vca_type=vca_type,
1977 )
1978 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1979 if check_if_terminated_needed:
1980 if config_descriptor.get("terminate-config-primitive"):
1981 self.update_db_2(
1982 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
1983 )
1984 check_if_terminated_needed = False
1985
1986 # TODO register in database that primitive is done
1987
1988 # STEP 7 Configure metrics
1989 if vca_type == "helm-v3":
1990 # TODO: review for those cases where the helm chart is a reference and
1991 # is not part of the NF package
1992 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
1993 ee_id=ee_id,
1994 artifact_path=artifact_path,
1995 ee_config_descriptor=ee_config_descriptor,
1996 vnfr_id=vnfr_id,
1997 nsr_id=nsr_id,
1998 target_ip=rw_mgmt_ip,
1999 element_type=element_type,
2000 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2001 vdu_id=vdu_id,
2002 vdu_index=vdu_index,
2003 kdu_name=kdu_name,
2004 kdu_index=kdu_index,
2005 )
2006 if prometheus_jobs:
2007 self.update_db_2(
2008 "nsrs",
2009 nsr_id,
2010 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2011 )
2012
2013 for job in prometheus_jobs:
2014 self.db.set_one(
2015 "prometheus_jobs",
2016 {"job_name": job["job_name"]},
2017 job,
2018 upsert=True,
2019 fail_on_empty=False,
2020 )
2021
2022 step = "instantiated at VCA"
2023 self.logger.debug(logging_text + step)
2024
2025 self._write_configuration_status(
2026 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2027 )
2028
2029 except Exception as e: # TODO do not catch generic Exception; use N2VC exceptions
2030 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2031 if not isinstance(
2032 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2033 ):
2034 self.logger.error(
2035 "Exception while {} : {}".format(step, e), exc_info=True
2036 )
2037 self._write_configuration_status(
2038 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2039 )
2040 raise LcmException("{}. {}".format(step, e)) from e
2041
2042 def _write_ns_status(
2043 self,
2044 nsr_id: str,
2045 ns_state: str,
2046 current_operation: str,
2047 current_operation_id: str,
2048 error_description: str = None,
2049 error_detail: str = None,
2050 other_update: dict = None,
2051 ):
2052 """
2053 Update db_nsr fields.
2054 :param nsr_id:
2055 :param ns_state:
2056 :param current_operation:
2057 :param current_operation_id:
2058 :param error_description:
2059 :param error_detail:
2060 :param other_update: Other required changes at database, if provided (note: the dict is modified in place)
2061 :return:
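Example (illustrative call, hypothetical ids):
    self._write_ns_status(
        nsr_id="aaaa-bbbb",
        ns_state="BUILDING",
        current_operation="INSTANTIATING",
        current_operation_id="cccc-dddd",
    )
writes nsState, currentOperation and currentOperationID into the nsr record.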
2062 """
2063 try:
2064 db_dict = other_update or {}
2065 db_dict[
2066 "_admin.nslcmop"
2067 ] = current_operation_id # for backward compatibility
2068 db_dict["_admin.current-operation"] = current_operation_id
2069 db_dict["_admin.operation-type"] = (
2070 current_operation if current_operation != "IDLE" else None
2071 )
2072 db_dict["currentOperation"] = current_operation
2073 db_dict["currentOperationID"] = current_operation_id
2074 db_dict["errorDescription"] = error_description
2075 db_dict["errorDetail"] = error_detail
2076
2077 if ns_state:
2078 db_dict["nsState"] = ns_state
2079 self.update_db_2("nsrs", nsr_id, db_dict)
2080 except DbException as e:
2081 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2082
2083 def _write_op_status(
2084 self,
2085 op_id: str,
2086 stage: list = None,
2087 error_message: str = None,
2088 queuePosition: int = 0,
2089 operation_state: str = None,
2090 other_update: dict = None,
2091 ):
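# A minimal sketch of the expected input (hypothetical values): 'stage' is the
# 3-element list [stage, step, vim_progress] used across this module, e.g.
#     stage = ["Stage 2/5: deployment of KDUs, VMs and execution environments.", "Deploying KDUs.", ""]
# stage[0] is stored as 'stage' and " ".join(stage) as 'detailed-status'.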
2092 try:
2093 db_dict = other_update or {}
2094 db_dict["queuePosition"] = queuePosition
2095 if isinstance(stage, list):
2096 db_dict["stage"] = stage[0]
2097 db_dict["detailed-status"] = " ".join(stage)
2098 elif stage is not None:
2099 db_dict["stage"] = str(stage)
2100
2101 if error_message is not None:
2102 db_dict["errorMessage"] = error_message
2103 if operation_state is not None:
2104 db_dict["operationState"] = operation_state
2105 db_dict["statusEnteredTime"] = time()
2106 self.update_db_2("nslcmops", op_id, db_dict)
2107 except DbException as e:
2108 self.logger.warn(
2109 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2110 )
2111
2112 def _write_all_config_status(self, db_nsr: dict, status: str):
2113 try:
2114 nsr_id = db_nsr["_id"]
2115 # configurationStatus
2116 config_status = db_nsr.get("configurationStatus")
2117 if config_status:
2118 db_nsr_update = {
2119 "configurationStatus.{}.status".format(index): status
2120 for index, v in enumerate(config_status)
2121 if v
2122 }
2123 # update status
2124 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2125
2126 except DbException as e:
2127 self.logger.warn(
2128 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2129 )
2130
2131 def _write_configuration_status(
2132 self,
2133 nsr_id: str,
2134 vca_index: int,
2135 status: str = None,
2136 element_under_configuration: str = None,
2137 element_type: str = None,
2138 other_update: dict = None,
2139 ):
2140 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2141 # .format(vca_index, status))
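# Sketch of the resulting update (hypothetical index/status): for vca_index=2
# and status="READY", db_dict ends up as
#     {"configurationStatus.2.status": "READY"}
# plus elementUnderConfiguration/elementType keys when provided.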
2142
2143 try:
2144 db_path = "configurationStatus.{}.".format(vca_index)
2145 db_dict = other_update or {}
2146 if status:
2147 db_dict[db_path + "status"] = status
2148 if element_under_configuration:
2149 db_dict[
2150 db_path + "elementUnderConfiguration"
2151 ] = element_under_configuration
2152 if element_type:
2153 db_dict[db_path + "elementType"] = element_type
2154 self.update_db_2("nsrs", nsr_id, db_dict)
2155 except DbException as e:
2156 self.logger.warn(
2157 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2158 status, nsr_id, vca_index, e
2159 )
2160 )
2161
2162 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2163 """
2164 Checks and computes the placement (the VIM account where to deploy). If it is decided by an external tool, it
2165 sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
2166 Database is used because the result can be obtained from a different LCM worker in case of HA.
2167 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2168 :param db_nslcmop: database content of nslcmop
2169 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2170 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfrs with the
2171 computed 'vim-account-id'
2172 """
2173 modified = False
2174 nslcmop_id = db_nslcmop["_id"]
2175 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2176 if placement_engine == "PLA":
2177 self.logger.debug(
2178 logging_text + "Invoke and wait for placement optimization"
2179 )
2180 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2181 db_poll_interval = 5
2182 wait = db_poll_interval * 10
2183 pla_result = None
2184 while not pla_result and wait >= 0:
2185 await asyncio.sleep(db_poll_interval)
2186 wait -= db_poll_interval
2187 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2188 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2189
2190 if not pla_result:
2191 raise LcmException(
2192 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2193 )
2194
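# Assumed (hypothetical) shape of the PLA result written at _admin.pla:
#     pla_result = {"vnf": [{"member-vnf-index": "1", "vimAccountId": "aaaa-bbbb"}]}
# Entries without a vimAccountId, or without a matching vnfr, are skipped below.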
2195 for pla_vnf in pla_result["vnf"]:
2196 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2197 if not pla_vnf.get("vimAccountId") or not vnfr:
2198 continue
2199 modified = True
2200 self.db.set_one(
2201 "vnfrs",
2202 {"_id": vnfr["_id"]},
2203 {"vim-account-id": pla_vnf["vimAccountId"]},
2204 )
2205 # Modifies db_vnfrs
2206 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2207 return modified
2208
2209 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2210 alerts = []
2211 nsr_id = vnfr["nsr-id-ref"]
2212 df = vnfd.get("df", [{}])[0]
2213 # Checking for auto-healing configuration
2214 if "healing-aspect" in df:
2215 healing_aspects = df["healing-aspect"]
2216 for healing in healing_aspects:
2217 for healing_policy in healing.get("healing-policy", ()):
2218 vdu_id = healing_policy["vdu-id"]
2219 vdur = next(
2220 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2221 {},
2222 )
2223 if not vdur:
2224 continue
2225 metric_name = "vm_status"
2226 vdu_name = vdur.get("name")
2227 vnf_member_index = vnfr["member-vnf-index-ref"]
2228 uuid = str(uuid4())
2229 name = f"healing_{uuid}"
2230 action = healing_policy
2231 # action_on_recovery = healing.get("action-on-recovery")
2232 # cooldown_time = healing.get("cooldown-time")
2233 # day1 = healing.get("day1")
2234 alert = {
2235 "uuid": uuid,
2236 "name": name,
2237 "metric": metric_name,
2238 "tags": {
2239 "ns_id": nsr_id,
2240 "vnf_member_index": vnf_member_index,
2241 "vdu_name": vdu_name,
2242 },
2243 "alarm_status": "ok",
2244 "action_type": "healing",
2245 "action": action,
2246 }
2247 alerts.append(alert)
2248 return alerts
2249
2250 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2251 alerts = []
2252 nsr_id = vnfr["nsr-id-ref"]
2253 df = vnfd.get("df", [{}])[0]
2254 # Checking for auto-scaling configuration
2255 if "scaling-aspect" in df:
2256 scaling_aspects = df["scaling-aspect"]
2257 all_vnfd_monitoring_params = {}
2258 for ivld in vnfd.get("int-virtual-link-desc", ()):
2259 for mp in ivld.get("monitoring-parameters", ()):
2260 all_vnfd_monitoring_params[mp.get("id")] = mp
2261 for vdu in vnfd.get("vdu", ()):
2262 for mp in vdu.get("monitoring-parameter", ()):
2263 all_vnfd_monitoring_params[mp.get("id")] = mp
2264 for df in vnfd.get("df", ()):
2265 for mp in df.get("monitoring-parameter", ()):
2266 all_vnfd_monitoring_params[mp.get("id")] = mp
2267 for scaling_aspect in scaling_aspects:
2268 scaling_group_name = scaling_aspect.get("name", "")
2269 # Get monitored VDUs
2270 all_monitored_vdus = set()
2271 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2272 "deltas", ()
2273 ):
2274 for vdu_delta in delta.get("vdu-delta", ()):
2275 all_monitored_vdus.add(vdu_delta.get("id"))
2276 monitored_vdurs = list(
2277 filter(
2278 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2279 vnfr["vdur"],
2280 )
2281 )
2282 if not monitored_vdurs:
2283 self.logger.error(
2284 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2285 )
2286 continue
2287 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2288 if scaling_policy["scaling-type"] != "automatic":
2289 continue
2290 threshold_time = scaling_policy.get("threshold-time", "1")
2291 cooldown_time = scaling_policy.get("cooldown-time", "0")
2292 for scaling_criteria in scaling_policy["scaling-criteria"]:
2293 monitoring_param_ref = scaling_criteria.get(
2294 "vnf-monitoring-param-ref"
2295 )
2296 vnf_monitoring_param = all_vnfd_monitoring_params[
2297 monitoring_param_ref
2298 ]
2299 for vdur in monitored_vdurs:
2300 vdu_id = vdur["vdu-id-ref"]
2301 metric_name = vnf_monitoring_param.get("performance-metric")
2302 metric_name = f"osm_{metric_name}"
2303 vnf_member_index = vnfr["member-vnf-index-ref"]
2304 scalein_threshold = scaling_criteria.get(
2305 "scale-in-threshold"
2306 )
2307 scaleout_threshold = scaling_criteria.get(
2308 "scale-out-threshold"
2309 )
2310 # Looking for min/max-number-of-instances
2311 instances_min_number = 1
2312 instances_max_number = 1
2313 vdu_profile = df["vdu-profile"]
2314 if vdu_profile:
2315 profile = next(
2316 item for item in vdu_profile if item["id"] == vdu_id
2317 )
2318 instances_min_number = profile.get(
2319 "min-number-of-instances", 1
2320 )
2321 instances_max_number = profile.get(
2322 "max-number-of-instances", 1
2323 )
2324
2325 if scalein_threshold:
2326 uuid = str(uuid4())
2327 name = f"scalein_{uuid}"
2328 operation = scaling_criteria[
2329 "scale-in-relational-operation"
2330 ]
2331 rel_operator = self.rel_operation_types.get(
2332 operation, "<="
2333 )
2334 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2335 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2336 labels = {
2337 "ns_id": nsr_id,
2338 "vnf_member_index": vnf_member_index,
2339 "vdu_id": vdu_id,
2340 }
2341 prom_cfg = {
2342 "alert": name,
2343 "expr": expression,
2344 "for": str(threshold_time) + "m",
2345 "labels": labels,
2346 }
2348 action = {
2349 "scaling-group": scaling_group_name,
2350 "cooldown-time": cooldown_time,
2351 }
2352 alert = {
2353 "uuid": uuid,
2354 "name": name,
2355 "metric": metric_name,
2356 "tags": {
2357 "ns_id": nsr_id,
2358 "vnf_member_index": vnf_member_index,
2359 "vdu_id": vdu_id,
2360 },
2361 "alarm_status": "ok",
2362 "action_type": "scale_in",
2363 "action": action,
2364 "prometheus_config": prom_cfg,
2365 }
2366 alerts.append(alert)
2367
2368 if scaleout_threshold:
2369 uuid = str(uuid4())
2370 name = f"scaleout_{uuid}"
2371 operation = scaling_criteria[
2372 "scale-out-relational-operation"
2373 ]
2374 rel_operator = self.rel_operation_types.get(
2375 operation, "<="
2376 )
2377 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2378 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2379 labels = {
2380 "ns_id": nsr_id,
2381 "vnf_member_index": vnf_member_index,
2382 "vdu_id": vdu_id,
2383 }
2384 prom_cfg = {
2385 "alert": name,
2386 "expr": expression,
2387 "for": str(threshold_time) + "m",
2388 "labels": labels,
2389 }
2391 action = {
2392 "scaling-group": scaling_group_name,
2393 "cooldown-time": cooldown_time,
2394 }
2395 alert = {
2396 "uuid": uuid,
2397 "name": name,
2398 "metric": metric_name,
2399 "tags": {
2400 "ns_id": nsr_id,
2401 "vnf_member_index": vnf_member_index,
2402 "vdu_id": vdu_id,
2403 },
2404 "alarm_status": "ok",
2405 "action_type": "scale_out",
2406 "action": action,
2407 "prometheus_config": prom_cfg,
2408 }
2409 alerts.append(alert)
2410 return alerts
2411
2412 def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
2413 alerts = []
2414 nsr_id = vnfr["nsr-id-ref"]
2415 vnf_member_index = vnfr["member-vnf-index-ref"]
2416
2417 # Checking for VNF alarm configuration
2418 for vdur in vnfr["vdur"]:
2419 vdu_id = vdur["vdu-id-ref"]
2420 vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
2421 if "alarm" in vdu:
2422 # Get VDU monitoring params, since alerts are based on them
2423 vdu_monitoring_params = {}
2424 for mp in vdu.get("monitoring-parameter", []):
2425 vdu_monitoring_params[mp.get("id")] = mp
2426 if not vdu_monitoring_params:
2427 self.logger.error(
2428 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2429 )
2430 continue
2431 # Get alarms in the VDU
2432 alarm_descriptors = vdu["alarm"]
2433 # Create VDU alarms for each alarm in the VDU
2434 for alarm_descriptor in alarm_descriptors:
2435 # Check that the VDU alarm refers to a proper monitoring param
2436 alarm_monitoring_param = alarm_descriptor.get(
2437 "vnf-monitoring-param-ref", ""
2438 )
2439 vdu_specific_monitoring_param = vdu_monitoring_params.get(
2440 alarm_monitoring_param, {}
2441 )
2442 if not vdu_specific_monitoring_param:
2443 self.logger.error(
2444 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2445 )
2446 continue
2447 metric_name = vdu_specific_monitoring_param.get(
2448 "performance-metric"
2449 )
2450 if not metric_name:
2451 self.logger.error(
2452 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2453 )
2454 continue
2455 # Set params of the alarm to be created in Prometheus
2456 metric_name = f"osm_{metric_name}"
2457 metric_threshold = alarm_descriptor.get("value")
2458 uuid = str(uuid4())
2459 alert_name = f"vdu_alarm_{uuid}"
2460 operation = alarm_descriptor["operation"]
2461 rel_operator = self.rel_operation_types.get(operation, "<=")
2462 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2463 expression = f"{metric_selector} {rel_operator} {metric_threshold}"
2464 labels = {
2465 "ns_id": nsr_id,
2466 "vnf_member_index": vnf_member_index,
2467 "vdu_id": vdu_id,
2468 "vdu_name": "{{ $labels.vdu_name }}",
2469 }
2470 prom_cfg = {
2471 "alert": alert_name,
2472 "expr": expression,
2473 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2474 "labels": labels,
2475 }
2476 alarm_action = dict()
2477 for action_type in ["ok", "insufficient-data", "alarm"]:
2478 if (
2479 "actions" in alarm_descriptor
2480 and action_type in alarm_descriptor["actions"]
2481 ):
2482 alarm_action[action_type] = alarm_descriptor["actions"][
2483 action_type
2484 ]
2485 alert = {
2486 "uuid": uuid,
2487 "name": alert_name,
2488 "metric": metric_name,
2489 "tags": {
2490 "ns_id": nsr_id,
2491 "vnf_member_index": vnf_member_index,
2492 "vdu_id": vdu_id,
2493 },
2494 "alarm_status": "ok",
2495 "action_type": "vdu_alarm",
2496 "action": alarm_action,
2497 "prometheus_config": prom_cfg,
2498 }
2499 alerts.append(alert)
2500 return alerts
2501
2502 def update_nsrs_with_pla_result(self, params):
2503 try:
2504 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2505 self.update_db_2(
2506 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2507 )
2508 except Exception as e:
2509 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2510
2511 async def instantiate(self, nsr_id, nslcmop_id):
2512 """
2513
2514 :param nsr_id: ns instance to deploy
2515 :param nslcmop_id: operation to run
2516 :return:
2517 """
2518
2519 # Try to lock HA task here
2520 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2521 if not task_is_locked_by_me:
2522 self.logger.debug(
2523 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2524 )
2525 return
2526
2527 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2528 self.logger.debug(logging_text + "Enter")
2529
2530 # get all needed from database
2531
2532 # database nsrs record
2533 db_nsr = None
2534
2535 # database nslcmops record
2536 db_nslcmop = None
2537
2538 # update operation on nsrs
2539 db_nsr_update = {}
2540 # update operation on nslcmops
2541 db_nslcmop_update = {}
2542
2543 timeout_ns_deploy = self.timeout.ns_deploy
2544
2545 nslcmop_operation_state = None
2546 db_vnfrs = {} # vnf's info indexed by member-index
2547 # n2vc_info = {}
2548 tasks_dict_info = {} # from task to info text
2549 exc = None
2550 error_list = []
2551 stage = [
2552 "Stage 1/5: preparation of the environment.",
2553 "Waiting for previous operations to terminate.",
2554 "",
2555 ]
2556 # ^ stage, step, VIM progress
2557 try:
2558 # wait for any previous tasks in process
2559 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2560
2561 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2562 stage[1] = "Reading from database."
2563 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2564 db_nsr_update["detailed-status"] = "creating"
2565 db_nsr_update["operational-status"] = "init"
2566 self._write_ns_status(
2567 nsr_id=nsr_id,
2568 ns_state="BUILDING",
2569 current_operation="INSTANTIATING",
2570 current_operation_id=nslcmop_id,
2571 other_update=db_nsr_update,
2572 )
2573 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2574
2575 # read from db: operation
2576 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2577 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2578 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2579 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2580 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2581 )
2582 ns_params = db_nslcmop.get("operationParams")
2583 if ns_params and ns_params.get("timeout_ns_deploy"):
2584 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2585
2586 # read from db: ns
2587 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2588 self.logger.debug(logging_text + stage[1])
2589 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2590 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2591 self.logger.debug(logging_text + stage[1])
2592 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2593 self.fs.sync(db_nsr["nsd-id"])
2594 db_nsr["nsd"] = nsd
2595 # nsr_name = db_nsr["name"] # TODO short-name??
2596
2597 # read from db: vnf's of this ns
2598 stage[1] = "Getting vnfrs from db."
2599 self.logger.debug(logging_text + stage[1])
2600 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2601
2602 # read from db: vnfd's for every vnf
2603 db_vnfds = [] # every vnfd data
2604
2605 # for each vnf in ns, read vnfd
2606 for vnfr in db_vnfrs_list:
2607 if vnfr.get("kdur"):
2608 kdur_list = []
2609 for kdur in vnfr["kdur"]:
2610 if kdur.get("additionalParams"):
2611 kdur["additionalParams"] = json.loads(
2612 kdur["additionalParams"]
2613 )
2614 kdur_list.append(kdur)
2615 vnfr["kdur"] = kdur_list
2616
2617 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2618 vnfd_id = vnfr["vnfd-id"]
2619 vnfd_ref = vnfr["vnfd-ref"]
2620 self.fs.sync(vnfd_id)
2621
2622 # if we do not have this vnfd yet, read it from db
2623 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["_id"] == vnfd_id):
2624 # read from db
2625 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2626 vnfd_id, vnfd_ref
2627 )
2628 self.logger.debug(logging_text + stage[1])
2629 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2630
2631 # store vnfd
2632 db_vnfds.append(vnfd)
2633
2634 # Get or generate the _admin.deployed.VCA list
2635 vca_deployed_list = None
2636 if db_nsr["_admin"].get("deployed"):
2637 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2638 if vca_deployed_list is None:
2639 vca_deployed_list = []
2640 configuration_status_list = []
2641 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2642 db_nsr_update["configurationStatus"] = configuration_status_list
2643 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2644 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2645 elif isinstance(vca_deployed_list, dict):
2646 # maintain backward compatibility. Change a dict to list at database
2647 vca_deployed_list = list(vca_deployed_list.values())
2648 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2649 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2650
2651 if not isinstance(
2652 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2653 ):
2654 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2655 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2656
2657 # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2658 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2659 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2660 self.db.set_list(
2661 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2662 )
2663
2664 # n2vc_redesign STEP 2 Deploy Network Scenario
2665 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2666 self._write_op_status(op_id=nslcmop_id, stage=stage)
2667
2668 stage[1] = "Deploying KDUs."
2669 # self.logger.debug(logging_text + "Before deploy_kdus")
2670 # Call deploy_kdus in case the "vdu:kdu" param exists
2671 await self.deploy_kdus(
2672 logging_text=logging_text,
2673 nsr_id=nsr_id,
2674 nslcmop_id=nslcmop_id,
2675 db_vnfrs=db_vnfrs,
2676 db_vnfds=db_vnfds,
2677 task_instantiation_info=tasks_dict_info,
2678 )
2679
2680 stage[1] = "Getting VCA public key."
2681 # n2vc_redesign STEP 1 Get VCA public ssh-key
2682 # feature 1429. Add n2vc public key to needed VMs
2683 n2vc_key = self.n2vc.get_public_key()
2684 n2vc_key_list = [n2vc_key]
2685 if self.vca_config.public_key:
2686 n2vc_key_list.append(self.vca_config.public_key)
2687
2688 stage[1] = "Deploying NS at VIM."
2689 task_ro = asyncio.ensure_future(
2690 self.instantiate_RO(
2691 logging_text=logging_text,
2692 nsr_id=nsr_id,
2693 nsd=nsd,
2694 db_nsr=db_nsr,
2695 db_nslcmop=db_nslcmop,
2696 db_vnfrs=db_vnfrs,
2697 db_vnfds=db_vnfds,
2698 n2vc_key_list=n2vc_key_list,
2699 stage=stage,
2700 )
2701 )
2702 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2703 tasks_dict_info[task_ro] = "Deploying at VIM"
2704
2705 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2706 stage[1] = "Deploying Execution Environments."
2707 self.logger.debug(logging_text + stage[1])
2708
2709 # create namespace and certificate if any helm-based EE is present in the NS
2710 if check_helm_ee_in_ns(db_vnfds):
2711 await self.vca_map["helm-v3"].setup_ns_namespace(
2712 name=nsr_id,
2713 )
2714 # create TLS certificates
2715 await self.vca_map["helm-v3"].create_tls_certificate(
2716 secret_name=self.EE_TLS_NAME,
2717 dns_prefix="*",
2718 nsr_id=nsr_id,
2719 usage="server auth",
2720 namespace=nsr_id,
2721 )
2722
2723 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2724 for vnf_profile in get_vnf_profiles(nsd):
2725 vnfd_id = vnf_profile["vnfd-id"]
2726 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2727 member_vnf_index = str(vnf_profile["id"])
2728 db_vnfr = db_vnfrs[member_vnf_index]
2729 base_folder = vnfd["_admin"]["storage"]
2730 vdu_id = None
2731 vdu_index = 0
2732 vdu_name = None
2733 kdu_name = None
2734 kdu_index = None
2735
2736 # Get additional parameters
2737 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2738 if db_vnfr.get("additionalParamsForVnf"):
2739 deploy_params.update(
2740 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2741 )
2742
2743 descriptor_config = get_configuration(vnfd, vnfd["id"])
2744 if descriptor_config:
2745 self._deploy_n2vc(
2746 logging_text=logging_text
2747 + "member_vnf_index={} ".format(member_vnf_index),
2748 db_nsr=db_nsr,
2749 db_vnfr=db_vnfr,
2750 nslcmop_id=nslcmop_id,
2751 nsr_id=nsr_id,
2752 nsi_id=nsi_id,
2753 vnfd_id=vnfd_id,
2754 vdu_id=vdu_id,
2755 kdu_name=kdu_name,
2756 member_vnf_index=member_vnf_index,
2757 vdu_index=vdu_index,
2758 kdu_index=kdu_index,
2759 vdu_name=vdu_name,
2760 deploy_params=deploy_params,
2761 descriptor_config=descriptor_config,
2762 base_folder=base_folder,
2763 task_instantiation_info=tasks_dict_info,
2764 stage=stage,
2765 )
2766
2767 # Deploy charms for each VDU that supports one.
2768 for vdud in get_vdu_list(vnfd):
2769 vdu_id = vdud["id"]
2770 descriptor_config = get_configuration(vnfd, vdu_id)
2771 vdur = find_in_list(
2772 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2773 )
2774
2775 if vdur.get("additionalParams"):
2776 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2777 else:
2778 deploy_params_vdu = deploy_params
2779 deploy_params_vdu["OSM"] = get_osm_params(
2780 db_vnfr, vdu_id, vdu_count_index=0
2781 )
2782 vdud_count = get_number_of_instances(vnfd, vdu_id)
2783
2784 self.logger.debug("VDUD > {}".format(vdud))
2785 self.logger.debug(
2786 "Descriptor config > {}".format(descriptor_config)
2787 )
2788 if descriptor_config:
2789 vdu_name = None
2790 kdu_name = None
2791 kdu_index = None
2792 for vdu_index in range(vdud_count):
2793 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2794 self._deploy_n2vc(
2795 logging_text=logging_text
2796 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2797 member_vnf_index, vdu_id, vdu_index
2798 ),
2799 db_nsr=db_nsr,
2800 db_vnfr=db_vnfr,
2801 nslcmop_id=nslcmop_id,
2802 nsr_id=nsr_id,
2803 nsi_id=nsi_id,
2804 vnfd_id=vnfd_id,
2805 vdu_id=vdu_id,
2806 kdu_name=kdu_name,
2807 kdu_index=kdu_index,
2808 member_vnf_index=member_vnf_index,
2809 vdu_index=vdu_index,
2810 vdu_name=vdu_name,
2811 deploy_params=deploy_params_vdu,
2812 descriptor_config=descriptor_config,
2813 base_folder=base_folder,
2814 task_instantiation_info=tasks_dict_info,
2815 stage=stage,
2816 )
2817 for kdud in get_kdu_list(vnfd):
2818 kdu_name = kdud["name"]
2819 descriptor_config = get_configuration(vnfd, kdu_name)
2820 if descriptor_config:
2821 vdu_id = None
2822 vdu_index = 0
2823 vdu_name = None
2824 kdu_index, kdur = next(
2825 x
2826 for x in enumerate(db_vnfr["kdur"])
2827 if x[1]["kdu-name"] == kdu_name
2828 )
2829 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2830 if kdur.get("additionalParams"):
2831 deploy_params_kdu.update(
2832 parse_yaml_strings(kdur["additionalParams"].copy())
2833 )
2834
2835 self._deploy_n2vc(
2836 logging_text=logging_text,
2837 db_nsr=db_nsr,
2838 db_vnfr=db_vnfr,
2839 nslcmop_id=nslcmop_id,
2840 nsr_id=nsr_id,
2841 nsi_id=nsi_id,
2842 vnfd_id=vnfd_id,
2843 vdu_id=vdu_id,
2844 kdu_name=kdu_name,
2845 member_vnf_index=member_vnf_index,
2846 vdu_index=vdu_index,
2847 kdu_index=kdu_index,
2848 vdu_name=vdu_name,
2849 deploy_params=deploy_params_kdu,
2850 descriptor_config=descriptor_config,
2851 base_folder=base_folder,
2852 task_instantiation_info=tasks_dict_info,
2853 stage=stage,
2854 )
2855
2856 # Check if each vnf has an exporter for metric collection; if so, update prometheus job records
2857 if "exporters-endpoints" in vnfd.get("df")[0]:
2858 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2859 self.logger.debug("exporter config :{}".format(exporter_config))
2860 artifact_path = "{}/{}/{}".format(
2861 base_folder["folder"],
2862 base_folder["pkg-dir"],
2863 "exporter-endpoint",
2864 )
2865 ee_id = None
2866 ee_config_descriptor = exporter_config
2867 vnfr_id = db_vnfr["id"]
2868 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2869 logging_text,
2870 nsr_id,
2871 vnfr_id,
2872 vdu_id=None,
2873 vdu_index=None,
2874 user=None,
2875 pub_key=None,
2876 )
2877 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2878 self.logger.debug("Artifact_path:{}".format(artifact_path))
2879 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2880 vdu_id_for_prom = None
2881 vdu_index_for_prom = None
2882 for x in get_iterable(db_vnfr, "vdur"):
2883 vdu_id_for_prom = x.get("vdu-id-ref")
2884 vdu_index_for_prom = x.get("count-index")
2885 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2886 ee_id=ee_id,
2887 artifact_path=artifact_path,
2888 ee_config_descriptor=ee_config_descriptor,
2889 vnfr_id=vnfr_id,
2890 nsr_id=nsr_id,
2891 target_ip=rw_mgmt_ip,
2892 element_type="VDU",
2893 vdu_id=vdu_id_for_prom,
2894 vdu_index=vdu_index_for_prom,
2895 )
2896
2897 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2898 if prometheus_jobs:
2899 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2900 self.update_db_2(
2901 "nsrs",
2902 nsr_id,
2903 db_nsr_update,
2904 )
2905
2906 for job in prometheus_jobs:
2907 self.db.set_one(
2908 "prometheus_jobs",
2909 {"job_name": job["job_name"]},
2910 job,
2911 upsert=True,
2912 fail_on_empty=False,
2913 )
2914
2915 # Check if this NS has a charm configuration
2916 descriptor_config = nsd.get("ns-configuration")
2917 if descriptor_config and descriptor_config.get("juju"):
2918 vnfd_id = None
2919 db_vnfr = None
2920 member_vnf_index = None
2921 vdu_id = None
2922 kdu_name = None
2923 kdu_index = None
2924 vdu_index = 0
2925 vdu_name = None
2926
2927 # Get additional parameters
2928 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2929 if db_nsr.get("additionalParamsForNs"):
2930 deploy_params.update(
2931 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2932 )
2933 base_folder = nsd["_admin"]["storage"]
2934 self._deploy_n2vc(
2935 logging_text=logging_text,
2936 db_nsr=db_nsr,
2937 db_vnfr=db_vnfr,
2938 nslcmop_id=nslcmop_id,
2939 nsr_id=nsr_id,
2940 nsi_id=nsi_id,
2941 vnfd_id=vnfd_id,
2942 vdu_id=vdu_id,
2943 kdu_name=kdu_name,
2944 member_vnf_index=member_vnf_index,
2945 vdu_index=vdu_index,
2946 kdu_index=kdu_index,
2947 vdu_name=vdu_name,
2948 deploy_params=deploy_params,
2949 descriptor_config=descriptor_config,
2950 base_folder=base_folder,
2951 task_instantiation_info=tasks_dict_info,
2952 stage=stage,
2953 )
2954
2955 # the rest of the work will be done in the finally block
2956
2957 except (
2958 ROclient.ROClientException,
2959 DbException,
2960 LcmException,
2961 N2VCException,
2962 ) as e:
2963 self.logger.error(
2964 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2965 )
2966 exc = e
2967 except asyncio.CancelledError:
2968 self.logger.error(
2969 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2970 )
2971 exc = "Operation was cancelled"
2972 except Exception as e:
2973 exc = traceback.format_exc()
2974 self.logger.critical(
2975 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2976 exc_info=True,
2977 )
2978 finally:
2979 if exc:
2980 error_list.append(str(exc))
2981 try:
2982 # wait for pending tasks
2983 if tasks_dict_info:
2984 stage[1] = "Waiting for instantiate pending tasks."
2985 self.logger.debug(logging_text + stage[1])
2986 error_list += await self._wait_for_tasks(
2987 logging_text,
2988 tasks_dict_info,
2989 timeout_ns_deploy,
2990 stage,
2991 nslcmop_id,
2992 nsr_id=nsr_id,
2993 )
2994 stage[1] = stage[2] = ""
2995 except asyncio.CancelledError:
2996 error_list.append("Cancelled")
2997 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
2998 await self._wait_for_tasks(
2999 logging_text,
3000 tasks_dict_info,
3001 timeout_ns_deploy,
3002 stage,
3003 nslcmop_id,
3004 nsr_id=nsr_id,
3005 )
3006 except Exception as exc:
3007 error_list.append(str(exc))
3008
3009 # update operation-status
3010 db_nsr_update["operational-status"] = "running"
3011 # let's begin with VCA 'configured' status (later we can change it)
3012 db_nsr_update["config-status"] = "configured"
3013 for task, task_name in tasks_dict_info.items():
3014 if not task.done() or task.cancelled() or task.exception():
3015 if task_name.startswith(self.task_name_deploy_vca):
3016 # A N2VC task is pending
3017 db_nsr_update["config-status"] = "failed"
3018 else:
3019 # RO or KDU task is pending
3020 db_nsr_update["operational-status"] = "failed"
3021
3022 # update status at database
3023 if error_list:
3024 error_detail = ". ".join(error_list)
3025 self.logger.error(logging_text + error_detail)
3026 error_description_nslcmop = "{} Detail: {}".format(
3027 stage[0], error_detail
3028 )
3029 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3030 nslcmop_id, stage[0]
3031 )
3032
3033 db_nsr_update["detailed-status"] = (
3034 error_description_nsr + " Detail: " + error_detail
3035 )
3036 db_nslcmop_update["detailed-status"] = error_detail
3037 nslcmop_operation_state = "FAILED"
3038 ns_state = "BROKEN"
3039 else:
3040 error_detail = None
3041 error_description_nsr = error_description_nslcmop = None
3042 ns_state = "READY"
3043 db_nsr_update["detailed-status"] = "Done"
3044 db_nslcmop_update["detailed-status"] = "Done"
3045 nslcmop_operation_state = "COMPLETED"
3046 # Gather auto-healing and auto-scaling alerts for each vnfr
3047 healing_alerts = []
3048 scaling_alerts = []
3049 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3050 vnfd = next(
3051 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3052 )
3053 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3054 for alert in healing_alerts:
3055 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3056 self.db.create("alerts", alert)
3057
3058 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3059 for alert in scaling_alerts:
3060 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3061 self.db.create("alerts", alert)
3062
3063 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3064 for alert in alarm_alerts:
3065 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3066 self.db.create("alerts", alert)
3067 if db_nsr:
3068 self._write_ns_status(
3069 nsr_id=nsr_id,
3070 ns_state=ns_state,
3071 current_operation="IDLE",
3072 current_operation_id=None,
3073 error_description=error_description_nsr,
3074 error_detail=error_detail,
3075 other_update=db_nsr_update,
3076 )
3077 self._write_op_status(
3078 op_id=nslcmop_id,
3079 stage="",
3080 error_message=error_description_nslcmop,
3081 operation_state=nslcmop_operation_state,
3082 other_update=db_nslcmop_update,
3083 )
3084
3085 if nslcmop_operation_state:
3086 try:
3087 await self.msg.aiowrite(
3088 "ns",
3089 "instantiated",
3090 {
3091 "nsr_id": nsr_id,
3092 "nslcmop_id": nslcmop_id,
3093 "operationState": nslcmop_operation_state,
3094 "startTime": db_nslcmop["startTime"],
3095 "links": db_nslcmop["links"],
3096 "operationParams": {
3097 "nsInstanceId": nsr_id,
3098 "nsdId": db_nsr["nsd-id"],
3099 },
3100 },
3101 )
3102 except Exception as e:
3103 self.logger.error(
3104 logging_text + "kafka_write notification Exception {}".format(e)
3105 )
3106
3107 self.logger.debug(logging_text + "Exit")
3108 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3109
3110 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
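# Simple memoization sketch: cached_vnfds maps vnfd id -> vnfd record, so that
# repeated relation lookups within one operation hit the database only once.
# Example (hypothetical id): _get_vnfd("hackfest-vnf", project, {}) reads from
# db; a second call passing the same dict returns the cached record.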
3111 if vnfd_id not in cached_vnfds:
3112 cached_vnfds[vnfd_id] = self.db.get_one(
3113 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3114 )
3115 return cached_vnfds[vnfd_id]
3116
3117 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3118 if vnf_profile_id not in cached_vnfrs:
3119 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3120 "vnfrs",
3121 {
3122 "member-vnf-index-ref": vnf_profile_id,
3123 "nsr-id-ref": nsr_id,
3124 },
3125 )
3126 return cached_vnfrs[vnf_profile_id]
3127
3128 def _is_deployed_vca_in_relation(
3129 self, vca: DeployedVCA, relation: Relation
3130 ) -> bool:
3131 found = False
3132 for endpoint in (relation.provider, relation.requirer):
3133 if endpoint["kdu-resource-profile-id"]:
3134 continue
3135 found = (
3136 vca.vnf_profile_id == endpoint.vnf_profile_id
3137 and vca.vdu_profile_id == endpoint.vdu_profile_id
3138 and vca.execution_environment_ref == endpoint.execution_environment_ref
3139 )
3140 if found:
3141 break
3142 return found
3143
3144 def _update_ee_relation_data_with_implicit_data(
3145 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3146 ):
3147 ee_relation_data = safe_get_ee_relation(
3148 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3149 )
3150 ee_relation_level = EELevel.get_level(ee_relation_data)
3151 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3152 "execution-environment-ref"
3153 ]:
3154 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3155 vnfd_id = vnf_profile["vnfd-id"]
3156 project = nsd["_admin"]["projects_read"][0]
3157 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3158 entity_id = (
3159 vnfd_id
3160 if ee_relation_level == EELevel.VNF
3161 else ee_relation_data["vdu-profile-id"]
3162 )
3163 ee = get_juju_ee_ref(db_vnfd, entity_id)
3164 if not ee:
3165 raise Exception(
3166 f"not execution environments found for ee_relation {ee_relation_data}"
3167 )
3168 ee_relation_data["execution-environment-ref"] = ee["id"]
3169 return ee_relation_data
3170
3171 def _get_ns_relations(
3172 self,
3173 nsr_id: str,
3174 nsd: Dict[str, Any],
3175 vca: DeployedVCA,
3176 cached_vnfds: Dict[str, Any],
3177 ) -> List[Relation]:
3178 relations = []
3179 db_ns_relations = get_ns_configuration_relation_list(nsd)
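# Relations may be declared in either of two descriptor formats (illustrative,
# hypothetical ids/endpoints):
#     {"name": "r1", "provider": {...}, "requirer": {...}}
#     {"name": "r1", "entities": [{"id": "vnf1", "endpoint": "ep1"}, {"id": "ns-id", "endpoint": "ep2"}]}
# The "entities" form is normalized into provider/requirer dicts below.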
3180 for r in db_ns_relations:
3181 provider_dict = None
3182 requirer_dict = None
3183 if all(key in r for key in ("provider", "requirer")):
3184 provider_dict = r["provider"]
3185 requirer_dict = r["requirer"]
3186 elif "entities" in r:
3187 provider_id = r["entities"][0]["id"]
3188 provider_dict = {
3189 "nsr-id": nsr_id,
3190 "endpoint": r["entities"][0]["endpoint"],
3191 }
3192 if provider_id != nsd["id"]:
3193 provider_dict["vnf-profile-id"] = provider_id
3194 requirer_id = r["entities"][1]["id"]
3195 requirer_dict = {
3196 "nsr-id": nsr_id,
3197 "endpoint": r["entities"][1]["endpoint"],
3198 }
3199 if requirer_id != nsd["id"]:
3200 requirer_dict["vnf-profile-id"] = requirer_id
3201 else:
3202 raise Exception(
3203 "provider/requirer or entities must be included in the relation."
3204 )
3205 relation_provider = self._update_ee_relation_data_with_implicit_data(
3206 nsr_id, nsd, provider_dict, cached_vnfds
3207 )
3208 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3209 nsr_id, nsd, requirer_dict, cached_vnfds
3210 )
3211 provider = EERelation(relation_provider)
3212 requirer = EERelation(relation_requirer)
3213 relation = Relation(r["name"], provider, requirer)
3214 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3215 if vca_in_relation:
3216 relations.append(relation)
3217 return relations
3218
3219 def _get_vnf_relations(
3220 self,
3221 nsr_id: str,
3222 nsd: Dict[str, Any],
3223 vca: DeployedVCA,
3224 cached_vnfds: Dict[str, Any],
3225 ) -> List[Relation]:
3226 relations = []
3227 if vca.target_element == "ns":
3228 self.logger.debug("VCA is a NS charm, not a VNF.")
3229 return relations
3230 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3231 vnf_profile_id = vnf_profile["id"]
3232 vnfd_id = vnf_profile["vnfd-id"]
3233 project = nsd["_admin"]["projects_read"][0]
3234 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3235 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3236 for r in db_vnf_relations:
3237 provider_dict = None
3238 requirer_dict = None
3239 if all(key in r for key in ("provider", "requirer")):
3240 provider_dict = r["provider"]
3241 requirer_dict = r["requirer"]
3242 elif "entities" in r:
3243 provider_id = r["entities"][0]["id"]
3244 provider_dict = {
3245 "nsr-id": nsr_id,
3246 "vnf-profile-id": vnf_profile_id,
3247 "endpoint": r["entities"][0]["endpoint"],
3248 }
3249 if provider_id != vnfd_id:
3250 provider_dict["vdu-profile-id"] = provider_id
3251 requirer_id = r["entities"][1]["id"]
3252 requirer_dict = {
3253 "nsr-id": nsr_id,
3254 "vnf-profile-id": vnf_profile_id,
3255 "endpoint": r["entities"][1]["endpoint"],
3256 }
3257 if requirer_id != vnfd_id:
3258 requirer_dict["vdu-profile-id"] = requirer_id
3259 else:
3260 raise Exception(
3261 "provider/requirer or entities must be included in the relation."
3262 )
3263 relation_provider = self._update_ee_relation_data_with_implicit_data(
3264 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3265 )
3266 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3267 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3268 )
3269 provider = EERelation(relation_provider)
3270 requirer = EERelation(relation_requirer)
3271 relation = Relation(r["name"], provider, requirer)
3272 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3273 if vca_in_relation:
3274 relations.append(relation)
3275 return relations
3276
3277 def _get_kdu_resource_data(
3278 self,
3279 ee_relation: EERelation,
3280 db_nsr: Dict[str, Any],
3281 cached_vnfds: Dict[str, Any],
3282 ) -> DeployedK8sResource:
3283 nsd = get_nsd(db_nsr)
3284 vnf_profiles = get_vnf_profiles(nsd)
3285 vnfd_id = find_in_list(
3286 vnf_profiles,
3287 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3288 )["vnfd-id"]
3289 project = nsd["_admin"]["projects_read"][0]
3290 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3291 kdu_resource_profile = get_kdu_resource_profile(
3292 db_vnfd, ee_relation.kdu_resource_profile_id
3293 )
3294 kdu_name = kdu_resource_profile["kdu-name"]
3295 deployed_kdu, _ = get_deployed_kdu(
3296 db_nsr.get("_admin", ()).get("deployed", ()),
3297 kdu_name,
3298 ee_relation.vnf_profile_id,
3299 )
3300 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3301 return deployed_kdu
3302
3303 def _get_deployed_component(
3304 self,
3305 ee_relation: EERelation,
3306 db_nsr: Dict[str, Any],
3307 cached_vnfds: Dict[str, Any],
3308 ) -> DeployedComponent:
3309 nsr_id = db_nsr["_id"]
3310 deployed_component = None
3311 ee_level = EELevel.get_level(ee_relation)
3312 if ee_level == EELevel.NS:
3313 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3314 if vca:
3315 deployed_component = DeployedVCA(nsr_id, vca)
3316 elif ee_level == EELevel.VNF:
3317 vca = get_deployed_vca(
3318 db_nsr,
3319 {
3320 "vdu_id": None,
3321 "member-vnf-index": ee_relation.vnf_profile_id,
3322 "ee_descriptor_id": ee_relation.execution_environment_ref,
3323 },
3324 )
3325 if vca:
3326 deployed_component = DeployedVCA(nsr_id, vca)
3327 elif ee_level == EELevel.VDU:
3328 vca = get_deployed_vca(
3329 db_nsr,
3330 {
3331 "vdu_id": ee_relation.vdu_profile_id,
3332 "member-vnf-index": ee_relation.vnf_profile_id,
3333 "ee_descriptor_id": ee_relation.execution_environment_ref,
3334 },
3335 )
3336 if vca:
3337 deployed_component = DeployedVCA(nsr_id, vca)
3338 elif ee_level == EELevel.KDU:
3339 kdu_resource_data = self._get_kdu_resource_data(
3340 ee_relation, db_nsr, cached_vnfds
3341 )
3342 if kdu_resource_data:
3343 deployed_component = DeployedK8sResource(kdu_resource_data)
3344 return deployed_component
3345
3346 async def _add_relation(
3347 self,
3348 relation: Relation,
3349 vca_type: str,
3350 db_nsr: Dict[str, Any],
3351 cached_vnfds: Dict[str, Any],
3352 cached_vnfrs: Dict[str, Any],
3353 ) -> bool:
3354 deployed_provider = self._get_deployed_component(
3355 relation.provider, db_nsr, cached_vnfds
3356 )
3357 deployed_requirer = self._get_deployed_component(
3358 relation.requirer, db_nsr, cached_vnfds
3359 )
3360 if (
3361 deployed_provider
3362 and deployed_requirer
3363 and deployed_provider.config_sw_installed
3364 and deployed_requirer.config_sw_installed
3365 ):
3366 provider_db_vnfr = (
3367 self._get_vnfr(
3368 relation.provider.nsr_id,
3369 relation.provider.vnf_profile_id,
3370 cached_vnfrs,
3371 )
3372 if relation.provider.vnf_profile_id
3373 else None
3374 )
3375 requirer_db_vnfr = (
3376 self._get_vnfr(
3377 relation.requirer.nsr_id,
3378 relation.requirer.vnf_profile_id,
3379 cached_vnfrs,
3380 )
3381 if relation.requirer.vnf_profile_id
3382 else None
3383 )
3384 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3385 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3386 provider_relation_endpoint = RelationEndpoint(
3387 deployed_provider.ee_id,
3388 provider_vca_id,
3389 relation.provider.endpoint,
3390 )
3391 requirer_relation_endpoint = RelationEndpoint(
3392 deployed_requirer.ee_id,
3393 requirer_vca_id,
3394 relation.requirer.endpoint,
3395 )
3396 try:
3397 await self.vca_map[vca_type].add_relation(
3398 provider=provider_relation_endpoint,
3399 requirer=requirer_relation_endpoint,
3400 )
3401 except N2VCException as exception:
3402 self.logger.error(exception)
3403 raise LcmException(exception)
3404 return True
3405 return False
3406
3407 async def _add_vca_relations(
3408 self,
3409 logging_text,
3410 nsr_id,
3411 vca_type: str,
3412 vca_index: int,
3413 timeout: int = 3600,
3414 ) -> bool:
3415 # steps:
3416 # 1. find all relations for this VCA
3417 # 2. wait for other peers related
3418 # 3. add relations
3419
3420 try:
3421 # STEP 1: find all relations for this VCA
3422
3423 # read nsr record
3424 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3425 nsd = get_nsd(db_nsr)
3426
3427 # this VCA data
3428 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3429 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3430
3431 cached_vnfds = {}
3432 cached_vnfrs = {}
3433 relations = []
3434 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3435 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3436
3437 # if no relations, terminate
3438 if not relations:
3439 self.logger.debug(logging_text + " No relations")
3440 return True
3441
3442 self.logger.debug(logging_text + " adding relations {}".format(relations))
3443
3444 # add all relations
3445 start = time()
3446 while True:
3447 # check timeout
3448 now = time()
3449 if now - start >= timeout:
3450 self.logger.error(logging_text + " : timeout adding relations")
3451 return False
3452
3453 # reload nsr from database (we need the updated record: _admin.deployed.VCA)
3454 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3455
3456 # for each relation, find the VCA's related
3457 for relation in relations.copy():
3458 added = await self._add_relation(
3459 relation,
3460 vca_type,
3461 db_nsr,
3462 cached_vnfds,
3463 cached_vnfrs,
3464 )
3465 if added:
3466 relations.remove(relation)
3467
3468 if not relations:
3469 self.logger.debug("Relations added")
3470 break
3471 await asyncio.sleep(5.0)
3472
3473 return True
3474
3475 except Exception as e:
3476 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3477 return False
3478
3479 async def _install_kdu(
3480 self,
3481 nsr_id: str,
3482 nsr_db_path: str,
3483 vnfr_data: dict,
3484 kdu_index: int,
3485 kdud: dict,
3486 vnfd: dict,
3487 k8s_instance_info: dict,
3488 k8params: dict = None,
3489 timeout: int = 600,
3490 vca_id: str = None,
3491 ):
3492 try:
3493 k8sclustertype = k8s_instance_info["k8scluster-type"]
3494 # Instantiate kdu
3495 db_dict_install = {
3496 "collection": "nsrs",
3497 "filter": {"_id": nsr_id},
3498 "path": nsr_db_path,
3499 }
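# Sketch of the progress-reporting descriptor (hypothetical path value):
#     {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s.0"}
# so the K8s connector can write installation status under that nsr path.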
3500
3501 if k8s_instance_info.get("kdu-deployment-name"):
3502 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3503 else:
3504 kdu_instance = self.k8scluster_map[
3505 k8sclustertype
3506 ].generate_kdu_instance_name(
3507 db_dict=db_dict_install,
3508 kdu_model=k8s_instance_info["kdu-model"],
3509 kdu_name=k8s_instance_info["kdu-name"],
3510 )
3511
3512 # Update the nsrs table with the kdu-instance value
3513 self.update_db_2(
3514 item="nsrs",
3515 _id=nsr_id,
3516 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3517 )
3518
3519 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3520 # `juju-bundle`. This check is needed because there is no standard/homogeneous namespace
3521 # between Helm Chart and Juju Bundle-based KNFs. If we find a way of having a homogeneous
3522 # namespace, this first check could be removed, and the next step would be done for any kind
3523 # of KNF.
3524 # TODO -> find a way to have a homogeneous namespace between Helm Chart and Juju Bundle-based
3525 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3526 if k8sclustertype in ("juju", "juju-bundle"):
3527 # First, verify whether the current namespace is present in `_admin.projects_read` (if not, it means
3528 # that the user passed a custom namespace in which they want the KDU to be deployed)
3529 if (
3530 self.db.count(
3531 table="nsrs",
3532 q_filter={
3533 "_id": nsr_id,
3534 "_admin.projects_write": k8s_instance_info["namespace"],
3535 "_admin.projects_read": k8s_instance_info["namespace"],
3536 },
3537 )
3538 > 0
3539 ):
3540 self.logger.debug(
3541 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3542 )
3543 self.update_db_2(
3544 item="nsrs",
3545 _id=nsr_id,
3546 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3547 )
3548 k8s_instance_info["namespace"] = kdu_instance
3549
3550 await self.k8scluster_map[k8sclustertype].install(
3551 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3552 kdu_model=k8s_instance_info["kdu-model"],
3553 atomic=True,
3554 params=k8params,
3555 db_dict=db_dict_install,
3556 timeout=timeout,
3557 kdu_name=k8s_instance_info["kdu-name"],
3558 namespace=k8s_instance_info["namespace"],
3559 kdu_instance=kdu_instance,
3560 vca_id=vca_id,
3561 )
3562
3563 # Obtain the deployed services in order to get the management service IP
3564 services = await self.k8scluster_map[k8sclustertype].get_services(
3565 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3566 kdu_instance=kdu_instance,
3567 namespace=k8s_instance_info["namespace"],
3568 )
3569
3570 # Obtain management service info (if any)
3571 vnfr_update_dict = {}
3572 kdu_config = get_configuration(vnfd, kdud["name"])
3573 if kdu_config:
3574 target_ee_list = kdu_config.get("execution-environment-list", [])
3575 else:
3576 target_ee_list = []
3577
3578 if services:
3579 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3580 mgmt_services = [
3581 service
3582 for service in kdud.get("service", [])
3583 if service.get("mgmt-service")
3584 ]
3585 for mgmt_service in mgmt_services:
3586 for service in services:
3587 if service["name"].startswith(mgmt_service["name"]):
3588 # Mgmt service found; obtain the service IP
3589 ip = service.get("external_ip", service.get("cluster_ip"))
3590 if isinstance(ip, list) and len(ip) == 1:
3591 ip = ip[0]
3592
3593 vnfr_update_dict[
3594 "kdur.{}.ip-address".format(kdu_index)
3595 ] = ip
3596
3597 # Check whether the mgmt IP must also be updated at the VNF level
3598 service_external_cp = mgmt_service.get(
3599 "external-connection-point-ref"
3600 )
3601 if service_external_cp:
3602 if (
3603 deep_get(vnfd, ("mgmt-interface", "cp"))
3604 == service_external_cp
3605 ):
3606 vnfr_update_dict["ip-address"] = ip
3607
3608 if find_in_list(
3609 target_ee_list,
3610 lambda ee: ee.get(
3611 "external-connection-point-ref", ""
3612 )
3613 == service_external_cp,
3614 ):
3615 vnfr_update_dict[
3616 "kdur.{}.ip-address".format(kdu_index)
3617 ] = ip
3618 break
3619 else:
3620 self.logger.warning(
3621 "Mgmt service name: {} not found".format(
3622 mgmt_service["name"]
3623 )
3624 )
3625
3626 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3627 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3628
3629 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3630 if (
3631 kdu_config
3632 and kdu_config.get("initial-config-primitive")
3633 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3634 ):
3635 initial_config_primitive_list = kdu_config.get(
3636 "initial-config-primitive"
3637 )
3638 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3639
3640 for initial_config_primitive in initial_config_primitive_list:
3641 primitive_params_ = self._map_primitive_params(
3642 initial_config_primitive, {}, {}
3643 )
3644
3645 await asyncio.wait_for(
3646 self.k8scluster_map[k8sclustertype].exec_primitive(
3647 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3648 kdu_instance=kdu_instance,
3649 primitive_name=initial_config_primitive["name"],
3650 params=primitive_params_,
3651 db_dict=db_dict_install,
3652 vca_id=vca_id,
3653 ),
3654 timeout=timeout,
3655 )
3656
3657 except Exception as e:
3658 # Update the db with the error and re-raise the exception
3659 try:
3660 self.update_db_2(
3661 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3662 )
3663 self.update_db_2(
3664 "vnfrs",
3665 vnfr_data.get("_id"),
3666 {"kdur.{}.status".format(kdu_index): "ERROR"},
3667 )
3668 except Exception as error:
3669 # ignore to keep original exception
3670 self.logger.warning(
3671 f"An exception occurred while updating DB: {str(error)}"
3672 )
3673 # reraise original error
3674 raise
3675
3676 return kdu_instance
3677
3678 async def deploy_kdus(
3679 self,
3680 logging_text,
3681 nsr_id,
3682 nslcmop_id,
3683 db_vnfrs,
3684 db_vnfds,
3685 task_instantiation_info,
3686 ):
3687 # Launch kdus if present in the descriptor
3688
3689 k8scluster_id_2_uuid = {
3690 "helm-chart-v3": {},
3691 "juju-bundle": {},
3692 }
3693
3694 async def _get_cluster_id(cluster_id, cluster_type):
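# Resolve (and cache) the internal cluster UUID for the given OSM
# k8scluster id and type, initializing helm-v3 on legacy clusters
# that were never initialized for it.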
3695 nonlocal k8scluster_id_2_uuid
3696 if cluster_id in k8scluster_id_2_uuid[cluster_type]:
3697 return k8scluster_id_2_uuid[cluster_type][cluster_id]
3698
3699 # check if the K8s cluster is being created; wait for any related pending tasks to complete
3700 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3701 "k8scluster", cluster_id
3702 )
3703 if task_dependency:
3704 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3705 task_name, cluster_id
3706 )
3707 self.logger.debug(logging_text + text)
3708 await asyncio.wait(task_dependency, timeout=3600)
3709
3710 db_k8scluster = self.db.get_one(
3711 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3712 )
3713 if not db_k8scluster:
3714 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3715
3716 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3717 if not k8s_id:
3718 if cluster_type == "helm-chart-v3":
3719 try:
3720 # backward compatibility for existing clusters that have not been initialized for helm v3
3721 k8s_credentials = yaml.safe_dump(
3722 db_k8scluster.get("credentials")
3723 )
3724 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3725 k8s_credentials, reuse_cluster_uuid=cluster_id
3726 )
3727 db_k8scluster_update = {}
3728 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3729 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3730 db_k8scluster_update[
3731 "_admin.helm-chart-v3.created"
3732 ] = uninstall_sw
3733 db_k8scluster_update[
3734 "_admin.helm-chart-v3.operationalState"
3735 ] = "ENABLED"
3736 self.update_db_2(
3737 "k8sclusters", cluster_id, db_k8scluster_update
3738 )
3739 except Exception as e:
3740 self.logger.error(
3741 logging_text
3742 + "error initializing helm-v3 cluster: {}".format(str(e))
3743 )
3744 raise LcmException(
3745 "K8s cluster '{}' has not been initialized for '{}'".format(
3746 cluster_id, cluster_type
3747 )
3748 )
3749 else:
3750 raise LcmException(
3751 "K8s cluster '{}' has not been initialized for '{}'".format(
3752 cluster_id, cluster_type
3753 )
3754 )
3755 k8scluster_id_2_uuid[cluster_type][cluster_id] = k8s_id
3756 return k8s_id
3757
3758 logging_text += "Deploy kdus: "
3759 step = ""
3760 try:
3761 db_nsr_update = {"_admin.deployed.K8s": []}
3762 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3763
3764 index = 0
3765 updated_cluster_list = []
3766 updated_v3_cluster_list = []
3767
3768 for vnfr_data in db_vnfrs.values():
3769 vca_id = self.get_vca_id(vnfr_data, {})
3770 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3771 # Step 0: Prepare and set parameters
3772 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3773 vnfd_id = vnfr_data.get("vnfd-id")
3774 vnfd_with_id = find_in_list(
3775 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3776 )
3777 kdud = next(
3778 kdud
3779 for kdud in vnfd_with_id["kdu"]
3780 if kdud["name"] == kdur["kdu-name"]
3781 )
3782 namespace = kdur.get("k8s-namespace")
3783 kdu_deployment_name = kdur.get("kdu-deployment-name")
3784 if kdur.get("helm-chart"):
3785 kdumodel = kdur["helm-chart"]
3786 # Helm charts are deployed with helm3 (helm-chart-v3)
3787 k8sclustertype = "helm-chart-v3"
3788 self.logger.debug("kdur: {}".format(kdur))
3789 elif kdur.get("juju-bundle"):
3790 kdumodel = kdur["juju-bundle"]
3791 k8sclustertype = "juju-bundle"
3792 else:
3793 raise LcmException(
3794 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3795 "juju-bundle. Maybe an old NBI version is running".format(
3796 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3797 )
3798 )
3799 # check whether kdumodel refers to a packaged artifact file that exists in storage
3800 try:
3801 vnfd_with_id = find_in_list(
3802 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3803 )
3804 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3805 if storage: # may not be present if the vnfd has no artifacts
3806 # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
3807 if storage["pkg-dir"]:
3808 filename = "{}/{}/{}s/{}".format(
3809 storage["folder"],
3810 storage["pkg-dir"],
3811 k8sclustertype,
3812 kdumodel,
3813 )
3814 else:
3815 filename = "{}/Scripts/{}s/{}".format(
3816 storage["folder"],
3817 k8sclustertype,
3818 kdumodel,
3819 )
3820 if self.fs.file_exists(
3821 filename, mode="file"
3822 ) or self.fs.file_exists(filename, mode="dir"):
3823 kdumodel = self.fs.path + filename
3824 except (asyncio.TimeoutError, asyncio.CancelledError):
3825 raise
3826 except Exception as e: # it is not a file
3827 self.logger.warning(f"An exception occurred: {str(e)}")
3828
3829 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3830 step = "Synchronize repos for k8s cluster '{}'".format(
3831 k8s_cluster_id
3832 )
3833 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3834
3835 # Synchronize repos
3836 if (
3837 k8sclustertype == "helm-chart"
3838 and cluster_uuid not in updated_cluster_list
3839 ) or (
3840 k8sclustertype == "helm-chart-v3"
3841 and cluster_uuid not in updated_v3_cluster_list
3842 ):
3843 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3844 self.k8scluster_map[k8sclustertype].synchronize_repos(
3845 cluster_uuid=cluster_uuid
3846 )
3847 )
3848 if del_repo_list or added_repo_dict:
3849 if k8sclustertype == "helm-chart":
3850 unset = {
3851 "_admin.helm_charts_added." + item: None
3852 for item in del_repo_list
3853 }
3854 updated = {
3855 "_admin.helm_charts_added." + item: name
3856 for item, name in added_repo_dict.items()
3857 }
3858 updated_cluster_list.append(cluster_uuid)
3859 elif k8sclustertype == "helm-chart-v3":
3860 unset = {
3861 "_admin.helm_charts_v3_added." + item: None
3862 for item in del_repo_list
3863 }
3864 updated = {
3865 "_admin.helm_charts_v3_added." + item: name
3866 for item, name in added_repo_dict.items()
3867 }
3868 updated_v3_cluster_list.append(cluster_uuid)
3869 self.logger.debug(
3870 logging_text + "repos synchronized on k8s cluster "
3871 "'{}' to_delete: {}, to_add: {}".format(
3872 k8s_cluster_id, del_repo_list, added_repo_dict
3873 )
3874 )
3875 self.db.set_one(
3876 "k8sclusters",
3877 {"_id": k8s_cluster_id},
3878 updated,
3879 unset=unset,
3880 )
3881
3882 # Instantiate kdu
3883 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3884 vnfr_data["member-vnf-index-ref"],
3885 kdur["kdu-name"],
3886 k8s_cluster_id,
3887 )
3888 k8s_instance_info = {
3889 "kdu-instance": None,
3890 "k8scluster-uuid": cluster_uuid,
3891 "k8scluster-type": k8sclustertype,
3892 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3893 "kdu-name": kdur["kdu-name"],
3894 "kdu-model": kdumodel,
3895 "namespace": namespace,
3896 "kdu-deployment-name": kdu_deployment_name,
3897 }
3898 db_path = "_admin.deployed.K8s.{}".format(index)
3899 db_nsr_update[db_path] = k8s_instance_info
3900 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3901 vnfd_with_id = find_in_list(
3902 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3903 )
3904 task = asyncio.ensure_future(
3905 self._install_kdu(
3906 nsr_id,
3907 db_path,
3908 vnfr_data,
3909 kdu_index,
3910 kdud,
3911 vnfd_with_id,
3912 k8s_instance_info,
3913 k8params=desc_params,
3914 timeout=1800,
3915 vca_id=vca_id,
3916 )
3917 )
3918 self.lcm_tasks.register(
3919 "ns",
3920 nsr_id,
3921 nslcmop_id,
3922 "instantiate_KDU-{}".format(index),
3923 task,
3924 )
3925 task_instantiation_info[task] = "Deploying KDU {}".format(
3926 kdur["kdu-name"]
3927 )
3928
3929 index += 1
3930
3931 except (LcmException, asyncio.CancelledError):
3932 raise
3933 except Exception as e:
3934 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3935 if isinstance(e, (N2VCException, DbException)):
3936 self.logger.error(logging_text + msg)
3937 else:
3938 self.logger.critical(logging_text + msg, exc_info=True)
3939 raise LcmException(msg)
3940 finally:
3941 if db_nsr_update:
3942 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3943
3944 def _deploy_n2vc(
3945 self,
3946 logging_text,
3947 db_nsr,
3948 db_vnfr,
3949 nslcmop_id,
3950 nsr_id,
3951 nsi_id,
3952 vnfd_id,
3953 vdu_id,
3954 kdu_name,
3955 member_vnf_index,
3956 vdu_index,
3957 kdu_index,
3958 vdu_name,
3959 deploy_params,
3960 descriptor_config,
3961 base_folder,
3962 task_instantiation_info,
3963 stage,
3964 ):
3965 # launch instantiate_N2VC in an asyncio task and register the task object
3966 # Look up this charm's information in the database at <nsrs>._admin.deployed.VCA;
3967 # if not found, create one entry and update the database
3968 # (fills db_nsr._admin.deployed.VCA.<index>)
3969
3970 self.logger.debug(
3971 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
3972 )
3973
3974 charm_name = ""
3975 get_charm_name = False
3976 if "execution-environment-list" in descriptor_config:
3977 ee_list = descriptor_config.get("execution-environment-list", [])
3978 elif "juju" in descriptor_config:
3979 ee_list = [descriptor_config] # ns charms
3980 if "execution-environment-list" not in descriptor_config:
3981 # charm name is only required for ns charms
3982 get_charm_name = True
3983 else: # other types, such as scripts, are not supported
3984 ee_list = []
3985
3986 for ee_item in ee_list:
3987 self.logger.debug(
3988 logging_text
3989 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3990 ee_item.get("juju"), ee_item.get("helm-chart")
3991 )
3992 )
3993 ee_descriptor_id = ee_item.get("id")
3994 vca_name, charm_name, vca_type = self.get_vca_info(
3995 ee_item, db_nsr, get_charm_name
3996 )
3997 if not vca_type:
3998 self.logger.debug(
3999 logging_text + "skipping, non juju/charm/helm configuration"
4000 )
4001 continue
4002
4003 vca_index = -1
4004 for vca_index, vca_deployed in enumerate(
4005 db_nsr["_admin"]["deployed"]["VCA"]
4006 ):
4007 if not vca_deployed:
4008 continue
4009 if (
4010 vca_deployed.get("member-vnf-index") == member_vnf_index
4011 and vca_deployed.get("vdu_id") == vdu_id
4012 and vca_deployed.get("kdu_name") == kdu_name
4013 and vca_deployed.get("vdu_count_index", 0) == vdu_index
4014 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
4015 ):
4016 break
4017 else:
4018 # not found, create one.
4019 target = (
4020 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
4021 )
4022 if vdu_id:
4023 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
4024 elif kdu_name:
4025 target += "/kdu/{}".format(kdu_name)
4026 vca_deployed = {
4027 "target_element": target,
4028 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4029 "member-vnf-index": member_vnf_index,
4030 "vdu_id": vdu_id,
4031 "kdu_name": kdu_name,
4032 "vdu_count_index": vdu_index,
4033 "operational-status": "init", # TODO revise
4034 "detailed-status": "", # TODO revise
4035 "step": "initial-deploy", # TODO revise
4036 "vnfd_id": vnfd_id,
4037 "vdu_name": vdu_name,
4038 "type": vca_type,
4039 "ee_descriptor_id": ee_descriptor_id,
4040 "charm_name": charm_name,
4041 }
4042 vca_index += 1
4043
4044 # create VCA and configurationStatus in db
4045 db_dict = {
4046 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
4047 "configurationStatus.{}".format(vca_index): dict(),
4048 }
4049 self.update_db_2("nsrs", nsr_id, db_dict)
4050
4051 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
4052
4053 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
4054 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
4055 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
4056
4057 # Launch task
4058 task_n2vc = asyncio.ensure_future(
4059 self.instantiate_N2VC(
4060 logging_text=logging_text,
4061 vca_index=vca_index,
4062 nsi_id=nsi_id,
4063 db_nsr=db_nsr,
4064 db_vnfr=db_vnfr,
4065 vdu_id=vdu_id,
4066 kdu_name=kdu_name,
4067 vdu_index=vdu_index,
4068 kdu_index=kdu_index,
4069 deploy_params=deploy_params,
4070 config_descriptor=descriptor_config,
4071 base_folder=base_folder,
4072 nslcmop_id=nslcmop_id,
4073 stage=stage,
4074 vca_type=vca_type,
4075 vca_name=vca_name,
4076 ee_config_descriptor=ee_item,
4077 )
4078 )
4079 self.lcm_tasks.register(
4080 "ns",
4081 nsr_id,
4082 nslcmop_id,
4083 "instantiate_N2VC-{}".format(vca_index),
4084 task_n2vc,
4085 )
4086 task_instantiation_info[
4087 task_n2vc
4088 ] = self.task_name_deploy_vca + " {}.{}".format(
4089 member_vnf_index or "", vdu_id or ""
4090 )
4091
4092 def _format_additional_params(self, params):
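# Illustrative example: {"cfg": "!!yaml {a: 1}"} is returned as
# {"cfg": {"a": 1}}; values not starting with "!!yaml " are left unchanged.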
4093 params = params or {}
4094 for key, value in params.items():
4095 if str(value).startswith("!!yaml "):
4096 params[key] = yaml.safe_load(value[7:])
4097 return params
4098
4099 def _get_terminate_primitive_params(self, seq, vnf_index):
4100 primitive = seq.get("name")
4101 primitive_params = {}
4102 params = {
4103 "member_vnf_index": vnf_index,
4104 "primitive": primitive,
4105 "primitive_params": primitive_params,
4106 }
4107 desc_params = {}
4108 return self._map_primitive_params(seq, params, desc_params)
4109
4110 # sub-operations
4111
4112 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4113 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4114 if op.get("operationState") == "COMPLETED":
4115 # b. Skip sub-operation
4116 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4117 return self.SUBOPERATION_STATUS_SKIP
4118 else:
4119 # c. retry executing sub-operation
4120 # The sub-operation exists, and operationState != 'COMPLETED'
4121 # Update operationState = 'PROCESSING' to indicate a retry.
4122 operationState = "PROCESSING"
4123 detailed_status = "In progress"
4124 self._update_suboperation_status(
4125 db_nslcmop, op_index, operationState, detailed_status
4126 )
4127 # Return the sub-operation index
4128 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4129 # with arguments extracted from the sub-operation
4130 return op_index
4131
4132 # Find a sub-operation where all keys in a matching dictionary must match
4133 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4134 def _find_suboperation(self, db_nslcmop, match):
4135 if db_nslcmop and match:
4136 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4137 for i, op in enumerate(op_list):
4138 if all(op.get(k) == match[k] for k in match):
4139 return i
4140 return self.SUBOPERATION_STATUS_NOT_FOUND
4141
4142 # Update status for a sub-operation given its index
4143 def _update_suboperation_status(
4144 self, db_nslcmop, op_index, operationState, detailed_status
4145 ):
4146 # Update DB for HA tasks
4147 q_filter = {"_id": db_nslcmop["_id"]}
4148 update_dict = {
4149 "_admin.operations.{}.operationState".format(op_index): operationState,
4150 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4151 }
4152 self.db.set_one(
4153 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4154 )
4155
4156 # Add sub-operation, return the index of the added sub-operation
4157 # Optionally, set operationState, detailed-status, and operationType
4158 # Status and type are currently set for 'scale' sub-operations:
4159 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4160 # 'detailed-status' : status message
4161 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4162 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
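# Illustrative example: _add_suboperation(db_nslcmop, "1", None, None, None,
# "touch", {"file": "/home/ubuntu/first-touch"}, operationState="PROCESSING",
# detailed_status="In progress", operationType="PRE-SCALE") appends the new
# sub-operation to _admin.operations and returns its index in that list.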
4163 def _add_suboperation(
4164 self,
4165 db_nslcmop,
4166 vnf_index,
4167 vdu_id,
4168 vdu_count_index,
4169 vdu_name,
4170 primitive,
4171 mapped_primitive_params,
4172 operationState=None,
4173 detailed_status=None,
4174 operationType=None,
4175 RO_nsr_id=None,
4176 RO_scaling_info=None,
4177 ):
4178 if not db_nslcmop:
4179 return self.SUBOPERATION_STATUS_NOT_FOUND
4180 # Get the "_admin.operations" list, if it exists
4181 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4182 op_list = db_nslcmop_admin.get("operations")
4183 # Create or append to the "_admin.operations" list
4184 new_op = {
4185 "member_vnf_index": vnf_index,
4186 "vdu_id": vdu_id,
4187 "vdu_count_index": vdu_count_index,
4188 "primitive": primitive,
4189 "primitive_params": mapped_primitive_params,
4190 }
4191 if operationState:
4192 new_op["operationState"] = operationState
4193 if detailed_status:
4194 new_op["detailed-status"] = detailed_status
4195 if operationType:
4196 new_op["lcmOperationType"] = operationType
4197 if RO_nsr_id:
4198 new_op["RO_nsr_id"] = RO_nsr_id
4199 if RO_scaling_info:
4200 new_op["RO_scaling_info"] = RO_scaling_info
4201 if not op_list:
4202 # No existing operations, create key 'operations' with current operation as first list element
4203 db_nslcmop_admin.update({"operations": [new_op]})
4204 op_list = db_nslcmop_admin.get("operations")
4205 else:
4206 # Existing operations, append operation to list
4207 op_list.append(new_op)
4208
4209 db_nslcmop_update = {"_admin.operations": op_list}
4210 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4211 op_index = len(op_list) - 1
4212 return op_index
4213
4214 # Helper methods for scale() sub-operations
4215
4216 # pre-scale/post-scale:
4217 # Check for 3 different cases:
4218 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4219 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4220 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
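# Illustrative example: on the first execution no matching sub-operation
# exists, so SUBOPERATION_STATUS_NEW is returned; when the operation is
# retried after a crash, the stored sub-operation is found and either
# skipped (already COMPLETED) or re-executed via its op_index.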
4221 def _check_or_add_scale_suboperation(
4222 self,
4223 db_nslcmop,
4224 vnf_index,
4225 vnf_config_primitive,
4226 primitive_params,
4227 operationType,
4228 RO_nsr_id=None,
4229 RO_scaling_info=None,
4230 ):
4231 # Find this sub-operation
4232 if RO_nsr_id and RO_scaling_info:
4233 operationType = "SCALE-RO"
4234 match = {
4235 "member_vnf_index": vnf_index,
4236 "RO_nsr_id": RO_nsr_id,
4237 "RO_scaling_info": RO_scaling_info,
4238 }
4239 else:
4240 match = {
4241 "member_vnf_index": vnf_index,
4242 "primitive": vnf_config_primitive,
4243 "primitive_params": primitive_params,
4244 "lcmOperationType": operationType,
4245 }
4246 op_index = self._find_suboperation(db_nslcmop, match)
4247 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4248 # a. New sub-operation
4249 # The sub-operation does not exist, add it.
4250 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4251 # The following parameters are set to None for all kind of scaling:
4252 vdu_id = None
4253 vdu_count_index = None
4254 vdu_name = None
4255 if RO_nsr_id and RO_scaling_info:
4256 vnf_config_primitive = None
4257 primitive_params = None
4258 else:
4259 RO_nsr_id = None
4260 RO_scaling_info = None
4261 # Initial status for sub-operation
4262 operationState = "PROCESSING"
4263 detailed_status = "In progress"
4264 # Add sub-operation for pre/post-scaling (zero or more operations)
4265 self._add_suboperation(
4266 db_nslcmop,
4267 vnf_index,
4268 vdu_id,
4269 vdu_count_index,
4270 vdu_name,
4271 vnf_config_primitive,
4272 primitive_params,
4273 operationState,
4274 detailed_status,
4275 operationType,
4276 RO_nsr_id,
4277 RO_scaling_info,
4278 )
4279 return self.SUBOPERATION_STATUS_NEW
4280 else:
4281 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4282 # or op_index (operationState != 'COMPLETED')
4283 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4284
4287 async def destroy_N2VC(
4288 self,
4289 logging_text,
4290 db_nslcmop,
4291 vca_deployed,
4292 config_descriptor,
4293 vca_index,
4294 destroy_ee=True,
4295 exec_primitives=True,
4296 scaling_in=False,
4297 vca_id: str = None,
4298 ):
4299 """
4300 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4301 :param logging_text:
4302 :param db_nslcmop:
4303 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4304 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4305 :param vca_index: index in the database _admin.deployed.VCA
4306 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
4307 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4308 not executed properly
4309 :param scaling_in: True destroys the application, False destroys the model
4310 :return: None or exception
4311 """
4312
4313 self.logger.debug(
4314 logging_text
4315 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4316 vca_index, vca_deployed, config_descriptor, destroy_ee
4317 )
4318 )
4319
4320 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4321
4322 # execute terminate_primitives
4323 if exec_primitives:
4324 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
4325 config_descriptor.get("terminate-config-primitive"),
4326 vca_deployed.get("ee_descriptor_id"),
4327 )
4328 vdu_id = vca_deployed.get("vdu_id")
4329 vdu_count_index = vca_deployed.get("vdu_count_index")
4330 vdu_name = vca_deployed.get("vdu_name")
4331 vnf_index = vca_deployed.get("member-vnf-index")
4332 if terminate_primitives and vca_deployed.get("needed_terminate"):
4333 for seq in terminate_primitives:
4334 # For each sequence in list, get primitive and call _ns_execute_primitive()
4335 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
4336 vnf_index, seq.get("name")
4337 )
4338 self.logger.debug(logging_text + step)
4339 # Create the primitive for each sequence, i.e. "primitive": "touch"
4340 primitive = seq.get("name")
4341 mapped_primitive_params = self._get_terminate_primitive_params(
4342 seq, vnf_index
4343 )
4344
4345 # Add sub-operation
4346 self._add_suboperation(
4347 db_nslcmop,
4348 vnf_index,
4349 vdu_id,
4350 vdu_count_index,
4351 vdu_name,
4352 primitive,
4353 mapped_primitive_params,
4354 )
4355 # Sub-operations: Call _ns_execute_primitive() instead of action()
4356 try:
4357 result, result_detail = await self._ns_execute_primitive(
4358 vca_deployed["ee_id"],
4359 primitive,
4360 mapped_primitive_params,
4361 vca_type=vca_type,
4362 vca_id=vca_id,
4363 )
4364 except LcmException:
4365 # this happens when the VCA is not deployed. In this case there is nothing to terminate
4366 continue
4367 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
4368 if result not in result_ok:
4369 raise LcmException(
4370 "terminate_primitive {} for vnf_member_index={} fails with "
4371 "error {}".format(seq.get("name"), vnf_index, result_detail)
4372 )
4373 # mark that this VCA no longer needs to be terminated
4374 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4375 vca_index
4376 )
4377 self.update_db_2(
4378 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4379 )
4380
4381 # Delete Prometheus Jobs if any
4382 # This uses NSR_ID, so it will destroy any jobs under this index
4383 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
4384
4385 if destroy_ee:
4386 await self.vca_map[vca_type].delete_execution_environment(
4387 vca_deployed["ee_id"],
4388 scaling_in=scaling_in,
4389 vca_type=vca_type,
4390 vca_id=vca_id,
4391 )
4392
4393 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4394 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4395 namespace = "." + db_nsr["_id"]
4396 try:
4397 await self.n2vc.delete_namespace(
4398 namespace=namespace,
4399 total_timeout=self.timeout.charm_delete,
4400 vca_id=vca_id,
4401 )
4402 except N2VCNotFound: # already deleted. Skip
4403 pass
4404 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4405
4406 async def terminate(self, nsr_id, nslcmop_id):
4407 # Try to lock HA task here
4408 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4409 if not task_is_locked_by_me:
4410 return
4411
4412 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4413 self.logger.debug(logging_text + "Enter")
4414 timeout_ns_terminate = self.timeout.ns_terminate
4415 db_nsr = None
4416 db_nslcmop = None
4417 operation_params = None
4418 exc = None
4419 error_list = [] # accumulates all failure error messages
4420 db_nslcmop_update = {}
4421 autoremove = False # autoremove after terminated
4422 tasks_dict_info = {}
4423 db_nsr_update = {}
4424 stage = [
4425 "Stage 1/3: Preparing task.",
4426 "Waiting for previous operations to terminate.",
4427 "",
4428 ]
4429 # ^ contains [stage, step, VIM-status]
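# Termination proceeds in three stages: (1) prepare the task and wait for
# previous operations, (2) execute per-VCA terminate primitives, and
# (3) delete all execution environments, KDU instances and the RO/VIM
# deployment, reporting the final state in the finally block.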
4430 try:
4431 # wait for any previous tasks in process
4432 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4433
4434 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4435 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4436 operation_params = db_nslcmop.get("operationParams") or {}
4437 if operation_params.get("timeout_ns_terminate"):
4438 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4439 stage[1] = "Getting nsr={} from db.".format(nsr_id)
4440 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4441
4442 db_nsr_update["operational-status"] = "terminating"
4443 db_nsr_update["config-status"] = "terminating"
4444 self._write_ns_status(
4445 nsr_id=nsr_id,
4446 ns_state="TERMINATING",
4447 current_operation="TERMINATING",
4448 current_operation_id=nslcmop_id,
4449 other_update=db_nsr_update,
4450 )
4451 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4452 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4453 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4454 return
4455
4456 stage[1] = "Getting vnf descriptors from db."
4457 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4458 db_vnfrs_dict = {
4459 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4460 }
4461 db_vnfds_from_id = {}
4462 db_vnfds_from_member_index = {}
4463 # Loop over VNFRs
4464 for vnfr in db_vnfrs_list:
4465 vnfd_id = vnfr["vnfd-id"]
4466 if vnfd_id not in db_vnfds_from_id:
4467 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4468 db_vnfds_from_id[vnfd_id] = vnfd
4469 db_vnfds_from_member_index[
4470 vnfr["member-vnf-index-ref"]
4471 ] = db_vnfds_from_id[vnfd_id]
4472
4473 # Destroy individual execution environments when there are terminating primitives.
4474 # The rest of the EEs will be deleted at once
4475 # TODO - check before calling _destroy_N2VC
4476 # if not operation_params.get("skip_terminate_primitives"):#
4477 # or not vca.get("needed_terminate"):
4478 stage[0] = "Stage 2/3 execute terminating primitives."
4479 self.logger.debug(logging_text + stage[0])
4480 stage[1] = "Looking execution environment that needs terminate."
4481 self.logger.debug(logging_text + stage[1])
4482
4483 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4484 config_descriptor = None
4485 vca_member_vnf_index = vca.get("member-vnf-index")
4486 vca_id = self.get_vca_id(
4487 db_vnfrs_dict.get(vca_member_vnf_index)
4488 if vca_member_vnf_index
4489 else None,
4490 db_nsr,
4491 )
4492 if not vca or not vca.get("ee_id"):
4493 continue
4494 if not vca.get("member-vnf-index"):
4495 # ns
4496 config_descriptor = db_nsr.get("ns-configuration")
4497 elif vca.get("vdu_id"):
4498 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4499 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4500 elif vca.get("kdu_name"):
4501 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4502 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4503 else:
4504 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4505 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4506 vca_type = vca.get("type")
4507 exec_terminate_primitives = not operation_params.get(
4508 "skip_terminate_primitives"
4509 ) and vca.get("needed_terminate")
4510 # For helm we must destroy the EE. The same applies to native_charm, as the juju model cannot be
4511 # deleted while there are pending native charms
4512 destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
4513 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4514 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4515 task = asyncio.ensure_future(
4516 self.destroy_N2VC(
4517 logging_text,
4518 db_nslcmop,
4519 vca,
4520 config_descriptor,
4521 vca_index,
4522 destroy_ee,
4523 exec_terminate_primitives,
4524 vca_id=vca_id,
4525 )
4526 )
4527 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4528
4529 # wait for pending tasks of terminate primitives
4530 if tasks_dict_info:
4531 self.logger.debug(
4532 logging_text
4533 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4534 )
4535 error_list = await self._wait_for_tasks(
4536 logging_text,
4537 tasks_dict_info,
4538 min(self.timeout.charm_delete, timeout_ns_terminate),
4539 stage,
4540 nslcmop_id,
4541 )
4542 tasks_dict_info.clear()
4543 if error_list:
4544 return # raise LcmException("; ".join(error_list))
4545
4546 # remove All execution environments at once
4547 stage[0] = "Stage 3/3 delete all."
4548
4549 if nsr_deployed.get("VCA"):
4550 stage[1] = "Deleting all execution environments."
4551 self.logger.debug(logging_text + stage[1])
4552 helm_vca_list = get_deployed_vca(db_nsr, {"type": "helm-v3"})
4553 if helm_vca_list:
4554 # Delete Namespace and Certificates
4555 await self.vca_map["helm-v3"].delete_tls_certificate(
4556 namespace=db_nslcmop["nsInstanceId"],
4557 certificate_name=self.EE_TLS_NAME,
4558 )
4559 await self.vca_map["helm-v3"].delete_namespace(
4560 namespace=db_nslcmop["nsInstanceId"],
4561 )
4562 else:
4563 vca_id = self.get_vca_id({}, db_nsr)
4564 task_delete_ee = asyncio.ensure_future(
4565 asyncio.wait_for(
4566 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4567 timeout=self.timeout.charm_delete,
4568 )
4569 )
4570 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4571
4572 # Delete from k8scluster
4573 stage[1] = "Deleting KDUs."
4574 self.logger.debug(logging_text + stage[1])
4575 # print(nsr_deployed)
4576 for kdu in get_iterable(nsr_deployed, "K8s"):
4577 if not kdu or not kdu.get("kdu-instance"):
4578 continue
4579 kdu_instance = kdu.get("kdu-instance")
4580 if kdu.get("k8scluster-type") in self.k8scluster_map:
4581 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4582 vca_id = self.get_vca_id({}, db_nsr)
4583 task_delete_kdu_instance = asyncio.ensure_future(
4584 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4585 cluster_uuid=kdu.get("k8scluster-uuid"),
4586 kdu_instance=kdu_instance,
4587 vca_id=vca_id,
4588 namespace=kdu.get("namespace"),
4589 )
4590 )
4591 else:
4592 self.logger.error(
4593 logging_text
4594 + "Unknown k8s deployment type {}".format(
4595 kdu.get("k8scluster-type")
4596 )
4597 )
4598 continue
4599 tasks_dict_info[
4600 task_delete_kdu_instance
4601 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4602
4603 # remove from RO
4604 stage[1] = "Deleting ns from VIM."
4605 if self.ro_config.ng:
4606 task_delete_ro = asyncio.ensure_future(
4607 self._terminate_ng_ro(
4608 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4609 )
4610 )
4611 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4612
4613 # the rest of the work is done in the finally block
4614
4615 except (
4616 ROclient.ROClientException,
4617 DbException,
4618 LcmException,
4619 N2VCException,
4620 ) as e:
4621 self.logger.error(logging_text + "Exit Exception {}".format(e))
4622 exc = e
4623 except asyncio.CancelledError:
4624 self.logger.error(
4625 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4626 )
4627 exc = "Operation was cancelled"
4628 except Exception as e:
4629 exc = traceback.format_exc()
4630 self.logger.critical(
4631 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4632 exc_info=True,
4633 )
4634 finally:
4635 if exc:
4636 error_list.append(str(exc))
4637 try:
4638 # wait for pending tasks
4639 if tasks_dict_info:
4640 stage[1] = "Waiting for terminate pending tasks."
4641 self.logger.debug(logging_text + stage[1])
4642 error_list += await self._wait_for_tasks(
4643 logging_text,
4644 tasks_dict_info,
4645 timeout_ns_terminate,
4646 stage,
4647 nslcmop_id,
4648 )
4649 stage[1] = stage[2] = ""
4650 except asyncio.CancelledError:
4651 error_list.append("Cancelled")
4652 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
4653 await self._wait_for_tasks(
4654 logging_text,
4655 tasks_dict_info,
4656 timeout_ns_terminate,
4657 stage,
4658 nslcmop_id,
4659 )
4660 except Exception as exc:
4661 error_list.append(str(exc))
4662 # update status at database
4663 if error_list:
4664 error_detail = "; ".join(error_list)
4665 # self.logger.error(logging_text + error_detail)
4666 error_description_nslcmop = "{} Detail: {}".format(
4667 stage[0], error_detail
4668 )
4669 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4670 nslcmop_id, stage[0]
4671 )
4672
4673 db_nsr_update["operational-status"] = "failed"
4674 db_nsr_update["detailed-status"] = (
4675 error_description_nsr + " Detail: " + error_detail
4676 )
4677 db_nslcmop_update["detailed-status"] = error_detail
4678 nslcmop_operation_state = "FAILED"
4679 ns_state = "BROKEN"
4680 else:
4681 error_detail = None
4682 error_description_nsr = error_description_nslcmop = None
4683 ns_state = "NOT_INSTANTIATED"
4684 db_nsr_update["operational-status"] = "terminated"
4685 db_nsr_update["detailed-status"] = "Done"
4686 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4687 db_nslcmop_update["detailed-status"] = "Done"
4688 nslcmop_operation_state = "COMPLETED"
4689
4690 if db_nsr:
4691 self._write_ns_status(
4692 nsr_id=nsr_id,
4693 ns_state=ns_state,
4694 current_operation="IDLE",
4695 current_operation_id=None,
4696 error_description=error_description_nsr,
4697 error_detail=error_detail,
4698 other_update=db_nsr_update,
4699 )
4700 self._write_op_status(
4701 op_id=nslcmop_id,
4702 stage="",
4703 error_message=error_description_nslcmop,
4704 operation_state=nslcmop_operation_state,
4705 other_update=db_nslcmop_update,
4706 )
4707 if ns_state == "NOT_INSTANTIATED":
4708 try:
4709 self.db.set_list(
4710 "vnfrs",
4711 {"nsr-id-ref": nsr_id},
4712 {"_admin.nsState": "NOT_INSTANTIATED"},
4713 )
4714 except DbException as e:
4715 self.logger.warning(
4716 logging_text
4717 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4718 nsr_id, e
4719 )
4720 )
4721 if operation_params:
4722 autoremove = operation_params.get("autoremove", False)
4723 if nslcmop_operation_state:
4724 try:
4725 await self.msg.aiowrite(
4726 "ns",
4727 "terminated",
4728 {
4729 "nsr_id": nsr_id,
4730 "nslcmop_id": nslcmop_id,
4731 "operationState": nslcmop_operation_state,
4732 "autoremove": autoremove,
4733 },
4734 )
4735 except Exception as e:
4736 self.logger.error(
4737 logging_text + "kafka_write notification Exception {}".format(e)
4738 )
4739 self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
4740 self.db.del_list("alerts", {"tags.ns_id": nsr_id})
4741
4742 self.logger.debug(logging_text + "Exit")
4743 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4744
4745 async def _wait_for_tasks(
4746 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4747 ):
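"""
Wait for all tasks in created_tasks_info (a task -> description mapping) to
finish, updating the operation stage as "<done>/<total>" after each batch.
Returns the list of error detail messages (timeouts, cancellations and
exceptions); an empty list means every task completed successfully.
"""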
4748 time_start = time()
4749 error_detail_list = []
4750 error_list = []
4751 pending_tasks = list(created_tasks_info.keys())
4752 num_tasks = len(pending_tasks)
4753 num_done = 0
4754 stage[1] = "{}/{}.".format(num_done, num_tasks)
4755 self._write_op_status(nslcmop_id, stage)
4756 while pending_tasks:
4757 new_error = None
4758 _timeout = timeout + time_start - time()
4759 done, pending_tasks = await asyncio.wait(
4760 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4761 )
4762 num_done += len(done)
4763 if not done: # Timeout
4764 for task in pending_tasks:
4765 new_error = created_tasks_info[task] + ": Timeout"
4766 error_detail_list.append(new_error)
4767 error_list.append(new_error)
4768 break
4769 for task in done:
4770 if task.cancelled():
4771 exc = "Cancelled"
4772 else:
4773 exc = task.exception()
4774 if exc:
4775 if isinstance(exc, asyncio.TimeoutError):
4776 exc = "Timeout"
4777 new_error = created_tasks_info[task] + ": {}".format(exc)
4778 error_list.append(created_tasks_info[task])
4779 error_detail_list.append(new_error)
4780 if isinstance(
4781 exc,
4782 (
4783 str,
4784 DbException,
4785 N2VCException,
4786 ROclient.ROClientException,
4787 LcmException,
4788 K8sException,
4789 NgRoException,
4790 ),
4791 ):
4792 self.logger.error(logging_text + new_error)
4793 else:
4794 exc_traceback = "".join(
4795 traceback.format_exception(None, exc, exc.__traceback__)
4796 )
4797 self.logger.error(
4798 logging_text
4799 + created_tasks_info[task]
4800 + " "
4801 + exc_traceback
4802 )
4803 else:
4804 self.logger.debug(
4805 logging_text + created_tasks_info[task] + ": Done"
4806 )
4807 stage[1] = "{}/{}.".format(num_done, num_tasks)
4808 if new_error:
4809 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4810 if nsr_id: # update also nsr
4811 self.update_db_2(
4812 "nsrs",
4813 nsr_id,
4814 {
4815 "errorDescription": "Error at: " + ", ".join(error_list),
4816 "errorDetail": ". ".join(error_detail_list),
4817 },
4818 )
4819 self._write_op_status(nslcmop_id, stage)
4820 return error_detail_list
4821
4822 async def _cancel_pending_tasks(self, logging_text, created_tasks_info):
4823 for task, name in created_tasks_info.items():
4824 self.logger.debug(logging_text + "Cancelling task: " + name)
4825 task.cancel()
4826
4827 @staticmethod
4828 def _map_primitive_params(primitive_desc, params, instantiation_params):
4829 """
4830 Generates the params to be provided to the charm before executing a primitive. If the user does not provide
4831 a parameter, the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
4832 :param primitive_desc: portion of VNFD/NSD that describes primitive
4833 :param params: Params provided by user
4834 :param instantiation_params: Instantiation params provided by user
4835 :return: a dictionary with the calculated params
4836 """
4837 calculated_params = {}
4838 for parameter in primitive_desc.get("parameter", ()):
4839 param_name = parameter["name"]
4840 if param_name in params:
4841 calculated_params[param_name] = params[param_name]
4842 elif "default-value" in parameter or "value" in parameter:
4843 if "value" in parameter:
4844 calculated_params[param_name] = parameter["value"]
4845 else:
4846 calculated_params[param_name] = parameter["default-value"]
4847 if (
4848 isinstance(calculated_params[param_name], str)
4849 and calculated_params[param_name].startswith("<")
4850 and calculated_params[param_name].endswith(">")
4851 ):
4852 if calculated_params[param_name][1:-1] in instantiation_params:
4853 calculated_params[param_name] = instantiation_params[
4854 calculated_params[param_name][1:-1]
4855 ]
4856 else:
4857 raise LcmException(
4858 "Parameter {} needed to execute primitive {} not provided".format(
4859 calculated_params[param_name], primitive_desc["name"]
4860 )
4861 )
4862 else:
4863 raise LcmException(
4864 "Parameter {} needed to execute primitive {} not provided".format(
4865 param_name, primitive_desc["name"]
4866 )
4867 )
4868
4869 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4870 calculated_params[param_name] = yaml.safe_dump(
4871 calculated_params[param_name], default_flow_style=True, width=256
4872 )
4873 elif isinstance(calculated_params[param_name], str) and calculated_params[
4874 param_name
4875 ].startswith("!!yaml "):
4876 calculated_params[param_name] = calculated_params[param_name][7:]
4877 if parameter.get("data-type") == "INTEGER":
4878 try:
4879 calculated_params[param_name] = int(calculated_params[param_name])
4880 except ValueError: # error converting string to int
4881 raise LcmException(
4882 "Parameter {} of primitive {} must be integer".format(
4883 param_name, primitive_desc["name"]
4884 )
4885 )
4886 elif parameter.get("data-type") == "BOOLEAN":
4887 calculated_params[param_name] = (
4888 str(calculated_params[param_name]).lower() != "false"
4889 )
4890
4891 # always add ns_config_info if the primitive name is config
4892 if primitive_desc["name"] == "config":
4893 if "ns_config_info" in instantiation_params:
4894 calculated_params["ns_config_info"] = instantiation_params[
4895 "ns_config_info"
4896 ]
4897 return calculated_params
4898
4899 def _look_for_deployed_vca(
4900 self,
4901 deployed_vca,
4902 member_vnf_index,
4903 vdu_id,
4904 vdu_count_index,
4905 kdu_name=None,
4906 ee_descriptor_id=None,
4907 ):
4908 # find the vca_deployed record for this action. Raise LcmException if not found or it has no ee_id.
4909 for vca in deployed_vca:
4910 if not vca:
4911 continue
4912 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4913 continue
4914 if (
4915 vdu_count_index is not None
4916 and vdu_count_index != vca["vdu_count_index"]
4917 ):
4918 continue
4919 if kdu_name and kdu_name != vca["kdu_name"]:
4920 continue
4921 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4922 continue
4923 break
4924 else:
4925 # vca_deployed not found
4926 raise LcmException(
4927 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4928 " is not deployed".format(
4929 member_vnf_index,
4930 vdu_id,
4931 vdu_count_index,
4932 kdu_name,
4933 ee_descriptor_id,
4934 )
4935 )
4936 # get ee_id
4937 ee_id = vca.get("ee_id")
4938 vca_type = vca.get(
4939 "type", "lxc_proxy_charm"
4940 ) # default value for backward compatibility - proxy charm
4941 if not ee_id:
4942 raise LcmException(
4943 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4944 "execution environment".format(
4945 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4946 )
4947 )
4948 return ee_id, vca_type
4949
4950 async def _ns_execute_primitive(
4951 self,
4952 ee_id,
4953 primitive,
4954 primitive_params,
4955 retries=0,
4956 retries_interval=30,
4957 timeout=None,
4958 vca_type=None,
4959 db_dict=None,
4960 vca_id: str = None,
4961 ) -> (str, str):
4962 try:
4963 if primitive == "config":
4964 primitive_params = {"params": primitive_params}
4965
4966 vca_type = vca_type or "lxc_proxy_charm"
4967
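# retries=0 means a single attempt; on failure the call is repeated every
# retries_interval seconds until retries is exhausted, and then
# ("FAILED", <error message>) is returned instead of raising.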
4968 while retries >= 0:
4969 try:
4970 output = await asyncio.wait_for(
4971 self.vca_map[vca_type].exec_primitive(
4972 ee_id=ee_id,
4973 primitive_name=primitive,
4974 params_dict=primitive_params,
4975 progress_timeout=self.timeout.progress_primitive,
4976 total_timeout=self.timeout.primitive,
4977 db_dict=db_dict,
4978 vca_id=vca_id,
4979 vca_type=vca_type,
4980 ),
4981 timeout=timeout or self.timeout.primitive,
4982 )
4983 # execution was OK
4984 break
4985 except asyncio.CancelledError:
4986 raise
4987 except Exception as e:
4988 retries -= 1
4989 if retries >= 0:
4990 self.logger.debug(
4991 "Error executing action {} on {} -> {}".format(
4992 primitive, ee_id, e
4993 )
4994 )
4995 # wait and retry
4996 await asyncio.sleep(retries_interval)
4997 else:
4998 if isinstance(e, asyncio.TimeoutError):
4999 e = N2VCException(
5000 message="Timed out waiting for action to complete"
5001 )
5002 return "FAILED", getattr(e, "message", repr(e))
5003
5004 return "COMPLETED", output
5005
5006 except (LcmException, asyncio.CancelledError):
5007 raise
5008 except Exception as e:
5009 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5010
5011 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5012 """
5013 Update the vca_status in the nsrs record with the latest juju information
5014 :param: nsr_id: Id of the nsr
5015 :param: nslcmop_id: Id of the nslcmop
5016 :return: None
5017 """
5018
5019 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5020 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5021 vca_id = self.get_vca_id({}, db_nsr)
5022 if db_nsr["_admin"]["deployed"]["K8s"]:
5023 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5024 cluster_uuid, kdu_instance, cluster_type = (
5025 k8s["k8scluster-uuid"],
5026 k8s["kdu-instance"],
5027 k8s["k8scluster-type"],
5028 )
5029 await self._on_update_k8s_db(
5030 cluster_uuid=cluster_uuid,
5031 kdu_instance=kdu_instance,
5032 filter={"_id": nsr_id},
5033 vca_id=vca_id,
5034 cluster_type=cluster_type,
5035 )
5036 if db_nsr["_admin"]["deployed"]["VCA"]:
5037 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5038 table, filter = "nsrs", {"_id": nsr_id}
5039 path = "_admin.deployed.VCA.{}.".format(vca_index)
5040 await self._on_update_n2vc_db(table, filter, path, {})
5041
5042 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5043 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5044
5045 async def action(self, nsr_id, nslcmop_id):
5046 # Try to lock HA task here
5047 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5048 if not task_is_locked_by_me:
5049 return
5050
5051 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5052 self.logger.debug(logging_text + "Enter")
5053 # get all needed from database
5054 db_nsr = None
5055 db_nslcmop = None
5056 db_nsr_update = {}
5057 db_nslcmop_update = {}
5058 nslcmop_operation_state = None
5059 error_description_nslcmop = None
5060 exc = None
5061 step = ""
5062 try:
5063 # wait for any previous tasks in process
5064 step = "Waiting for previous operations to terminate"
5065 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5066
5067 self._write_ns_status(
5068 nsr_id=nsr_id,
5069 ns_state=None,
5070 current_operation="RUNNING ACTION",
5071 current_operation_id=nslcmop_id,
5072 )
5073
5074 step = "Getting information from database"
5075 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5076 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5077 if db_nslcmop["operationParams"].get("primitive_params"):
5078 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5079 db_nslcmop["operationParams"]["primitive_params"]
5080 )
5081
5082 nsr_deployed = db_nsr["_admin"].get("deployed")
5083 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
5084 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
5085 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
5086 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
5087 primitive = db_nslcmop["operationParams"]["primitive"]
5088 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
5089 timeout_ns_action = db_nslcmop["operationParams"].get(
5090 "timeout_ns_action", self.timeout.primitive
5091 )
5092
5093 if vnf_index:
5094 step = "Getting vnfr from database"
5095 db_vnfr = self.db.get_one(
5096 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5097 )
5098 if db_vnfr.get("kdur"):
5099 kdur_list = []
5100 for kdur in db_vnfr["kdur"]:
5101 if kdur.get("additionalParams"):
5102 kdur["additionalParams"] = json.loads(
5103 kdur["additionalParams"]
5104 )
5105 kdur_list.append(kdur)
5106 db_vnfr["kdur"] = kdur_list
5107 step = "Getting vnfd from database"
5108 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5109
5110 # Sync filesystem before running a primitive
5111 self.fs.sync(db_vnfr["vnfd-id"])
5112 else:
5113 step = "Getting nsd from database"
5114 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
5115
5116 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5117 # for backward compatibility
5118 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5119 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5120 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5121 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5122
5123 # look for primitive
5124 config_primitive_desc = descriptor_configuration = None
5125 if vdu_id:
5126 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
5127 elif kdu_name:
5128 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
5129 elif vnf_index:
5130 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
5131 else:
5132 descriptor_configuration = db_nsd.get("ns-configuration")
5133
5134 if descriptor_configuration and descriptor_configuration.get(
5135 "config-primitive"
5136 ):
5137 for config_primitive in descriptor_configuration["config-primitive"]:
5138 if config_primitive["name"] == primitive:
5139 config_primitive_desc = config_primitive
5140 break
5141
5142 if not config_primitive_desc:
5143 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
5144 raise LcmException(
5145 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5146 primitive
5147 )
5148 )
5149 primitive_name = primitive
5150 ee_descriptor_id = None
5151 else:
5152 primitive_name = config_primitive_desc.get(
5153 "execution-environment-primitive", primitive
5154 )
5155 ee_descriptor_id = config_primitive_desc.get(
5156 "execution-environment-ref"
5157 )
5158
5159 if vnf_index:
5160 if vdu_id:
5161 vdur = next(
5162 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5163 )
5164 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5165 elif kdu_name:
5166 kdur = next(
5167 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5168 )
5169 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5170 else:
5171 desc_params = parse_yaml_strings(
5172 db_vnfr.get("additionalParamsForVnf")
5173 )
5174 else:
5175 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5176 if kdu_name and get_configuration(db_vnfd, kdu_name):
5177 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5178 actions = set()
5179 for primitive in kdu_configuration.get("initial-config-primitive", []):
5180 actions.add(primitive["name"])
5181 for primitive in kdu_configuration.get("config-primitive", []):
5182 actions.add(primitive["name"])
5183 kdu = find_in_list(
5184 nsr_deployed["K8s"],
5185 lambda kdu: kdu_name == kdu["kdu-name"]
5186 and kdu["member-vnf-index"] == vnf_index,
5187 )
5188 kdu_action = (
5189 True
5190 if primitive_name in actions
5191 and kdu["k8scluster-type"] != "helm-chart-v3"
5192 else False
5193 )
5194
5195 # TODO check if ns is in a proper status
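# KDU primitives (upgrade/rollback/status, plus any action declared in the
# KDU configuration on non helm-chart-v3 clusters) are dispatched directly
# to the K8s connector; anything else is executed through the deployed VCA
# execution environment further below.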
5196 if kdu_name and (
5197 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5198 ):
5199 # kdur and desc_params already set from before
5200 if primitive_params:
5201 desc_params.update(primitive_params)
5202 # TODO Check if we will need something at vnf level
5203 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5204 if (
5205 kdu_name == kdu["kdu-name"]
5206 and kdu["member-vnf-index"] == vnf_index
5207 ):
5208 break
5209 else:
5210 raise LcmException(
5211 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5212 )
5213
5214 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5215 msg = "unknown k8scluster-type '{}'".format(
5216 kdu.get("k8scluster-type")
5217 )
5218 raise LcmException(msg)
5219
5220 db_dict = {
5221 "collection": "nsrs",
5222 "filter": {"_id": nsr_id},
5223 "path": "_admin.deployed.K8s.{}".format(index),
5224 }
5225 self.logger.debug(
5226 logging_text
5227 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5228 )
5229 step = "Executing kdu {}".format(primitive_name)
5230 if primitive_name == "upgrade":
5231 if desc_params.get("kdu_model"):
5232 kdu_model = desc_params.get("kdu_model")
5233 del desc_params["kdu_model"]
5234 else:
5235 kdu_model = kdu.get("kdu-model")
5236 if kdu_model.count("/") < 2: # helm chart is not embedded
5237 parts = kdu_model.split(sep=":")
5238 if len(parts) == 2:
5239 kdu_model = parts[0]
5240 if desc_params.get("kdu_atomic_upgrade"):
5241 atomic_upgrade = desc_params.get(
5242 "kdu_atomic_upgrade"
5243 ).lower() in ("yes", "true", "1")
5244 del desc_params["kdu_atomic_upgrade"]
5245 else:
5246 atomic_upgrade = True
5247
5248 detailed_status = await asyncio.wait_for(
5249 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5250 cluster_uuid=kdu.get("k8scluster-uuid"),
5251 kdu_instance=kdu.get("kdu-instance"),
5252 atomic=atomic_upgrade,
5253 kdu_model=kdu_model,
5254 params=desc_params,
5255 db_dict=db_dict,
5256 timeout=timeout_ns_action,
5257 ),
5258 timeout=timeout_ns_action + 10,
5259 )
5260 self.logger.debug(
5261 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5262 )
5263 elif primitive_name == "rollback":
5264 detailed_status = await asyncio.wait_for(
5265 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5266 cluster_uuid=kdu.get("k8scluster-uuid"),
5267 kdu_instance=kdu.get("kdu-instance"),
5268 db_dict=db_dict,
5269 ),
5270 timeout=timeout_ns_action,
5271 )
5272 elif primitive_name == "status":
5273 detailed_status = await asyncio.wait_for(
5274 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5275 cluster_uuid=kdu.get("k8scluster-uuid"),
5276 kdu_instance=kdu.get("kdu-instance"),
5277 vca_id=vca_id,
5278 ),
5279 timeout=timeout_ns_action,
5280 )
5281 else:
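                         # any other declared KDU primitive is executed inside the
                         # k8s cluster through the corresponding connector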
5282 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5283 kdu["kdu-name"], nsr_id
5284 )
5285 params = self._map_primitive_params(
5286 config_primitive_desc, primitive_params, desc_params
5287 )
5288
5289 detailed_status = await asyncio.wait_for(
5290 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5291 cluster_uuid=kdu.get("k8scluster-uuid"),
5292 kdu_instance=kdu_instance,
5293 primitive_name=primitive_name,
5294 params=params,
5295 db_dict=db_dict,
5296 timeout=timeout_ns_action,
5297 vca_id=vca_id,
5298 ),
5299 timeout=timeout_ns_action,
5300 )
5301
5302 if detailed_status:
5303 nslcmop_operation_state = "COMPLETED"
5304 else:
5305 detailed_status = ""
5306 nslcmop_operation_state = "FAILED"
5307 else:
5308 ee_id, vca_type = self._look_for_deployed_vca(
5309 nsr_deployed["VCA"],
5310 member_vnf_index=vnf_index,
5311 vdu_id=vdu_id,
5312 vdu_count_index=vdu_count_index,
5313 ee_descriptor_id=ee_descriptor_id,
5314 )
5315 for vca_index, vca_deployed in enumerate(
5316 db_nsr["_admin"]["deployed"]["VCA"]
5317 ):
5318 if vca_deployed.get("member-vnf-index") == vnf_index:
5319 db_dict = {
5320 "collection": "nsrs",
5321 "filter": {"_id": nsr_id},
5322 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5323 }
5324 break
5325 (
5326 nslcmop_operation_state,
5327 detailed_status,
5328 ) = await self._ns_execute_primitive(
5329 ee_id,
5330 primitive=primitive_name,
5331 primitive_params=self._map_primitive_params(
5332 config_primitive_desc, primitive_params, desc_params
5333 ),
5334 timeout=timeout_ns_action,
5335 vca_type=vca_type,
5336 db_dict=db_dict,
5337 vca_id=vca_id,
5338 )
5339
5340 db_nslcmop_update["detailed-status"] = detailed_status
5341 error_description_nslcmop = (
5342 detailed_status if nslcmop_operation_state == "FAILED" else ""
5343 )
5344 self.logger.debug(
5345 logging_text
5346 + "Done with result {} {}".format(
5347 nslcmop_operation_state, detailed_status
5348 )
5349 )
5350 return # database update is called inside finally
5351
5352 except (DbException, LcmException, N2VCException, K8sException) as e:
5353 self.logger.error(logging_text + "Exit Exception {}".format(e))
5354 exc = e
5355 except asyncio.CancelledError:
5356 self.logger.error(
5357 logging_text + "Cancelled Exception while '{}'".format(step)
5358 )
5359 exc = "Operation was cancelled"
5360 except asyncio.TimeoutError:
5361 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5362 exc = "Timeout"
5363 except Exception as e:
5364 exc = traceback.format_exc()
5365 self.logger.critical(
5366 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5367 exc_info=True,
5368 )
5369 finally:
5370 if exc:
5371 db_nslcmop_update[
5372 "detailed-status"
5373 ] = (
5374 detailed_status
5375 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5376 nslcmop_operation_state = "FAILED"
5377 if db_nsr:
5378 self._write_ns_status(
5379 nsr_id=nsr_id,
5380 ns_state=db_nsr[
5381 "nsState"
5382 ], # TODO check if degraded. For the moment use previous status
5383 current_operation="IDLE",
5384 current_operation_id=None,
5385 # error_description=error_description_nsr,
5386 # error_detail=error_detail,
5387 other_update=db_nsr_update,
5388 )
5389
5390 self._write_op_status(
5391 op_id=nslcmop_id,
5392 stage="",
5393 error_message=error_description_nslcmop,
5394 operation_state=nslcmop_operation_state,
5395 other_update=db_nslcmop_update,
5396 )
5397
5398 if nslcmop_operation_state:
5399 try:
5400 await self.msg.aiowrite(
5401 "ns",
5402 "actioned",
5403 {
5404 "nsr_id": nsr_id,
5405 "nslcmop_id": nslcmop_id,
5406 "operationState": nslcmop_operation_state,
5407 },
5408 )
5409 except Exception as e:
5410 self.logger.error(
5411 logging_text + "kafka_write notification Exception {}".format(e)
5412 )
5413 self.logger.debug(logging_text + "Exit")
5414 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5415 return nslcmop_operation_state, detailed_status
5416
5417 async def terminate_vdus(
5418 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5419 ):
5420         """Terminate all the VDUs of a VNF by requesting a scale-in to RO.
5421         Args:
5422             db_vnfr: VNF instance record
5423             member_vnf_index: VNF index to identify the VDUs to be removed
5424             db_nsr: NS instance record
5425             update_db_nslcmops: nslcmop record used to report progress
5426             stage: progress stage list; logging_text: log message prefix
5427         """
5428 vca_scaling_info = []
5429 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5430 scaling_info["scaling_direction"] = "IN"
5431 scaling_info["vdu-delete"] = {}
5432 scaling_info["kdu-delete"] = {}
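             # build a scale-in descriptor that covers every VDU of this VNF,
             # so that RO removes all of its resources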
5433 db_vdur = db_vnfr.get("vdur")
5434 vdur_list = copy(db_vdur)
5435 count_index = 0
5436 for index, vdu in enumerate(vdur_list):
5437 vca_scaling_info.append(
5438 {
5439 "osm_vdu_id": vdu["vdu-id-ref"],
5440 "member-vnf-index": member_vnf_index,
5441 "type": "delete",
5442 "vdu_index": count_index,
5443 }
5444 )
5445 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5446 scaling_info["vdu"].append(
5447 {
5448 "name": vdu.get("name") or vdu.get("vdu-name"),
5449 "vdu_id": vdu["vdu-id-ref"],
5450 "interface": [],
5451 }
5452 )
5453 for interface in vdu["interfaces"]:
5454 scaling_info["vdu"][index]["interface"].append(
5455 {
5456 "name": interface["name"],
5457 "ip_address": interface["ip-address"],
5458 "mac_address": interface.get("mac-address"),
5459 }
5460 )
5461         self.logger.info("NS update scaling info: {}".format(scaling_info))
5462 stage[2] = "Terminating VDUs"
5463 if scaling_info.get("vdu-delete"):
5464 # scale_process = "RO"
5465 if self.ro_config.ng:
5466 await self._scale_ng_ro(
5467 logging_text,
5468 db_nsr,
5469 update_db_nslcmops,
5470 db_vnfr,
5471 scaling_info,
5472 stage,
5473 )
5474
5475 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5476         """Remove a VNF instance from the NS.
5477
5478 Args:
5479 nsr_id: NS instance id
5480 nslcmop_id: nslcmop id of update
5481 vnf_instance_id: id of the VNF instance to be removed
5482
5483 Returns:
5484 result: (str, str) COMPLETED/FAILED, details
5485 """
5486 try:
5487 db_nsr_update = {}
5488 logging_text = "Task ns={} update ".format(nsr_id)
5489 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5490 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
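                 # an NS must keep at least one VNF: only proceed when more than
                 # one VNFR is attached to this NS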
5491 if check_vnfr_count > 1:
5492 stage = ["", "", ""]
5493 step = "Getting nslcmop from database"
5494 self.logger.debug(
5495 step + " after having waited for previous tasks to be completed"
5496 )
5497 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5498 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5499 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5500 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5501                 # db_vnfr = self.db.get_one(
5502                 #     "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id})
5503
5504 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5505 await self.terminate_vdus(
5506 db_vnfr,
5507 member_vnf_index,
5508 db_nsr,
5509 update_db_nslcmops,
5510 stage,
5511 logging_text,
5512 )
5513
5514 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5515 constituent_vnfr.remove(db_vnfr.get("_id"))
5516 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5517 "constituent-vnfr-ref"
5518 )
5519 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5520 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5522 return "COMPLETED", "Done"
5523 else:
5524                 step = "Terminating VNF"
5525                 raise LcmException(
5526                     "Cannot terminate VNF {}: it is the last VNF in this NS.".format(
5527                         vnf_instance_id
5528                     )
5529                 )
5530 except (LcmException, asyncio.CancelledError):
5531 raise
5532 except Exception as e:
5533 self.logger.debug("Error removing VNF {}".format(e))
5534 return "FAILED", "Error removing VNF {}".format(e)
5535
5536 async def _ns_redeploy_vnf(
5537 self,
5538 nsr_id,
5539 nslcmop_id,
5540 db_vnfd,
5541 db_vnfr,
5542 db_nsr,
5543 ):
5544         """Update and redeploy a VNF instance from its latest descriptor revision.
5545
5546 Args:
5547 nsr_id: NS instance id
5548 nslcmop_id: nslcmop id
5549 db_vnfd: VNF descriptor
5550 db_vnfr: VNF instance record
5551 db_nsr: NS instance record
5552
5553 Returns:
5554 result: (str, str) COMPLETED/FAILED, details
5555 """
5556 try:
5557 count_index = 0
5558 stage = ["", "", ""]
5559 logging_text = "Task ns={} update ".format(nsr_id)
5560 latest_vnfd_revision = db_vnfd["_admin"].get("revision")
5561 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5562
5563 # Terminate old VNF resources
5564 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5565 await self.terminate_vdus(
5566 db_vnfr,
5567 member_vnf_index,
5568 db_nsr,
5569 update_db_nslcmops,
5570 stage,
5571 logging_text,
5572 )
5573
5574 # old_vnfd_id = db_vnfr["vnfd-id"]
5575 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5576 new_db_vnfd = db_vnfd
5577 # new_vnfd_ref = new_db_vnfd["id"]
5578 # new_vnfd_id = vnfd_id
5579
5580 # Create VDUR
5581 new_vnfr_cp = []
5582 for cp in new_db_vnfd.get("ext-cpd", ()):
5583 vnf_cp = {
5584 "name": cp.get("id"),
5585 "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
5586 "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
5587 "id": cp.get("id"),
5588 }
5589 new_vnfr_cp.append(vnf_cp)
5590 new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
5591 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5592 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5593 new_vnfr_update = {
5594 "revision": latest_vnfd_revision,
5595 "connection-point": new_vnfr_cp,
5596 "vdur": new_vdur,
5597 "ip-address": "",
5598 }
5599 self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
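             # re-read the VNFR so the instantiation steps below see the
             # refreshed connection points and VDURs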
5600 updated_db_vnfr = self.db.get_one(
5601 "vnfrs",
5602 {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
5603 )
5604
5605 # Instantiate new VNF resources
5606 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5607 vca_scaling_info = []
5608 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5609 scaling_info["scaling_direction"] = "OUT"
5610 scaling_info["vdu-create"] = {}
5611 scaling_info["kdu-create"] = {}
5612 vdud_instantiate_list = db_vnfd["vdu"]
5613 for index, vdud in enumerate(vdud_instantiate_list):
5614 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
5615 if cloud_init_text:
5616 additional_params = (
5617 self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
5618 or {}
5619 )
5620 cloud_init_list = []
5622                     # TODO Information of its own ip is not available because db_vnfr is not updated.
5623                     additional_params["OSM"] = get_osm_params(
5624                         updated_db_vnfr, vdud["id"], 1
5625                     )
5626 cloud_init_list.append(
5627 self._parse_cloud_init(
5628 cloud_init_text,
5629 additional_params,
5630 db_vnfd["id"],
5631 vdud["id"],
5632 )
5633 )
5634 vca_scaling_info.append(
5635 {
5636 "osm_vdu_id": vdud["id"],
5637 "member-vnf-index": member_vnf_index,
5638 "type": "create",
5639 "vdu_index": count_index,
5640 }
5641 )
5642 scaling_info["vdu-create"][vdud["id"]] = count_index
5643 if self.ro_config.ng:
5644 self.logger.debug(
5645 "New Resources to be deployed: {}".format(scaling_info)
5646 )
5647 await self._scale_ng_ro(
5648 logging_text,
5649 db_nsr,
5650 update_db_nslcmops,
5651 updated_db_vnfr,
5652 scaling_info,
5653 stage,
5654 )
5655 return "COMPLETED", "Done"
5656 except (LcmException, asyncio.CancelledError):
5657 raise
5658 except Exception as e:
5659 self.logger.debug("Error updating VNF {}".format(e))
5660 return "FAILED", "Error updating VNF {}".format(e)
5661
5662 async def _ns_charm_upgrade(
5663 self,
5664 ee_id,
5665 charm_id,
5666 charm_type,
5667 path,
5668 timeout: float = None,
5669 ) -> (str, str):
5670         """Upgrade a charm in a VNF instance.
5671
5672 Args:
5673 ee_id: Execution environment id
5674 path: Local path to the charm
5675 charm_id: charm-id
5676 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5677 timeout: (Float) Timeout for the ns update operation
5678
5679 Returns:
5680 result: (str, str) COMPLETED/FAILED, details
5681 """
5682 try:
5683 charm_type = charm_type or "lxc_proxy_charm"
5684 output = await self.vca_map[charm_type].upgrade_charm(
5685 ee_id=ee_id,
5686 path=path,
5687 charm_id=charm_id,
5688 charm_type=charm_type,
5689 timeout=timeout or self.timeout.ns_update,
5690 )
5691
5692             if output:
5693                 return "COMPLETED", output
5694             return "FAILED", "Charm upgrade returned no output"  # avoid implicit None return
5695 except (LcmException, asyncio.CancelledError):
5696 raise
5697
5698 except Exception as e:
5699 self.logger.debug("Error upgrading charm {}".format(path))
5700
5701 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5702
5703 async def update(self, nsr_id, nslcmop_id):
5704         """Update the NS according to the requested update type.
5705
5706         Performs the upgrade of VNF instances and then updates the revision
5707         number in the VNF record.
5708
5709         Args:
5710             nsr_id: id of the network service to be updated
5711             nslcmop_id: ns lcm operation id
5712
5713         Returns:
5714             nslcmop_operation_state, detailed_status
5715         Raises: DbException, LcmException, N2VCException, K8sException
5716         """
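     # Illustrative shape of the request handled below (field names as read
     # from db_nslcmop["operationParams"]; values are examples only):
     #   updateType: CHANGE_VNFPKG | REMOVE_VNF | OPERATE_VNF
     #   changeVnfPackageData:            # for CHANGE_VNFPKG
     #     vnfInstanceId: <vnfr _id>
     #     vnfdId: <vnfd _id>
     #   removeVnfInstanceId: <vnfr _id>  # for REMOVE_VNF
     #   timeout_ns_update: 600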
5717 # Try to lock HA task here
5718 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5719 if not task_is_locked_by_me:
5720 return
5721
5722 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5723 self.logger.debug(logging_text + "Enter")
5724
5725 # Set the required variables to be filled up later
5726 db_nsr = None
5727 db_nslcmop_update = {}
5728 vnfr_update = {}
5729 nslcmop_operation_state = None
5730 db_nsr_update = {}
5731 error_description_nslcmop = ""
5732 exc = None
5733 change_type = "updated"
5734 detailed_status = ""
5735 member_vnf_index = None
5736
5737 try:
5738 # wait for any previous tasks in process
5739 step = "Waiting for previous operations to terminate"
5740 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5741 self._write_ns_status(
5742 nsr_id=nsr_id,
5743 ns_state=None,
5744 current_operation="UPDATING",
5745 current_operation_id=nslcmop_id,
5746 )
5747
5748 step = "Getting nslcmop from database"
5749 db_nslcmop = self.db.get_one(
5750 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5751 )
5752 update_type = db_nslcmop["operationParams"]["updateType"]
5753
5754 step = "Getting nsr from database"
5755 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5756 old_operational_status = db_nsr["operational-status"]
5757 db_nsr_update["operational-status"] = "updating"
5758 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5759 nsr_deployed = db_nsr["_admin"].get("deployed")
5760
5761 if update_type == "CHANGE_VNFPKG":
5762 # Get the input parameters given through update request
5763 vnf_instance_id = db_nslcmop["operationParams"][
5764 "changeVnfPackageData"
5765 ].get("vnfInstanceId")
5766
5767 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5768 "vnfdId"
5769 )
5770 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5771
5772 step = "Getting vnfr from database"
5773 db_vnfr = self.db.get_one(
5774 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5775 )
5776
5777 step = "Getting vnfds from database"
5778 # Latest VNFD
5779 latest_vnfd = self.db.get_one(
5780 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5781 )
5782 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5783
5784 # Current VNFD
5785 current_vnf_revision = db_vnfr.get("revision", 1)
5786 current_vnfd = self.db.get_one(
5787 "vnfds_revisions",
5788 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5789 fail_on_empty=False,
5790 )
5791 # Charm artifact paths will be filled up later
5792 (
5793 current_charm_artifact_path,
5794 target_charm_artifact_path,
5795 charm_artifact_paths,
5796 helm_artifacts,
5797 ) = ([], [], [], [])
5798
5799 step = "Checking if revision has changed in VNFD"
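             # only CHANGE_VNFPKG requests whose stored VNFR revision lags the
             # latest VNFD revision require any work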
5800 if current_vnf_revision != latest_vnfd_revision:
5801 change_type = "policy_updated"
5802
5803 # There is new revision of VNFD, update operation is required
5804 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5805 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5806
5807 step = "Removing the VNFD packages if they exist in the local path"
5808 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5809 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5810
5811 step = "Get the VNFD packages from FSMongo"
5812 self.fs.sync(from_path=latest_vnfd_path)
5813 self.fs.sync(from_path=current_vnfd_path)
5814
5815 step = (
5816 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5817 )
5818 current_base_folder = current_vnfd["_admin"]["storage"]
5819 latest_base_folder = latest_vnfd["_admin"]["storage"]
5820
5821 for vca_index, vca_deployed in enumerate(
5822 get_iterable(nsr_deployed, "VCA")
5823 ):
5824 vnf_index = db_vnfr.get("member-vnf-index-ref")
5825
5826 # Getting charm-id and charm-type
5827 if vca_deployed.get("member-vnf-index") == vnf_index:
5828 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5829 vca_type = vca_deployed.get("type")
5830 vdu_count_index = vca_deployed.get("vdu_count_index")
5831
5832 # Getting ee-id
5833 ee_id = vca_deployed.get("ee_id")
5834
5835 step = "Getting descriptor config"
5836 if current_vnfd.get("kdu"):
5837 search_key = "kdu_name"
5838 else:
5839 search_key = "vnfd_id"
5840
5841 entity_id = vca_deployed.get(search_key)
5842
5843 descriptor_config = get_configuration(
5844 current_vnfd, entity_id
5845 )
5846
5847 if "execution-environment-list" in descriptor_config:
5848 ee_list = descriptor_config.get(
5849 "execution-environment-list", []
5850 )
5851 else:
5852 ee_list = []
5853
5854                         # There could be several charms used in the same VNF
5855 for ee_item in ee_list:
5856 if ee_item.get("juju"):
5857 step = "Getting charm name"
5858 charm_name = ee_item["juju"].get("charm")
5859
5860 step = "Setting Charm artifact paths"
5861 current_charm_artifact_path.append(
5862 get_charm_artifact_path(
5863 current_base_folder,
5864 charm_name,
5865 vca_type,
5866 current_vnf_revision,
5867 )
5868 )
5869 target_charm_artifact_path.append(
5870 get_charm_artifact_path(
5871 latest_base_folder,
5872 charm_name,
5873 vca_type,
5874 latest_vnfd_revision,
5875 )
5876 )
5877 elif ee_item.get("helm-chart"):
5878 # add chart to list and all parameters
5879 step = "Getting helm chart name"
5880 chart_name = ee_item.get("helm-chart")
5881 vca_type = "helm-v3"
5882 step = "Setting Helm chart artifact paths"
5883
5884 helm_artifacts.append(
5885 {
5886 "current_artifact_path": get_charm_artifact_path(
5887 current_base_folder,
5888 chart_name,
5889 vca_type,
5890 current_vnf_revision,
5891 ),
5892 "target_artifact_path": get_charm_artifact_path(
5893 latest_base_folder,
5894 chart_name,
5895 vca_type,
5896 latest_vnfd_revision,
5897 ),
5898 "ee_id": ee_id,
5899 "vca_index": vca_index,
5900 "vdu_index": vdu_count_index,
5901 }
5902 )
5903
5904 charm_artifact_paths = zip(
5905 current_charm_artifact_path, target_charm_artifact_path
5906 )
5907
5908 step = "Checking if software version has changed in VNFD"
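                 # a software-version bump forces a full redeploy of the VNF;
                 # that path is not supported when the VNF has a charm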
5909 if find_software_version(current_vnfd) != find_software_version(
5910 latest_vnfd
5911 ):
5912 step = "Checking if existing VNF has charm"
5913 for current_charm_path, target_charm_path in list(
5914 charm_artifact_paths
5915 ):
5916 if current_charm_path:
5917 raise LcmException(
5918 "Software version change is not supported as VNF instance {} has charm.".format(
5919 vnf_instance_id
5920 )
5921 )
5922
5923 step = "Checking whether the descriptor has SFC"
5924 if db_nsr.get("nsd", {}).get("vnffgd"):
5925 raise LcmException(
5926                         "NS update is not allowed for NS with SFC"
5927 )
5928
5929 # There is no change in the charm package, then redeploy the VNF
5930 # based on new descriptor
5931 step = "Redeploying VNF"
5932 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5933 (result, detailed_status) = await self._ns_redeploy_vnf(
5934 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5935 )
5936 if result == "FAILED":
5937 nslcmop_operation_state = result
5938 error_description_nslcmop = detailed_status
5939 old_operational_status = "failed"
5940 db_nslcmop_update["detailed-status"] = detailed_status
5941 db_nsr_update["detailed-status"] = detailed_status
5942 scaling_aspect = get_scaling_aspect(latest_vnfd)
5943 scaling_group_desc = db_nsr.get("_admin").get(
5944 "scaling-group", None
5945 )
5946 if scaling_group_desc:
5947 for aspect in scaling_aspect:
5948 scaling_group_id = aspect.get("id")
5949 for scale_index, scaling_group in enumerate(
5950 scaling_group_desc
5951 ):
5952 if scaling_group.get("name") == scaling_group_id:
5953 db_nsr_update[
5954 "_admin.scaling-group.{}.nb-scale-op".format(
5955 scale_index
5956 )
5957 ] = 0
5958 self.logger.debug(
5959 logging_text
5960 + " step {} Done with result {} {}".format(
5961 step, nslcmop_operation_state, detailed_status
5962 )
5963 )
5964
5965 else:
5966 step = "Checking if any charm package has changed or not"
5967 for current_charm_path, target_charm_path in list(
5968 charm_artifact_paths
5969 ):
5970 if (
5971 current_charm_path
5972 and target_charm_path
5973 and self.check_charm_hash_changed(
5974 current_charm_path, target_charm_path
5975 )
5976 ):
5977 step = "Checking whether VNF uses juju bundle"
5978 if check_juju_bundle_existence(current_vnfd):
5979 raise LcmException(
5980 "Charm upgrade is not supported for the instance which"
5981 " uses juju-bundle: {}".format(
5982 check_juju_bundle_existence(current_vnfd)
5983 )
5984 )
5985
5986 step = "Upgrading Charm"
5987 (
5988 result,
5989 detailed_status,
5990 ) = await self._ns_charm_upgrade(
5991 ee_id=ee_id,
5992 charm_id=vca_id,
5993 charm_type=vca_type,
5994 path=self.fs.path + target_charm_path,
5995 timeout=timeout_seconds,
5996 )
5997
5998 if result == "FAILED":
5999 nslcmop_operation_state = result
6000 error_description_nslcmop = detailed_status
6001
6002 db_nslcmop_update["detailed-status"] = detailed_status
6003 self.logger.debug(
6004 logging_text
6005 + " step {} Done with result {} {}".format(
6006 step, nslcmop_operation_state, detailed_status
6007 )
6008 )
6009
6010 step = "Updating policies"
6011 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6012 result = "COMPLETED"
6013 detailed_status = "Done"
6014 db_nslcmop_update["detailed-status"] = "Done"
6015
6016 # helm base EE
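                 # upgrade helm-based execution environments whose artifact
                 # changed, then re-run the "config" initial primitive with
                 # refreshed parameters (including ssh key injection if required)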
6017 for item in helm_artifacts:
6018 if not (
6019 item["current_artifact_path"]
6020 and item["target_artifact_path"]
6021 and self.check_charm_hash_changed(
6022 item["current_artifact_path"],
6023 item["target_artifact_path"],
6024 )
6025 ):
6026 continue
6027 db_update_entry = "_admin.deployed.VCA.{}.".format(
6028 item["vca_index"]
6029 )
6030 vnfr_id = db_vnfr["_id"]
6031 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6032 db_dict = {
6033 "collection": "nsrs",
6034 "filter": {"_id": nsr_id},
6035 "path": db_update_entry,
6036 }
6037 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6038 await self.vca_map[vca_type].upgrade_execution_environment(
6039 namespace=namespace,
6040 helm_id=helm_id,
6041 db_dict=db_dict,
6042 config=osm_config,
6043 artifact_path=item["target_artifact_path"],
6044 vca_type=vca_type,
6045 )
6046 vnf_id = db_vnfr.get("vnfd-ref")
6047 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6048 self.logger.debug("get ssh key block")
6049 rw_mgmt_ip = None
6050 if deep_get(
6051 config_descriptor,
6052 ("config-access", "ssh-access", "required"),
6053 ):
6054 # Needed to inject a ssh key
6055 user = deep_get(
6056 config_descriptor,
6057 ("config-access", "ssh-access", "default-user"),
6058 )
6059 step = (
6060                                 "Installing configuration software: getting public ssh key"
6061 )
6062 pub_key = await self.vca_map[
6063 vca_type
6064 ].get_ee_ssh_public__key(
6065 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6066 )
6067
6068 step = (
6069 "Insert public key into VM user={} ssh_key={}".format(
6070 user, pub_key
6071 )
6072 )
6073 self.logger.debug(logging_text + step)
6074
6075 # wait for RO (ip-address) Insert pub_key into VM
6076 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6077 logging_text,
6078 nsr_id,
6079 vnfr_id,
6080 None,
6081 item["vdu_index"],
6082 user=user,
6083 pub_key=pub_key,
6084 )
6085
6086 initial_config_primitive_list = config_descriptor.get(
6087 "initial-config-primitive"
6088 )
6089 config_primitive = next(
6090 (
6091 p
6092 for p in initial_config_primitive_list
6093 if p["name"] == "config"
6094 ),
6095 None,
6096 )
6097 if not config_primitive:
6098 continue
6099
6100 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6101 if rw_mgmt_ip:
6102 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6103 if db_vnfr.get("additionalParamsForVnf"):
6104 deploy_params.update(
6105 parse_yaml_strings(
6106 db_vnfr["additionalParamsForVnf"].copy()
6107 )
6108 )
6109 primitive_params_ = self._map_primitive_params(
6110 config_primitive, {}, deploy_params
6111 )
6112
6113 step = "execute primitive '{}' params '{}'".format(
6114 config_primitive["name"], primitive_params_
6115 )
6116 self.logger.debug(logging_text + step)
6117 await self.vca_map[vca_type].exec_primitive(
6118 ee_id=ee_id,
6119 primitive_name=config_primitive["name"],
6120 params_dict=primitive_params_,
6121 db_dict=db_dict,
6122 vca_id=vca_id,
6123 vca_type=vca_type,
6124 )
6125
6126 step = "Updating policies"
6127 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6128 detailed_status = "Done"
6129 db_nslcmop_update["detailed-status"] = "Done"
6130
6131                 # If nslcmop_operation_state is None, no operation has failed.
6132 if not nslcmop_operation_state:
6133 nslcmop_operation_state = "COMPLETED"
6134
6135                 # If the CHANGE_VNFPKG operation is successful,
6136                 # the VNF revision needs to be updated
6137 vnfr_update["revision"] = latest_vnfd_revision
6138 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6139
6140 self.logger.debug(
6141 logging_text
6142 + " task Done with result {} {}".format(
6143 nslcmop_operation_state, detailed_status
6144 )
6145 )
6146 elif update_type == "REMOVE_VNF":
6147 # This part is included in https://osm.etsi.org/gerrit/11876
6148 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6149 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6150 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6151 step = "Removing VNF"
6152 (result, detailed_status) = await self.remove_vnf(
6153 nsr_id, nslcmop_id, vnf_instance_id
6154 )
6155 if result == "FAILED":
6156 nslcmop_operation_state = result
6157 error_description_nslcmop = detailed_status
6158 db_nslcmop_update["detailed-status"] = detailed_status
6159 change_type = "vnf_terminated"
6160 if not nslcmop_operation_state:
6161 nslcmop_operation_state = "COMPLETED"
6162 self.logger.debug(
6163 logging_text
6164 + " task Done with result {} {}".format(
6165 nslcmop_operation_state, detailed_status
6166 )
6167 )
6168
6169 elif update_type == "OPERATE_VNF":
6170 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6171 "vnfInstanceId"
6172 ]
6173 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6174 "changeStateTo"
6175 ]
6176 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6177 "additionalParam"
6178 ]
6179 (result, detailed_status) = await self.rebuild_start_stop(
6180 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6181 )
6182 if result == "FAILED":
6183 nslcmop_operation_state = result
6184 error_description_nslcmop = detailed_status
6185 db_nslcmop_update["detailed-status"] = detailed_status
6186 if not nslcmop_operation_state:
6187 nslcmop_operation_state = "COMPLETED"
6188 self.logger.debug(
6189 logging_text
6190 + " task Done with result {} {}".format(
6191 nslcmop_operation_state, detailed_status
6192 )
6193 )
6194
6195             # If nslcmop_operation_state is None, no operation has failed;
6196             # all requested operations have completed successfully.
6197 if not nslcmop_operation_state:
6198 nslcmop_operation_state = "COMPLETED"
6199 db_nsr_update["operational-status"] = old_operational_status
6200
6201 except (DbException, LcmException, N2VCException, K8sException) as e:
6202 self.logger.error(logging_text + "Exit Exception {}".format(e))
6203 exc = e
6204 except asyncio.CancelledError:
6205 self.logger.error(
6206 logging_text + "Cancelled Exception while '{}'".format(step)
6207 )
6208 exc = "Operation was cancelled"
6209 except asyncio.TimeoutError:
6210 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6211 exc = "Timeout"
6212 except Exception as e:
6213 exc = traceback.format_exc()
6214 self.logger.critical(
6215 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6216 exc_info=True,
6217 )
6218 finally:
6219 if exc:
6220 db_nslcmop_update[
6221 "detailed-status"
6222 ] = (
6223 detailed_status
6224 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6225 nslcmop_operation_state = "FAILED"
6226 db_nsr_update["operational-status"] = old_operational_status
6227 if db_nsr:
6228 self._write_ns_status(
6229 nsr_id=nsr_id,
6230 ns_state=db_nsr["nsState"],
6231 current_operation="IDLE",
6232 current_operation_id=None,
6233 other_update=db_nsr_update,
6234 )
6235
6236 self._write_op_status(
6237 op_id=nslcmop_id,
6238 stage="",
6239 error_message=error_description_nslcmop,
6240 operation_state=nslcmop_operation_state,
6241 other_update=db_nslcmop_update,
6242 )
6243
6244 if nslcmop_operation_state:
6245 try:
6246 msg = {
6247 "nsr_id": nsr_id,
6248 "nslcmop_id": nslcmop_id,
6249 "operationState": nslcmop_operation_state,
6250 }
6251 if (
6252 change_type in ("vnf_terminated", "policy_updated")
6253 and member_vnf_index
6254 ):
6255 msg.update({"vnf_member_index": member_vnf_index})
6256 await self.msg.aiowrite("ns", change_type, msg)
6257 except Exception as e:
6258 self.logger.error(
6259 logging_text + "kafka_write notification Exception {}".format(e)
6260 )
6261 self.logger.debug(logging_text + "Exit")
6262 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6263 return nslcmop_operation_state, detailed_status
6264
6265 async def scale(self, nsr_id, nslcmop_id):
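     """Scale an NS in or out, following the requested scaling group descriptor.

     Applies the VDU and KDU deltas of the scaling aspect and executes any
     pre-scale and post-scale config primitives declared in the VNFD.

     Args:
         nsr_id: NS instance id
         nslcmop_id: ns lcm operation id
     """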
6266 # Try to lock HA task here
6267 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6268 if not task_is_locked_by_me:
6269 return
6270
6271 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6272         stage = ["", "", ""]  # stage, step, VIM progress
6273         tasks_dict_info = {}
6275 self.logger.debug(logging_text + "Enter")
6276 # get all needed from database
6277 db_nsr = None
6278 db_nslcmop_update = {}
6279 db_nsr_update = {}
6280 exc = None
6281         # in case of error, indicates which part of the scaling failed, to set the nsr to error status
6282 scale_process = None
6283 old_operational_status = ""
6284 old_config_status = ""
6285 nsi_id = None
6286 prom_job_name = ""
6287 try:
6288 # wait for any previous tasks in process
6289 step = "Waiting for previous operations to terminate"
6290 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6291 self._write_ns_status(
6292 nsr_id=nsr_id,
6293 ns_state=None,
6294 current_operation="SCALING",
6295 current_operation_id=nslcmop_id,
6296 )
6297
6298 step = "Getting nslcmop from database"
6299 self.logger.debug(
6300 step + " after having waited for previous tasks to be completed"
6301 )
6302 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6303
6304 step = "Getting nsr from database"
6305 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6306 old_operational_status = db_nsr["operational-status"]
6307 old_config_status = db_nsr["config-status"]
6308
6309 step = "Checking whether the descriptor has SFC"
6310 if db_nsr.get("nsd", {}).get("vnffgd"):
6311 raise LcmException("Scaling is not allowed for NS with SFC")
6312
6313 step = "Parsing scaling parameters"
6314 db_nsr_update["operational-status"] = "scaling"
6315 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6316 nsr_deployed = db_nsr["_admin"].get("deployed")
6317
6318 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6319 "scaleByStepData"
6320 ]["member-vnf-index"]
6321 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6322 "scaleByStepData"
6323 ]["scaling-group-descriptor"]
6324 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6325 # for backward compatibility
6326 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6327 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6328 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6329 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6330
6331 step = "Getting vnfr from database"
6332 db_vnfr = self.db.get_one(
6333 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6334 )
6335
6336 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6337
6338 step = "Getting vnfd from database"
6339 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6340
6341 base_folder = db_vnfd["_admin"]["storage"]
6342
6343 step = "Getting scaling-group-descriptor"
6344 scaling_descriptor = find_in_list(
6345 get_scaling_aspect(db_vnfd),
6346 lambda scale_desc: scale_desc["name"] == scaling_group,
6347 )
6348 if not scaling_descriptor:
6349 raise LcmException(
6350 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6351 "at vnfd:scaling-group-descriptor".format(scaling_group)
6352 )
6353
6354 step = "Sending scale order to VIM"
6355 # TODO check if ns is in a proper status
6356 nb_scale_op = 0
6357 if not db_nsr["_admin"].get("scaling-group"):
6358 self.update_db_2(
6359 "nsrs",
6360 nsr_id,
6361 {
6362 "_admin.scaling-group": [
6363 {
6364 "name": scaling_group,
6365 "vnf_index": vnf_index,
6366 "nb-scale-op": 0,
6367 }
6368 ]
6369 },
6370 )
6371 admin_scale_index = 0
6372 else:
6373 for admin_scale_index, admin_scale_info in enumerate(
6374 db_nsr["_admin"]["scaling-group"]
6375 ):
6376 if (
6377 admin_scale_info["name"] == scaling_group
6378 and admin_scale_info["vnf_index"] == vnf_index
6379 ):
6380 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6381 break
6382                 else:  # not found: advance the index past the last element and add a new entry
6383 admin_scale_index += 1
6384 db_nsr_update[
6385 "_admin.scaling-group.{}.name".format(admin_scale_index)
6386 ] = scaling_group
6387 db_nsr_update[
6388 "_admin.scaling-group.{}.vnf_index".format(admin_scale_index)
6389 ] = vnf_index
6390
6391 vca_scaling_info = []
6392 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6393 if scaling_type == "SCALE_OUT":
6394 if "aspect-delta-details" not in scaling_descriptor:
6395 raise LcmException(
6396                         "Aspect delta details not found in scaling descriptor {}".format(
6397 scaling_descriptor["name"]
6398 )
6399 )
6400                 # check whether max-instance-count would be exceeded
6401 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6402
6403 scaling_info["scaling_direction"] = "OUT"
6404 scaling_info["vdu-create"] = {}
6405 scaling_info["kdu-create"] = {}
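                 # apply each delta of the aspect: vdu-delta entries are handled
                 # by RO, kdu-resource-delta entries by the k8s connectors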
6406 for delta in deltas:
6407 for vdu_delta in delta.get("vdu-delta", {}):
6408 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6409                         # vdu_index also provides the number of instances of the targeted vdu
6410 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6411 if vdu_index <= len(db_vnfr["vdur"]):
6412 vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
6413 prom_job_name = (
6414 db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
6415 )
6416 prom_job_name = prom_job_name.replace("_", "")
6417 prom_job_name = prom_job_name.replace("-", "")
6418 else:
6419 prom_job_name = None
6420 cloud_init_text = self._get_vdu_cloud_init_content(
6421 vdud, db_vnfd
6422 )
6423 if cloud_init_text:
6424 additional_params = (
6425 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6426 or {}
6427 )
6428 cloud_init_list = []
6429
6430 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6431 max_instance_count = 10
6432 if vdu_profile and "max-number-of-instances" in vdu_profile:
6433 max_instance_count = vdu_profile.get(
6434 "max-number-of-instances", 10
6435 )
6436
6437 default_instance_num = get_number_of_instances(
6438 db_vnfd, vdud["id"]
6439 )
6440 instances_number = vdu_delta.get("number-of-instances", 1)
6441 nb_scale_op += instances_number
6442
6443 new_instance_count = nb_scale_op + default_instance_num
6444                         # If the new count exceeds the maximum while the current vdu
6445                         # count is still below it, adjust the number of instances to create
6446                         if new_instance_count > max_instance_count > vdu_count:
6447                             instances_number = new_instance_count - max_instance_count
6448                         # otherwise keep the requested number of instances
6450
6451 if new_instance_count > max_instance_count:
6452 raise LcmException(
6453 "reached the limit of {} (max-instance-count) "
6454 "scaling-out operations for the "
6455 "scaling-group-descriptor '{}'".format(
6456 nb_scale_op, scaling_group
6457 )
6458 )
6459 for x in range(vdu_delta.get("number-of-instances", 1)):
6460 if cloud_init_text:
6461 # TODO Information of its own ip is not available because db_vnfr is not updated.
6462 additional_params["OSM"] = get_osm_params(
6463 db_vnfr, vdu_delta["id"], vdu_index + x
6464 )
6465 cloud_init_list.append(
6466 self._parse_cloud_init(
6467 cloud_init_text,
6468 additional_params,
6469 db_vnfd["id"],
6470 vdud["id"],
6471 )
6472 )
6473 vca_scaling_info.append(
6474 {
6475 "osm_vdu_id": vdu_delta["id"],
6476 "member-vnf-index": vnf_index,
6477 "type": "create",
6478 "vdu_index": vdu_index + x,
6479 }
6480 )
6481 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6482 for kdu_delta in delta.get("kdu-resource-delta", {}):
6483 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6484 kdu_name = kdu_profile["kdu-name"]
6485 resource_name = kdu_profile.get("resource-name", "")
6486
6487                     # A delta may reference several different KDUs,
6488                     # so keep a separate list for each KDU
6489 if not scaling_info["kdu-create"].get(kdu_name, None):
6490 scaling_info["kdu-create"][kdu_name] = []
6491
6492 kdur = get_kdur(db_vnfr, kdu_name)
6493 if kdur.get("helm-chart"):
6494 k8s_cluster_type = "helm-chart-v3"
6495 self.logger.debug("kdur: {}".format(kdur))
6496 elif kdur.get("juju-bundle"):
6497 k8s_cluster_type = "juju-bundle"
6498 else:
6499 raise LcmException(
6500 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6501 "juju-bundle. Maybe an old NBI version is running".format(
6502 db_vnfr["member-vnf-index-ref"], kdu_name
6503 )
6504 )
6505
6506 max_instance_count = 10
6507 if kdu_profile and "max-number-of-instances" in kdu_profile:
6508 max_instance_count = kdu_profile.get(
6509 "max-number-of-instances", 10
6510 )
6511
6512 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6513 deployed_kdu, _ = get_deployed_kdu(
6514 nsr_deployed, kdu_name, vnf_index
6515 )
6516 if deployed_kdu is None:
6517 raise LcmException(
6518 "KDU '{}' for vnf '{}' not deployed".format(
6519 kdu_name, vnf_index
6520 )
6521 )
6522 kdu_instance = deployed_kdu.get("kdu-instance")
6523 instance_num = await self.k8scluster_map[
6524 k8s_cluster_type
6525 ].get_scale_count(
6526 resource_name,
6527 kdu_instance,
6528 vca_id=vca_id,
6529 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6530 kdu_model=deployed_kdu.get("kdu-model"),
6531 )
6532 kdu_replica_count = instance_num + kdu_delta.get(
6533 "number-of-instances", 1
6534 )
6535
6536                     # If the new count exceeds the maximum while instance_num is
6537                     # still below it, cap the kdu replica count at the maximum
6538 if kdu_replica_count > max_instance_count > instance_num:
6539 kdu_replica_count = max_instance_count
6540 if kdu_replica_count > max_instance_count:
6541 raise LcmException(
6542 "reached the limit of {} (max-instance-count) "
6543 "scaling-out operations for the "
6544 "scaling-group-descriptor '{}'".format(
6545 instance_num, scaling_group
6546 )
6547 )
6548
6549 for x in range(kdu_delta.get("number-of-instances", 1)):
6550 vca_scaling_info.append(
6551 {
6552 "osm_kdu_id": kdu_name,
6553 "member-vnf-index": vnf_index,
6554 "type": "create",
6555 "kdu_index": instance_num + x - 1,
6556 }
6557 )
6558 scaling_info["kdu-create"][kdu_name].append(
6559 {
6560 "member-vnf-index": vnf_index,
6561 "type": "create",
6562 "k8s-cluster-type": k8s_cluster_type,
6563 "resource-name": resource_name,
6564 "scale": kdu_replica_count,
6565 }
6566 )
6567 elif scaling_type == "SCALE_IN":
6568 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6569
6570 scaling_info["scaling_direction"] = "IN"
6571 scaling_info["vdu-delete"] = {}
6572 scaling_info["kdu-delete"] = {}
6573
6574 for delta in deltas:
6575 for vdu_delta in delta.get("vdu-delta", {}):
6576 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6577 min_instance_count = 0
6578 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6579 if vdu_profile and "min-number-of-instances" in vdu_profile:
6580 min_instance_count = vdu_profile["min-number-of-instances"]
6581
6582 default_instance_num = get_number_of_instances(
6583 db_vnfd, vdu_delta["id"]
6584 )
6585 instance_num = vdu_delta.get("number-of-instances", 1)
6586 nb_scale_op -= instance_num
6587
6588 new_instance_count = nb_scale_op + default_instance_num
6589
6590 if new_instance_count < min_instance_count < vdu_count:
6591 instances_number = min_instance_count - new_instance_count
6592 else:
6593 instances_number = instance_num
6594
6595 if new_instance_count < min_instance_count:
6596 raise LcmException(
6597 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6598 "scaling-group-descriptor '{}'".format(
6599 nb_scale_op, scaling_group
6600 )
6601 )
6602 for x in range(vdu_delta.get("number-of-instances", 1)):
6603 vca_scaling_info.append(
6604 {
6605 "osm_vdu_id": vdu_delta["id"],
6606 "member-vnf-index": vnf_index,
6607 "type": "delete",
6608 "vdu_index": vdu_index - 1 - x,
6609 }
6610 )
6611 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6612 for kdu_delta in delta.get("kdu-resource-delta", {}):
6613 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6614 kdu_name = kdu_profile["kdu-name"]
6615 resource_name = kdu_profile.get("resource-name", "")
6616
6617 if not scaling_info["kdu-delete"].get(kdu_name, None):
6618 scaling_info["kdu-delete"][kdu_name] = []
6619
6620 kdur = get_kdur(db_vnfr, kdu_name)
6621 if kdur.get("helm-chart"):
6622 k8s_cluster_type = "helm-chart-v3"
6623 self.logger.debug("kdur: {}".format(kdur))
6624 elif kdur.get("juju-bundle"):
6625 k8s_cluster_type = "juju-bundle"
6626 else:
6627 raise LcmException(
6628 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6629 "juju-bundle. Maybe an old NBI version is running".format(
6630 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6631 )
6632 )
6633
6634 min_instance_count = 0
6635 if kdu_profile and "min-number-of-instances" in kdu_profile:
6636 min_instance_count = kdu_profile["min-number-of-instances"]
6637
6638 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6639 deployed_kdu, _ = get_deployed_kdu(
6640 nsr_deployed, kdu_name, vnf_index
6641 )
6642 if deployed_kdu is None:
6643 raise LcmException(
6644 "KDU '{}' for vnf '{}' not deployed".format(
6645 kdu_name, vnf_index
6646 )
6647 )
6648 kdu_instance = deployed_kdu.get("kdu-instance")
6649 instance_num = await self.k8scluster_map[
6650 k8s_cluster_type
6651 ].get_scale_count(
6652 resource_name,
6653 kdu_instance,
6654 vca_id=vca_id,
6655 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6656 kdu_model=deployed_kdu.get("kdu-model"),
6657 )
6658 kdu_replica_count = instance_num - kdu_delta.get(
6659 "number-of-instances", 1
6660 )
6661
6662 if kdu_replica_count < min_instance_count < instance_num:
6663 kdu_replica_count = min_instance_count
6664 if kdu_replica_count < min_instance_count:
6665 raise LcmException(
6666 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6667 "scaling-group-descriptor '{}'".format(
6668 instance_num, scaling_group
6669 )
6670 )
6671
6672 for x in range(kdu_delta.get("number-of-instances", 1)):
6673 vca_scaling_info.append(
6674 {
6675 "osm_kdu_id": kdu_name,
6676 "member-vnf-index": vnf_index,
6677 "type": "delete",
6678 "kdu_index": instance_num - x - 1,
6679 }
6680 )
6681 scaling_info["kdu-delete"][kdu_name].append(
6682 {
6683 "member-vnf-index": vnf_index,
6684 "type": "delete",
6685 "k8s-cluster-type": k8s_cluster_type,
6686 "resource-name": resource_name,
6687 "scale": kdu_replica_count,
6688 }
6689 )
6690
6691             # update VDU_SCALING_INFO with the IP addresses of the VDUs to be deleted
6692 vdu_delete = copy(scaling_info.get("vdu-delete"))
6693 if scaling_info["scaling_direction"] == "IN":
6694 for vdur in reversed(db_vnfr["vdur"]):
6695 if vdu_delete.get(vdur["vdu-id-ref"]):
6696 vdu_delete[vdur["vdu-id-ref"]] -= 1
6697 scaling_info["vdu"].append(
6698 {
6699 "name": vdur.get("name") or vdur.get("vdu-name"),
6700 "vdu_id": vdur["vdu-id-ref"],
6701 "interface": [],
6702 }
6703 )
6704 for interface in vdur["interfaces"]:
6705 scaling_info["vdu"][-1]["interface"].append(
6706 {
6707 "name": interface["name"],
6708 "ip_address": interface["ip-address"],
6709 "mac_address": interface.get("mac-address"),
6710 }
6711 )
6712 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6713
6714 # PRE-SCALE BEGIN
6715 step = "Executing pre-scale vnf-config-primitive"
6716 if scaling_descriptor.get("scaling-config-action"):
6717 for scaling_config_action in scaling_descriptor[
6718 "scaling-config-action"
6719 ]:
6720 if (
6721 scaling_config_action.get("trigger") == "pre-scale-in"
6722 and scaling_type == "SCALE_IN"
6723 ) or (
6724 scaling_config_action.get("trigger") == "pre-scale-out"
6725 and scaling_type == "SCALE_OUT"
6726 ):
6727 vnf_config_primitive = scaling_config_action[
6728 "vnf-config-primitive-name-ref"
6729 ]
6730 step = db_nslcmop_update[
6731 "detailed-status"
6732 ] = "executing pre-scale scaling-config-action '{}'".format(
6733 vnf_config_primitive
6734 )
6735
6736 # look for primitive
6737 for config_primitive in (
6738 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6739 ).get("config-primitive", ()):
6740 if config_primitive["name"] == vnf_config_primitive:
6741 break
6742 else:
6743 raise LcmException(
6744 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6745 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6746 "primitive".format(scaling_group, vnf_config_primitive)
6747 )
6748
6749 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6750 if db_vnfr.get("additionalParamsForVnf"):
6751 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6752
6753 scale_process = "VCA"
6754 db_nsr_update["config-status"] = "configuring pre-scaling"
6755 primitive_params = self._map_primitive_params(
6756 config_primitive, {}, vnfr_params
6757 )
6758
6759 # Pre-scale retry check: Check if this sub-operation has been executed before
6760 op_index = self._check_or_add_scale_suboperation(
6761 db_nslcmop,
6762 vnf_index,
6763 vnf_config_primitive,
6764 primitive_params,
6765 "PRE-SCALE",
6766 )
6767 if op_index == self.SUBOPERATION_STATUS_SKIP:
6768 # Skip sub-operation
6769 result = "COMPLETED"
6770 result_detail = "Done"
6771 self.logger.debug(
6772 logging_text
6773 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6774 vnf_config_primitive, result, result_detail
6775 )
6776 )
6777 else:
6778 if op_index == self.SUBOPERATION_STATUS_NEW:
6779 # New sub-operation: Get index of this sub-operation
6780 op_index = (
6781 len(db_nslcmop.get("_admin", {}).get("operations"))
6782 - 1
6783 )
6784 self.logger.debug(
6785 logging_text
6786 + "vnf_config_primitive={} New sub-operation".format(
6787 vnf_config_primitive
6788 )
6789 )
6790 else:
6791 # retry: Get registered params for this existing sub-operation
6792 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6793 op_index
6794 ]
6795 vnf_index = op.get("member_vnf_index")
6796 vnf_config_primitive = op.get("primitive")
6797 primitive_params = op.get("primitive_params")
6798 self.logger.debug(
6799 logging_text
6800 + "vnf_config_primitive={} Sub-operation retry".format(
6801 vnf_config_primitive
6802 )
6803 )
6804                             # Execute the primitive, either with new (first-time) or registered (retry) args
6805 ee_descriptor_id = config_primitive.get(
6806 "execution-environment-ref"
6807 )
6808 primitive_name = config_primitive.get(
6809 "execution-environment-primitive", vnf_config_primitive
6810 )
6811 ee_id, vca_type = self._look_for_deployed_vca(
6812 nsr_deployed["VCA"],
6813 member_vnf_index=vnf_index,
6814 vdu_id=None,
6815 vdu_count_index=None,
6816 ee_descriptor_id=ee_descriptor_id,
6817 )
6818 result, result_detail = await self._ns_execute_primitive(
6819 ee_id,
6820 primitive_name,
6821 primitive_params,
6822 vca_type=vca_type,
6823 vca_id=vca_id,
6824 )
6825 self.logger.debug(
6826 logging_text
6827 + "vnf_config_primitive={} Done with result {} {}".format(
6828 vnf_config_primitive, result, result_detail
6829 )
6830 )
6831 # Update operationState = COMPLETED | FAILED
6832 self._update_suboperation_status(
6833 db_nslcmop, op_index, result, result_detail
6834 )
6835
6836 if result == "FAILED":
6837 raise LcmException(result_detail)
6838 db_nsr_update["config-status"] = old_config_status
6839 scale_process = None
6840 # PRE-SCALE END
6841
6842 db_nsr_update[
6843 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6844 ] = nb_scale_op
6845 db_nsr_update[
6846 "_admin.scaling-group.{}.time".format(admin_scale_index)
6847 ] = time()
6848
6849 # SCALE-IN VCA - BEGIN
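             # destroy the execution environments of the VDUs being removed and
             # drop their entries from the NSR VCA/configuration lists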
6850 if vca_scaling_info:
6851 step = db_nslcmop_update[
6852 "detailed-status"
6853 ] = "Deleting the execution environments"
6854 scale_process = "VCA"
6855 for vca_info in vca_scaling_info:
6856 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6857 member_vnf_index = str(vca_info["member-vnf-index"])
6858 self.logger.debug(
6859 logging_text + "vdu info: {}".format(vca_info)
6860 )
6861 if vca_info.get("osm_vdu_id"):
6862 vdu_id = vca_info["osm_vdu_id"]
6863 vdu_index = int(vca_info["vdu_index"])
6864 stage[
6865 1
6866 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6867 member_vnf_index, vdu_id, vdu_index
6868 )
6869 stage[2] = step = "Scaling in VCA"
6870 self._write_op_status(op_id=nslcmop_id, stage=stage)
6871 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6872 config_update = db_nsr["configurationStatus"]
6873 for vca_index, vca in enumerate(vca_update):
6874 if (
6875                                 (vca and vca.get("ee_id"))
6876 and vca["member-vnf-index"] == member_vnf_index
6877 and vca["vdu_count_index"] == vdu_index
6878 ):
6879 if vca.get("vdu_id"):
6880 config_descriptor = get_configuration(
6881 db_vnfd, vca.get("vdu_id")
6882 )
6883 elif vca.get("kdu_name"):
6884 config_descriptor = get_configuration(
6885 db_vnfd, vca.get("kdu_name")
6886 )
6887 else:
6888 config_descriptor = get_configuration(
6889 db_vnfd, db_vnfd["id"]
6890 )
6891 operation_params = (
6892 db_nslcmop.get("operationParams") or {}
6893 )
6894 exec_terminate_primitives = not operation_params.get(
6895 "skip_terminate_primitives"
6896 ) and vca.get("needed_terminate")
6897 task = asyncio.ensure_future(
6898 asyncio.wait_for(
6899 self.destroy_N2VC(
6900 logging_text,
6901 db_nslcmop,
6902 vca,
6903 config_descriptor,
6904 vca_index,
6905 destroy_ee=True,
6906 exec_primitives=exec_terminate_primitives,
6907 scaling_in=True,
6908 vca_id=vca_id,
6909 ),
6910 timeout=self.timeout.charm_delete,
6911 )
6912 )
6913 tasks_dict_info[task] = "Terminating VCA {}".format(
6914 vca.get("ee_id")
6915 )
6916 del vca_update[vca_index]
6917 del config_update[vca_index]
6918 # wait for pending tasks of terminate primitives
6919 if tasks_dict_info:
6920 self.logger.debug(
6921 logging_text
6922 + "Waiting for tasks {}".format(
6923 list(tasks_dict_info.keys())
6924 )
6925 )
6926 error_list = await self._wait_for_tasks(
6927 logging_text,
6928 tasks_dict_info,
6929 min(
6930 self.timeout.charm_delete, self.timeout.ns_terminate
6931 ),
6932 stage,
6933 nslcmop_id,
6934 )
6935 tasks_dict_info.clear()
6936 if error_list:
6937 raise LcmException("; ".join(error_list))
6938
6939 db_vca_and_config_update = {
6940 "_admin.deployed.VCA": vca_update,
6941 "configurationStatus": config_update,
6942 }
6943 self.update_db_2(
6944 "nsrs", db_nsr["_id"], db_vca_and_config_update
6945 )
6946 scale_process = None
6947 # SCALE-IN VCA - END
6948
6949 # SCALE RO - BEGIN
6950 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6951 scale_process = "RO"
6952 if self.ro_config.ng:
6953 await self._scale_ng_ro(
6954 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6955 )
6956 scaling_info.pop("vdu-create", None)
6957 scaling_info.pop("vdu-delete", None)
6958
6959 scale_process = None
6960 # SCALE RO - END
6961
6962 # SCALE KDU - BEGIN
6963 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6964 scale_process = "KDU"
6965 await self._scale_kdu(
6966 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6967 )
6968 scaling_info.pop("kdu-create", None)
6969 scaling_info.pop("kdu-delete", None)
6970
6971 scale_process = None
6972 # SCALE KDU - END
6973
6974 if db_nsr_update:
6975 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6976
6977 # SCALE-UP VCA - BEGIN
6978 if vca_scaling_info:
6979 step = db_nslcmop_update[
6980 "detailed-status"
6981 ] = "Creating new execution environments"
6982 scale_process = "VCA"
6983 for vca_info in vca_scaling_info:
6984 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6985 member_vnf_index = str(vca_info["member-vnf-index"])
6986 self.logger.debug(
6987 logging_text + "vdu info: {}".format(vca_info)
6988 )
6989 vnfd_id = db_vnfr["vnfd-ref"]
6990 if vca_info.get("osm_vdu_id"):
6991 vdu_index = int(vca_info["vdu_index"])
6992 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6993 if db_vnfr.get("additionalParamsForVnf"):
6994 deploy_params.update(
6995 parse_yaml_strings(
6996 db_vnfr["additionalParamsForVnf"].copy()
6997 )
6998 )
6999 descriptor_config = get_configuration(
7000 db_vnfd, db_vnfd["id"]
7001 )
7002 if descriptor_config:
7003 vdu_id = None
7004 vdu_name = None
7005 kdu_name = None
7006 kdu_index = None
7007 self._deploy_n2vc(
7008 logging_text=logging_text
7009 + "member_vnf_index={} ".format(member_vnf_index),
7010 db_nsr=db_nsr,
7011 db_vnfr=db_vnfr,
7012 nslcmop_id=nslcmop_id,
7013 nsr_id=nsr_id,
7014 nsi_id=nsi_id,
7015 vnfd_id=vnfd_id,
7016 vdu_id=vdu_id,
7017 kdu_name=kdu_name,
7018 kdu_index=kdu_index,
7019 member_vnf_index=member_vnf_index,
7020 vdu_index=vdu_index,
7021 vdu_name=vdu_name,
7022 deploy_params=deploy_params,
7023 descriptor_config=descriptor_config,
7024 base_folder=base_folder,
7025 task_instantiation_info=tasks_dict_info,
7026 stage=stage,
7027 )
7028 vdu_id = vca_info["osm_vdu_id"]
7029 vdur = find_in_list(
7030 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7031 )
7032 descriptor_config = get_configuration(db_vnfd, vdu_id)
7033 if vdur.get("additionalParams"):
7034 deploy_params_vdu = parse_yaml_strings(
7035 vdur["additionalParams"]
7036 )
7037 else:
7038 deploy_params_vdu = deploy_params
7039 deploy_params_vdu["OSM"] = get_osm_params(
7040 db_vnfr, vdu_id, vdu_count_index=vdu_index
7041 )
7042 if descriptor_config:
7043 vdu_name = None
7044 kdu_name = None
7045 kdu_index = None
7046 stage[
7047 1
7048 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7049 member_vnf_index, vdu_id, vdu_index
7050 )
7051 stage[2] = step = "Scaling out VCA"
7052 self._write_op_status(op_id=nslcmop_id, stage=stage)
7053 self._deploy_n2vc(
7054 logging_text=logging_text
7055 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7056 member_vnf_index, vdu_id, vdu_index
7057 ),
7058 db_nsr=db_nsr,
7059 db_vnfr=db_vnfr,
7060 nslcmop_id=nslcmop_id,
7061 nsr_id=nsr_id,
7062 nsi_id=nsi_id,
7063 vnfd_id=vnfd_id,
7064 vdu_id=vdu_id,
7065 kdu_name=kdu_name,
7066 member_vnf_index=member_vnf_index,
7067 vdu_index=vdu_index,
7068 kdu_index=kdu_index,
7069 vdu_name=vdu_name,
7070 deploy_params=deploy_params_vdu,
7071 descriptor_config=descriptor_config,
7072 base_folder=base_folder,
7073 task_instantiation_info=tasks_dict_info,
7074 stage=stage,
7075 )
7076 # SCALE-UP VCA - END
7077 scale_process = None
7078
7079 # POST-SCALE BEGIN
7080 # execute post-scale config primitives
7081 step = "Executing post-scale vnf-config-primitive"
7082 if scaling_descriptor.get("scaling-config-action"):
7083 for scaling_config_action in scaling_descriptor[
7084 "scaling-config-action"
7085 ]:
7086 if (
7087 scaling_config_action.get("trigger") == "post-scale-in"
7088 and scaling_type == "SCALE_IN"
7089 ) or (
7090 scaling_config_action.get("trigger") == "post-scale-out"
7091 and scaling_type == "SCALE_OUT"
7092 ):
7093 vnf_config_primitive = scaling_config_action[
7094 "vnf-config-primitive-name-ref"
7095 ]
7096 step = db_nslcmop_update[
7097 "detailed-status"
7098 ] = "executing post-scale scaling-config-action '{}'".format(
7099 vnf_config_primitive
7100 )
7101
7102 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7103 if db_vnfr.get("additionalParamsForVnf"):
7104 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7105
7106 # look for the matching config primitive in the VNF configuration
7107 for config_primitive in (
7108 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7109 ).get("config-primitive", ()):
7110 if config_primitive["name"] == vnf_config_primitive:
7111 break
7112 else:
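# for/else: this branch runs only when the loop above finds no matching
# primitive (i.e. it never hits the break), so the descriptor reference is invalid.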
7113 raise LcmException(
7114 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7115 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7116 "config-primitive".format(
7117 scaling_group, vnf_config_primitive
7118 )
7119 )
7120 scale_process = "VCA"
7121 db_nsr_update["config-status"] = "configuring post-scaling"
7122 primitive_params = self._map_primitive_params(
7123 config_primitive, {}, vnfr_params
7124 )
7125
7126 # Post-scale retry check: Check if this sub-operation has been executed before
7127 op_index = self._check_or_add_scale_suboperation(
7128 db_nslcmop,
7129 vnf_index,
7130 vnf_config_primitive,
7131 primitive_params,
7132 "POST-SCALE",
7133 )
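# The sub-operation check yields one of three outcomes, handled by the
# branches below: SUBOPERATION_STATUS_SKIP when an earlier attempt already
# completed it, SUBOPERATION_STATUS_NEW when it is registered for the first
# time, or the index of an existing entry whose registered params are reused
# on retry.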
7134 if op_index == self.SUBOPERATION_STATUS_SKIP:
7135 # Skip sub-operation
7136 result = "COMPLETED"
7137 result_detail = "Done"
7138 self.logger.debug(
7139 logging_text
7140 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7141 vnf_config_primitive, result, result_detail
7142 )
7143 )
7144 else:
7145 if op_index == self.SUBOPERATION_STATUS_NEW:
7146 # New sub-operation: Get index of this sub-operation
7147 op_index = (
7148 len(db_nslcmop.get("_admin", {}).get("operations"))
7149 - 1
7150 )
7151 self.logger.debug(
7152 logging_text
7153 + "vnf_config_primitive={} New sub-operation".format(
7154 vnf_config_primitive
7155 )
7156 )
7157 else:
7158 # retry: Get registered params for this existing sub-operation
7159 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7160 op_index
7161 ]
7162 vnf_index = op.get("member_vnf_index")
7163 vnf_config_primitive = op.get("primitive")
7164 primitive_params = op.get("primitive_params")
7165 self.logger.debug(
7166 logging_text
7167 + "vnf_config_primitive={} Sub-operation retry".format(
7168 vnf_config_primitive
7169 )
7170 )
7171 # Execute the primitive, either with new (first-time) or registered (retry) args
7172 ee_descriptor_id = config_primitive.get(
7173 "execution-environment-ref"
7174 )
7175 primitive_name = config_primitive.get(
7176 "execution-environment-primitive", vnf_config_primitive
7177 )
7178 ee_id, vca_type = self._look_for_deployed_vca(
7179 nsr_deployed["VCA"],
7180 member_vnf_index=vnf_index,
7181 vdu_id=None,
7182 vdu_count_index=None,
7183 ee_descriptor_id=ee_descriptor_id,
7184 )
7185 result, result_detail = await self._ns_execute_primitive(
7186 ee_id,
7187 primitive_name,
7188 primitive_params,
7189 vca_type=vca_type,
7190 vca_id=vca_id,
7191 )
7192 self.logger.debug(
7193 logging_text
7194 + "vnf_config_primitive={} Done with result {} {}".format(
7195 vnf_config_primitive, result, result_detail
7196 )
7197 )
7198 # Update operationState = COMPLETED | FAILED
7199 self._update_suboperation_status(
7200 db_nslcmop, op_index, result, result_detail
7201 )
7202
7203 if result == "FAILED":
7204 raise LcmException(result_detail)
7205 db_nsr_update["config-status"] = old_config_status
7206 scale_process = None
7207 # POST-SCALE END
7208 # Check whether each VNF has an exporter for metric collection; if so, update the Prometheus job records
7209 if scaling_type == "SCALE_OUT":
7210 if "exporters-endpoints" in db_vnfd.get("df")[0]:
7211 vnfr_id = db_vnfr["id"]
7212 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7213 exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
7214 self.logger.debug("exporter config :{}".format(exporter_config))
7215 artifact_path = "{}/{}/{}".format(
7216 base_folder["folder"],
7217 base_folder["pkg-dir"],
7218 "exporter-endpoint",
7219 )
7220 ee_id = None
7221 ee_config_descriptor = exporter_config
7222 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
7223 logging_text,
7224 nsr_id,
7225 vnfr_id,
7226 vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
7227 vdu_index=db_vnfr["vdur"][-1]["count-index"],
7228 user=None,
7229 pub_key=None,
7230 )
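# Note: the management IP is read from the last "vdur" entry, which is
# assumed here to be the newly scaled-out VDU instance (vdur records are
# appended in creation order).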
7231 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
7232 self.logger.debug("Artifact_path:{}".format(artifact_path))
7233 vdu_id_for_prom = None
7234 vdu_index_for_prom = None
7235 for x in get_iterable(db_vnfr, "vdur"):
7236 vdu_id_for_prom = x.get("vdu-id-ref")
7237 vdu_index_for_prom = x.get("count-index")
7238 vnfr_id = vnfr_id + vdu_id + str(vdu_index)
7239 vnfr_id = vnfr_id.replace("_", "")
7240 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
7241 ee_id=ee_id,
7242 artifact_path=artifact_path,
7243 ee_config_descriptor=ee_config_descriptor,
7244 vnfr_id=vnfr_id,
7245 nsr_id=nsr_id,
7246 target_ip=rw_mgmt_ip,
7247 element_type="VDU",
7248 vdu_id=vdu_id_for_prom,
7249 vdu_index=vdu_index_for_prom,
7250 )
7251
7252 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
7253 if prometheus_jobs:
7254 db_nsr_update[
7255 "_admin.deployed.prometheus_jobs"
7256 ] = prometheus_jobs
7257 self.update_db_2(
7258 "nsrs",
7259 nsr_id,
7260 db_nsr_update,
7261 )
7262
7263 for job in prometheus_jobs:
7264 self.db.set_one(
7265 "prometheus_jobs",
7266 {"job_name": ""},
7267 job,
7268 upsert=True,
7269 fail_on_empty=False,
7270 )
7271 db_nsr_update[
7272 "detailed-status"
7273 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7274 db_nsr_update["operational-status"] = (
7275 "running"
7276 if old_operational_status == "failed"
7277 else old_operational_status
7278 )
7279 db_nsr_update["config-status"] = old_config_status
7280 return
7281 except (
7282 ROclient.ROClientException,
7283 DbException,
7284 LcmException,
7285 NgRoException,
7286 ) as e:
7287 self.logger.error(logging_text + "Exit Exception {}".format(e))
7288 exc = e
7289 except asyncio.CancelledError:
7290 self.logger.error(
7291 logging_text + "Cancelled Exception while '{}'".format(step)
7292 )
7293 exc = "Operation was cancelled"
7294 except Exception as e:
7295 exc = traceback.format_exc()
7296 self.logger.critical(
7297 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7298 exc_info=True,
7299 )
7300 finally:
7301 error_list = list()
7302 if exc:
7303 error_list.append(str(exc))
7304 self._write_ns_status(
7305 nsr_id=nsr_id,
7306 ns_state=None,
7307 current_operation="IDLE",
7308 current_operation_id=None,
7309 )
7310 try:
7311 if tasks_dict_info:
7312 stage[1] = "Waiting for instantiate pending tasks."
7313 self.logger.debug(logging_text + stage[1])
7314 exc = await self._wait_for_tasks(
7315 logging_text,
7316 tasks_dict_info,
7317 self.timeout.ns_deploy,
7318 stage,
7319 nslcmop_id,
7320 nsr_id=nsr_id,
7321 )
7322 except asyncio.CancelledError:
7323 error_list.append("Cancelled")
7324 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
7325 await self._wait_for_tasks(
7326 logging_text,
7327 tasks_dict_info,
7328 self.timeout.ns_deploy,
7329 stage,
7330 nslcmop_id,
7331 nsr_id=nsr_id,
7332 )
7333 if error_list:
7334 error_detail = "; ".join(error_list)
7335 db_nslcmop_update[
7336 "detailed-status"
7337 ] = error_description_nslcmop = "FAILED {}: {}".format(
7338 step, error_detail
7339 )
7340 nslcmop_operation_state = "FAILED"
7341 if db_nsr:
7342 db_nsr_update["operational-status"] = old_operational_status
7343 db_nsr_update["config-status"] = old_config_status
7344 db_nsr_update["detailed-status"] = ""
7345 if scale_process:
7346 if "VCA" in scale_process:
7347 db_nsr_update["config-status"] = "failed"
7348 if "RO" in scale_process:
7349 db_nsr_update["operational-status"] = "failed"
7350 db_nsr_update[
7351 "detailed-status"
7352 ] = "FAILED scaling nslcmop={} {}: {}".format(
7353 nslcmop_id, step, error_detail
7354 )
7355 else:
7356 error_description_nslcmop = None
7357 nslcmop_operation_state = "COMPLETED"
7358 db_nslcmop_update["detailed-status"] = "Done"
7359 if scaling_type == "SCALE_IN" and prom_job_name is not None:
7360 self.db.del_one(
7361 "prometheus_jobs",
7362 {"job_name": prom_job_name},
7363 fail_on_empty=False,
7364 )
7365
7366 self._write_op_status(
7367 op_id=nslcmop_id,
7368 stage="",
7369 error_message=error_description_nslcmop,
7370 operation_state=nslcmop_operation_state,
7371 other_update=db_nslcmop_update,
7372 )
7373 if db_nsr:
7374 self._write_ns_status(
7375 nsr_id=nsr_id,
7376 ns_state=None,
7377 current_operation="IDLE",
7378 current_operation_id=None,
7379 other_update=db_nsr_update,
7380 )
7381
7382 if nslcmop_operation_state:
7383 try:
7384 msg = {
7385 "nsr_id": nsr_id,
7386 "nslcmop_id": nslcmop_id,
7387 "operationState": nslcmop_operation_state,
7388 }
7389 await self.msg.aiowrite("ns", "scaled", msg)
7390 except Exception as e:
7391 self.logger.error(
7392 logging_text + "kafka_write notification Exception {}".format(e)
7393 )
7394 self.logger.debug(logging_text + "Exit")
7395 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7396
7397 async def _scale_kdu(
7398 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7399 ):
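# Illustrative sketch of the expected scaling_info shape (the kdu name,
# member index, cluster type and resource name below are hypothetical,
# not taken from this code):
#   scaling_info = {
#       "kdu-create": {
#           "my-kdu": [
#               {
#                   "member-vnf-index": "1",
#                   "k8s-cluster-type": "helm-chart-v3",
#                   "resource-name": "my-deployment",
#                   "scale": 2,
#                   "type": "create",
#               }
#           ]
#       }
#   }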
7400 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7401 for kdu_name in _scaling_info:
7402 for kdu_scaling_info in _scaling_info[kdu_name]:
7403 deployed_kdu, index = get_deployed_kdu(
7404 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7405 )
7406 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7407 kdu_instance = deployed_kdu["kdu-instance"]
7408 kdu_model = deployed_kdu.get("kdu-model")
7409 scale = int(kdu_scaling_info["scale"])
7410 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7411
7412 db_dict = {
7413 "collection": "nsrs",
7414 "filter": {"_id": nsr_id},
7415 "path": "_admin.deployed.K8s.{}".format(index),
7416 }
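# db_dict tells the K8s connector where to report progress: status updates
# are written under _admin.deployed.K8s.<index> of this nsrs record.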
7417
7418 step = "scaling application {}".format(
7419 kdu_scaling_info["resource-name"]
7420 )
7421 self.logger.debug(logging_text + step)
7422
7423 if kdu_scaling_info["type"] == "delete":
7424 kdu_config = get_configuration(db_vnfd, kdu_name)
7425 if (
7426 kdu_config
7427 and kdu_config.get("terminate-config-primitive")
7428 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7429 ):
7430 terminate_config_primitive_list = kdu_config.get(
7431 "terminate-config-primitive"
7432 )
7433 terminate_config_primitive_list.sort(
7434 key=lambda val: int(val["seq"])
7435 )
7436
7437 for (
7438 terminate_config_primitive
7439 ) in terminate_config_primitive_list:
7440 primitive_params_ = self._map_primitive_params(
7441 terminate_config_primitive, {}, {}
7442 )
7443 step = "execute terminate config primitive"
7444 self.logger.debug(logging_text + step)
7445 await asyncio.wait_for(
7446 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7447 cluster_uuid=cluster_uuid,
7448 kdu_instance=kdu_instance,
7449 primitive_name=terminate_config_primitive["name"],
7450 params=primitive_params_,
7451 db_dict=db_dict,
7452 total_timeout=self.timeout.primitive,
7453 vca_id=vca_id,
7454 ),
7455 timeout=self.timeout.primitive
7456 * self.timeout.primitive_outer_factor,
7457 )
7458
7459 await asyncio.wait_for(
7460 self.k8scluster_map[k8s_cluster_type].scale(
7461 kdu_instance=kdu_instance,
7462 scale=scale,
7463 resource_name=kdu_scaling_info["resource-name"],
7464 total_timeout=self.timeout.scale_on_error,
7465 vca_id=vca_id,
7466 cluster_uuid=cluster_uuid,
7467 kdu_model=kdu_model,
7468 atomic=True,
7469 db_dict=db_dict,
7470 ),
7471 timeout=self.timeout.scale_on_error
7472 * self.timeout.scale_on_error_outer_factor,
7473 )
7474
7475 if kdu_scaling_info["type"] == "create":
7476 kdu_config = get_configuration(db_vnfd, kdu_name)
7477 if (
7478 kdu_config
7479 and kdu_config.get("initial-config-primitive")
7480 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7481 ):
7482 initial_config_primitive_list = kdu_config.get(
7483 "initial-config-primitive"
7484 )
7485 initial_config_primitive_list.sort(
7486 key=lambda val: int(val["seq"])
7487 )
7488
7489 for initial_config_primitive in initial_config_primitive_list:
7490 primitive_params_ = self._map_primitive_params(
7491 initial_config_primitive, {}, {}
7492 )
7493 step = "execute initial config primitive"
7494 self.logger.debug(logging_text + step)
7495 await asyncio.wait_for(
7496 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7497 cluster_uuid=cluster_uuid,
7498 kdu_instance=kdu_instance,
7499 primitive_name=initial_config_primitive["name"],
7500 params=primitive_params_,
7501 db_dict=db_dict,
7502 vca_id=vca_id,
7503 ),
7504 timeout=600,
7505 )
7506
7507 async def _scale_ng_ro(
7508 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7509 ):
7510 nsr_id = db_nslcmop["nsInstanceId"]
7511 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7512 db_vnfrs = {}
7513
7514 # read from db: vnfd's for every vnf
7515 db_vnfds = []
7516
7517 # for each vnf in ns, read vnfd
7518 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7519 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7520 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7521 # if we do not have this vnfd yet, read it from db
7522 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7523 # read from db
7524 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7525 db_vnfds.append(vnfd)
7526 n2vc_key = self.n2vc.get_public_key()
7527 n2vc_key_list = [n2vc_key]
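# The N2VC public key is passed down so it can be injected into the new VM
# instances, allowing the VCA to SSH into them later (a sketch of the
# intent; the key injection itself happens inside _instantiate_ng_ro).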
7528 self.scale_vnfr(
7529 db_vnfr,
7530 vdu_scaling_info.get("vdu-create"),
7531 vdu_scaling_info.get("vdu-delete"),
7532 mark_delete=True,
7533 )
7534 # db_vnfr has been updated, update db_vnfrs to use it
7535 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7536 await self._instantiate_ng_ro(
7537 logging_text,
7538 nsr_id,
7539 db_nsd,
7540 db_nsr,
7541 db_nslcmop,
7542 db_vnfrs,
7543 db_vnfds,
7544 n2vc_key_list,
7545 stage=stage,
7546 start_deploy=time(),
7547 timeout_ns_deploy=self.timeout.ns_deploy,
7548 )
7549 if vdu_scaling_info.get("vdu-delete"):
7550 self.scale_vnfr(
7551 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7552 )
7553
7554 async def extract_prometheus_scrape_jobs(
7555 self,
7556 ee_id: str,
7557 artifact_path: str,
7558 ee_config_descriptor: dict,
7559 vnfr_id: str,
7560 nsr_id: str,
7561 target_ip: str,
7562 element_type: str,
7563 vnf_member_index: str = "",
7564 vdu_id: str = "",
7565 vdu_index: int = None,
7566 kdu_name: str = "",
7567 kdu_index: int = None,
7568 ) -> list:
7569 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7570 This method will wait until the corresponding VDU or KDU is fully instantiated
7571
7572 Args:
7573 ee_id (str): Execution Environment ID
7574 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7575 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7576 vnfr_id (str): VNFR ID where this EE applies
7577 nsr_id (str): NSR ID where this EE applies
7578 target_ip (str): VDU/KDU instance IP address
7579 element_type (str): NS or VNF or VDU or KDU
7580 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7581 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7582 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7583 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7584 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7585
7586 Raises:
7587 LcmException: If the VDU or KDU instance is not found within an hour (360 checks, 10 s apart)
7588
7589 Returns:
7590 list: Prometheus scrape jobs (one dict per job)
7591 """
7592 # default the vdur and kdur names to an empty string, to avoid any later
7593 # problem with Prometheus when the element type is not VDU or KDU
7594 vdur_name = ""
7595 kdur_name = ""
7596
7597 # look for a template file whose name matches 'prometheus*.j2'
7598 artifact_content = self.fs.dir_ls(artifact_path)
7599 job_file = next(
7600 (
7601 f
7602 for f in artifact_content
7603 if f.startswith("prometheus") and f.endswith(".j2")
7604 ),
7605 None,
7606 )
7607 if not job_file:
7608 return
7609 self.logger.debug("Artifact path{}".format(artifact_path))
7610 self.logger.debug("job file{}".format(job_file))
7611 with self.fs.file_open((artifact_path, job_file), "r") as f:
7612 job_data = f.read()
7613
7614 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7615 if element_type in ("VDU", "KDU"):
7616 for _ in range(360):
7617 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7618 if vdu_id and vdu_index is not None:
7619 vdur = next(
7620 (
7621 x
7622 for x in get_iterable(db_vnfr, "vdur")
7623 if (
7624 x.get("vdu-id-ref") == vdu_id
7625 and x.get("count-index") == vdu_index
7626 )
7627 ),
7628 {},
7629 )
7630 if vdur.get("name"):
7631 vdur_name = vdur.get("name")
7632 break
7633 if kdu_name and kdu_index is not None:
7634 kdur = next(
7635 (
7636 x
7637 for x in get_iterable(db_vnfr, "kdur")
7638 if (
7639 x.get("kdu-name") == kdu_name
7640 and x.get("count-index") == kdu_index
7641 )
7642 ),
7643 {},
7644 )
7645 if kdur.get("name"):
7646 kdur_name = kdur.get("name")
7647 break
7648
7649 await asyncio.sleep(10)
7650 else:
7651 if vdu_id and vdu_index is not None:
7652 raise LcmException(
7653 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7654 )
7655 if kdu_name and kdu_index is not None:
7656 raise LcmException(
7657 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7658 )
7659
7660 if ee_id is not None:
7661 _, namespace, helm_id = get_ee_id_parts(
7662 ee_id
7663 ) # get namespace and EE gRPC service name
7664 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7665 host_port = "80"
7666 vnfr_id = vnfr_id.replace("-", "")
7667 variables = {
7668 "JOB_NAME": vnfr_id,
7669 "TARGET_IP": target_ip,
7670 "EXPORTER_POD_IP": host_name,
7671 "EXPORTER_POD_PORT": host_port,
7672 "NSR_ID": nsr_id,
7673 "VNF_MEMBER_INDEX": vnf_member_index,
7674 "VDUR_NAME": vdur_name,
7675 "KDUR_NAME": kdur_name,
7676 "ELEMENT_TYPE": element_type,
7677 }
7678 else:
7679 metric_path = ee_config_descriptor["metric-path"]
7680 target_port = ee_config_descriptor["metric-port"]
7681 vnfr_id = vnfr_id.replace("-", "")
7682 variables = {
7683 "JOB_NAME": vnfr_id,
7684 "TARGET_IP": target_ip,
7685 "TARGET_PORT": target_port,
7686 "METRIC_PATH": metric_path,
7687 }
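# Illustrative fragment of a 'prometheus*.j2' template that these variables
# would fill (hypothetical template content, not shipped with this module):
#   - job_name: "{{ JOB_NAME }}"
#     metrics_path: "{{ METRIC_PATH }}"
#     static_configs:
#       - targets: ["{{ TARGET_IP }}:{{ TARGET_PORT }}"]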
7688
7689 job_list = parse_job(job_data, variables)
7690 # ensure job_name uses the vnfr_id; also add nsr_id and vnfr_id as job metadata
7691 for job in job_list:
7692 if (
7693 not isinstance(job.get("job_name"), str)
7694 or vnfr_id not in job["job_name"]
7695 ):
7696 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7697 job["nsr_id"] = nsr_id
7698 job["vnfr_id"] = vnfr_id
7699 return job_list
7700
7701 async def rebuild_start_stop(
7702 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7703 ):
7704 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7705 self.logger.info(logging_text + "Enter")
7706 stage = ["Preparing the environment", ""]
7707 # database nsrs record
7708 db_nsr_update = {}
7709 vdu_vim_name = None
7710 vim_vm_id = None
7711 # in case of error, indicates which part of the operation failed, to set the nsr error status
7712 start_deploy = time()
7713 try:
7714 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7715 vim_account_id = db_vnfr.get("vim-account-id")
7716 vim_info_key = "vim:" + vim_account_id
7717 vdu_id = additional_param["vdu_id"]
7718 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7719 vdur = find_in_list(
7720 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7721 )
7722 if vdur:
7723 vdu_vim_name = vdur["name"]
7724 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7725 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7726 else:
7727 raise LcmException("Target vdu is not found")
7728 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7729 # wait for any previous tasks in process
7730 stage[1] = "Waiting for previous operations to terminate"
7731 self.logger.info(stage[1])
7732 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7733
7734 stage[1] = "Reading from database."
7735 self.logger.info(stage[1])
7736 self._write_ns_status(
7737 nsr_id=nsr_id,
7738 ns_state=None,
7739 current_operation=operation_type.upper(),
7740 current_operation_id=nslcmop_id,
7741 )
7742 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7743
7744 # read from db: ns
7745 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7746 db_nsr_update["operational-status"] = operation_type
7747 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7748 # Payload for RO
7749 desc = {
7750 operation_type: {
7751 "vim_vm_id": vim_vm_id,
7752 "vnf_id": vnf_id,
7753 "vdu_index": additional_param["count-index"],
7754 "vdu_id": vdur["id"],
7755 "target_vim": target_vim,
7756 "vim_account_id": vim_account_id,
7757 }
7758 }
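# The payload key is the operation itself ("start", "stop" or "rebuild" is
# assumed here, matching this method's name); RO resolves the target VM from
# vim_vm_id/target_vim and runs the requested action on it.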
7759 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7760 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7761 self.logger.info("ro nsr id: {}".format(nsr_id))
7762 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7763 self.logger.info("response from RO: {}".format(result_dict))
7764 action_id = result_dict["action_id"]
7765 await self._wait_ng_ro(
7766 nsr_id,
7767 action_id,
7768 nslcmop_id,
7769 start_deploy,
7770 self.timeout.operate,
7771 None,
7772 "start_stop_rebuild",
7773 )
7774 return "COMPLETED", "Done"
7775 except (ROclient.ROClientException, DbException, LcmException) as e:
7776 self.logger.error("Exit Exception {}".format(e))
7777 exc = e
7778 except asyncio.CancelledError:
7779 self.logger.error("Cancelled Exception while '{}'".format(stage))
7780 exc = "Operation was cancelled"
7781 except Exception as e:
7782 exc = traceback.format_exc()
7783 self.logger.critical(
7784 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7785 )
7786 return "FAILED", "Error in operate VNF {}".format(exc)
7787
7788 async def migrate(self, nsr_id, nslcmop_id):
7789 """
7790 Migrate VNF and VDU instances in a NS
7791
7792 :param nsr_id: NS Instance ID
7793 :param nslcmop_id: nslcmop ID of the migrate operation
7794
7795 """
7796 # Try to lock HA task here
7797 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7798 if not task_is_locked_by_me:
7799 return
7800 logging_text = "Task ns={} migrate ".format(nsr_id)
7801 self.logger.debug(logging_text + "Enter")
7802 # get all needed from database
7803 db_nslcmop = None
7804 db_nslcmop_update = {}
7805 nslcmop_operation_state = None
7806 db_nsr_update = {}
7807 target = {}
7808 exc = None
7809 # in case of error, indicates which part of the operation failed, to set the nsr error status
7810 start_deploy = time()
7811
7812 try:
7813 # wait for any previous tasks in process
7814 step = "Waiting for previous operations to terminate"
7815 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7816
7817 self._write_ns_status(
7818 nsr_id=nsr_id,
7819 ns_state=None,
7820 current_operation="MIGRATING",
7821 current_operation_id=nslcmop_id,
7822 )
7823 step = "Getting nslcmop from database"
7824 self.logger.debug(
7825 step + " after having waited for previous tasks to be completed"
7826 )
7827 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7828 migrate_params = db_nslcmop.get("operationParams")
7829
7830 target = {}
7831 target.update(migrate_params)
7832 desc = await self.RO.migrate(nsr_id, target)
7833 self.logger.debug("RO return > {}".format(desc))
7834 action_id = desc["action_id"]
7835 await self._wait_ng_ro(
7836 nsr_id,
7837 action_id,
7838 nslcmop_id,
7839 start_deploy,
7840 self.timeout.migrate,
7841 operation="migrate",
7842 )
7843 except (ROclient.ROClientException, DbException, LcmException) as e:
7844 self.logger.error("Exit Exception {}".format(e))
7845 exc = e
7846 except asyncio.CancelledError:
7847 self.logger.error("Cancelled Exception while '{}'".format(step))
7848 exc = "Operation was cancelled"
7849 except Exception as e:
7850 exc = traceback.format_exc()
7851 self.logger.critical(
7852 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7853 )
7854 finally:
7855 self._write_ns_status(
7856 nsr_id=nsr_id,
7857 ns_state=None,
7858 current_operation="IDLE",
7859 current_operation_id=None,
7860 )
7861 if exc:
7862 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7863 nslcmop_operation_state = "FAILED"
7864 else:
7865 nslcmop_operation_state = "COMPLETED"
7866 db_nslcmop_update["detailed-status"] = "Done"
7867 db_nsr_update["detailed-status"] = "Done"
7868
7869 self._write_op_status(
7870 op_id=nslcmop_id,
7871 stage="",
7872 error_message="",
7873 operation_state=nslcmop_operation_state,
7874 other_update=db_nslcmop_update,
7875 )
7876 if nslcmop_operation_state:
7877 try:
7878 msg = {
7879 "nsr_id": nsr_id,
7880 "nslcmop_id": nslcmop_id,
7881 "operationState": nslcmop_operation_state,
7882 }
7883 await self.msg.aiowrite("ns", "migrated", msg)
7884 except Exception as e:
7885 self.logger.error(
7886 logging_text + "kafka_write notification Exception {}".format(e)
7887 )
7888 self.logger.debug(logging_text + "Exit")
7889 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7890
7891 async def heal(self, nsr_id, nslcmop_id):
7892 """
7893 Heal NS
7894
7895 :param nsr_id: ns instance to heal
7896 :param nslcmop_id: operation to run
7897 :return:
7898 """
7899
7900 # Try to lock HA task here
7901 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7902 if not task_is_locked_by_me:
7903 return
7904
7905 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7906 stage = ["", "", ""]
7907 tasks_dict_info = {}
7908 # ^ stage, step, VIM progress
7909 self.logger.debug(logging_text + "Enter")
7910 # get all needed from database
7911 db_nsr = None
7912 db_nslcmop_update = {}
7913 db_nsr_update = {}
7914 db_vnfrs = {} # vnf's info indexed by _id
7915 exc = None
7916 old_operational_status = ""
7917 old_config_status = ""
7918 nsi_id = None
7919 try:
7920 # wait for any previous tasks in process
7921 step = "Waiting for previous operations to terminate"
7922 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7923 self._write_ns_status(
7924 nsr_id=nsr_id,
7925 ns_state=None,
7926 current_operation="HEALING",
7927 current_operation_id=nslcmop_id,
7928 )
7929
7930 step = "Getting nslcmop from database"
7931 self.logger.debug(
7932 step + " after having waited for previous tasks to be completed"
7933 )
7934 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7935
7936 step = "Getting nsr from database"
7937 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7938 old_operational_status = db_nsr["operational-status"]
7939 old_config_status = db_nsr["config-status"]
7940
7941 db_nsr_update = {
7942 "operational-status": "healing",
7943 "_admin.deployed.RO.operational-status": "healing",
7944 }
7945 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7946
7947 step = "Sending heal order to VIM"
7948 await self.heal_RO(
7949 logging_text=logging_text,
7950 nsr_id=nsr_id,
7951 db_nslcmop=db_nslcmop,
7952 stage=stage,
7953 )
7954 # VCA tasks
7955 # read from db: nsd
7956 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7957 self.logger.debug(logging_text + stage[1])
7958 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7959 self.fs.sync(db_nsr["nsd-id"])
7960 db_nsr["nsd"] = nsd
7961 # read from db: vnfr's of this ns
7962 step = "Getting vnfrs from db"
7963 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7964 for vnfr in db_vnfrs_list:
7965 db_vnfrs[vnfr["_id"]] = vnfr
7966 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7967
7968 # Check for each target VNF
7969 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7970 for target_vnf in target_list:
7971 # Find this VNF in the list from DB
7972 vnfr_id = target_vnf.get("vnfInstanceId", None)
7973 if vnfr_id:
7974 db_vnfr = db_vnfrs[vnfr_id]
7975 vnfd_id = db_vnfr.get("vnfd-id")
7976 vnfd_ref = db_vnfr.get("vnfd-ref")
7977 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7978 base_folder = vnfd["_admin"]["storage"]
7979 vdu_id = None
7980 vdu_index = 0
7981 vdu_name = None
7982 kdu_name = None
7983 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7984 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7985
7986 # Check each target VDU and deploy N2VC
7987 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7988 "vdu", []
7989 )
7990 if not target_vdu_list:
7991 # build the dictionary of target VDUs from the existing VDU records
7992 target_vdu_list = []
7993 for existing_vdu in db_vnfr.get("vdur"):
7994 vdu_name = existing_vdu.get("vdu-name", None)
7995 vdu_index = existing_vdu.get("count-index", 0)
7996 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7997 "run-day1", False
7998 )
7999 vdu_to_be_healed = {
8000 "vdu-id": vdu_name,
8001 "count-index": vdu_index,
8002 "run-day1": vdu_run_day1,
8003 }
8004 target_vdu_list.append(vdu_to_be_healed)
8005 for target_vdu in target_vdu_list:
8006 deploy_params_vdu = target_vdu
8007 # Use the VNF-level run-day1 value if no VDU-level value exists
8008 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
8009 "additionalParams", {}
8010 ).get("run-day1"):
8011 deploy_params_vdu["run-day1"] = target_vnf[
8012 "additionalParams"
8013 ].get("run-day1")
8014 vdu_name = target_vdu.get("vdu-id", None)
8015 # TODO: Get vdu_id from vdud.
8016 vdu_id = vdu_name
8017 # For multi-instance VDUs count-index is mandatory
8018 # For single-instance VDUs count-index is 0
8019 vdu_index = target_vdu.get("count-index", 0)
8020
8021 # n2vc_redesign STEP 3 to 6 Deploy N2VC
8022 stage[1] = "Deploying Execution Environments."
8023 self.logger.debug(logging_text + stage[1])
8024
8025 # VNF Level charm. Normal case when proxy charms.
8026 # If the target instance is the management machine, continue with the actions: recreate the EE for native charms or reinject the juju key for proxy charms.
8027 descriptor_config = get_configuration(vnfd, vnfd_ref)
8028 if descriptor_config:
8029 # Continue if healed machine is management machine
8030 vnf_ip_address = db_vnfr.get("ip-address")
8031 target_instance = None
8032 for instance in db_vnfr.get("vdur", ()):
8033 if (
8034 instance["vdu-name"] == vdu_name
8035 and instance["count-index"] == vdu_index
8036 ):
8037 target_instance = instance
8038 break
8039 if target_instance and vnf_ip_address == target_instance.get("ip-address"):
8040 self._heal_n2vc(
8041 logging_text=logging_text
8042 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8043 member_vnf_index, vdu_name, vdu_index
8044 ),
8045 db_nsr=db_nsr,
8046 db_vnfr=db_vnfr,
8047 nslcmop_id=nslcmop_id,
8048 nsr_id=nsr_id,
8049 nsi_id=nsi_id,
8050 vnfd_id=vnfd_ref,
8051 vdu_id=None,
8052 kdu_name=None,
8053 member_vnf_index=member_vnf_index,
8054 vdu_index=0,
8055 vdu_name=None,
8056 deploy_params=deploy_params_vdu,
8057 descriptor_config=descriptor_config,
8058 base_folder=base_folder,
8059 task_instantiation_info=tasks_dict_info,
8060 stage=stage,
8061 )
8062
8063 # VDU Level charm. Normal case with native charms.
8064 descriptor_config = get_configuration(vnfd, vdu_name)
8065 if descriptor_config:
8066 self._heal_n2vc(
8067 logging_text=logging_text
8068 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8069 member_vnf_index, vdu_name, vdu_index
8070 ),
8071 db_nsr=db_nsr,
8072 db_vnfr=db_vnfr,
8073 nslcmop_id=nslcmop_id,
8074 nsr_id=nsr_id,
8075 nsi_id=nsi_id,
8076 vnfd_id=vnfd_ref,
8077 vdu_id=vdu_id,
8078 kdu_name=kdu_name,
8079 member_vnf_index=member_vnf_index,
8080 vdu_index=vdu_index,
8081 vdu_name=vdu_name,
8082 deploy_params=deploy_params_vdu,
8083 descriptor_config=descriptor_config,
8084 base_folder=base_folder,
8085 task_instantiation_info=tasks_dict_info,
8086 stage=stage,
8087 )
8088 except (
8089 ROclient.ROClientException,
8090 DbException,
8091 LcmException,
8092 NgRoException,
8093 ) as e:
8094 self.logger.error(logging_text + "Exit Exception {}".format(e))
8095 exc = e
8096 except asyncio.CancelledError:
8097 self.logger.error(
8098 logging_text + "Cancelled Exception while '{}'".format(step)
8099 )
8100 exc = "Operation was cancelled"
8101 except Exception as e:
8102 exc = traceback.format_exc()
8103 self.logger.critical(
8104 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8105 exc_info=True,
8106 )
8107 finally:
8108 error_list = list()
8109 if db_vnfrs_list and target_list:
8110 for vnfrs in db_vnfrs_list:
8111 for vnf_instance in target_list:
8112 if vnfrs["_id"] == vnf_instance.get("vnfInstanceId"):
8113 self.db.set_list(
8114 "vnfrs",
8115 {"_id": vnfrs["_id"]},
8116 {"_admin.modified": time()},
8117 )
8118 if exc:
8119 error_list.append(str(exc))
8120 try:
8121 if tasks_dict_info:
8122 stage[1] = "Waiting for healing pending tasks."
8123 self.logger.debug(logging_text + stage[1])
8124 exc = await self._wait_for_tasks(
8125 logging_text,
8126 tasks_dict_info,
8127 self.timeout.ns_deploy,
8128 stage,
8129 nslcmop_id,
8130 nsr_id=nsr_id,
8131 )
8132 except asyncio.CancelledError:
8133 error_list.append("Cancelled")
8134 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
8135 await self._wait_for_tasks(
8136 logging_text,
8137 tasks_dict_info,
8138 self.timeout.ns_deploy,
8139 stage,
8140 nslcmop_id,
8141 nsr_id=nsr_id,
8142 )
8143 if error_list:
8144 error_detail = "; ".join(error_list)
8145 db_nslcmop_update[
8146 "detailed-status"
8147 ] = error_description_nslcmop = "FAILED {}: {}".format(
8148 step, error_detail
8149 )
8150 nslcmop_operation_state = "FAILED"
8151 if db_nsr:
8152 db_nsr_update["operational-status"] = old_operational_status
8153 db_nsr_update["config-status"] = old_config_status
8154 db_nsr_update[
8155 "detailed-status"
8156 ] = "FAILED healing nslcmop={} {}: {}".format(
8157 nslcmop_id, step, error_detail
8158 )
8159 for task, task_name in tasks_dict_info.items():
8160 if not task.done() or task.cancelled() or task.exception():
8161 if task_name.startswith(self.task_name_deploy_vca):
8162 # A N2VC task is pending
8163 db_nsr_update["config-status"] = "failed"
8164 else:
8165 # RO task is pending
8166 db_nsr_update["operational-status"] = "failed"
8167 else:
8168 error_description_nslcmop = None
8169 nslcmop_operation_state = "COMPLETED"
8170 db_nslcmop_update["detailed-status"] = "Done"
8171 db_nsr_update["detailed-status"] = "Done"
8172 db_nsr_update["operational-status"] = "running"
8173 db_nsr_update["config-status"] = "configured"
8174
8175 self._write_op_status(
8176 op_id=nslcmop_id,
8177 stage="",
8178 error_message=error_description_nslcmop,
8179 operation_state=nslcmop_operation_state,
8180 other_update=db_nslcmop_update,
8181 )
8182 if db_nsr:
8183 self._write_ns_status(
8184 nsr_id=nsr_id,
8185 ns_state=None,
8186 current_operation="IDLE",
8187 current_operation_id=None,
8188 other_update=db_nsr_update,
8189 )
8190
8191 if nslcmop_operation_state:
8192 try:
8193 msg = {
8194 "nsr_id": nsr_id,
8195 "nslcmop_id": nslcmop_id,
8196 "operationState": nslcmop_operation_state,
8197 }
8198 await self.msg.aiowrite("ns", "healed", msg)
8199 except Exception as e:
8200 self.logger.error(
8201 logging_text + "kafka_write notification Exception {}".format(e)
8202 )
8203 self.logger.debug(logging_text + "Exit")
8204 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8205
8206 async def heal_RO(
8207 self,
8208 logging_text,
8209 nsr_id,
8210 db_nslcmop,
8211 stage,
8212 ):
8213 """
8214 Heal at RO
8215 :param logging_text: prefix text to use at logging
8216 :param nsr_id: nsr identity
8217 :param db_nslcmop: database content of ns operation, in this case, 'heal'
8218 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8219 :return: None or exception
8220 """
8221
8222 def get_vim_account(vim_account_id):
8223 nonlocal db_vims
8224 if vim_account_id in db_vims:
8225 return db_vims[vim_account_id]
8226 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8227 db_vims[vim_account_id] = db_vim
8228 return db_vim
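# get_vim_account memoizes reads in db_vims, so each VIM account document
# is fetched from the database at most once per heal operation.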
8229
8230 try:
8231 start_heal = time()
8232 ns_params = db_nslcmop.get("operationParams")
8233 if ns_params and ns_params.get("timeout_ns_heal"):
8234 timeout_ns_heal = ns_params["timeout_ns_heal"]
8235 else:
8236 timeout_ns_heal = self.timeout.ns_heal
8237
8238 db_vims = {}
8239
8240 nslcmop_id = db_nslcmop["_id"]
8241 target = {
8242 "action_id": nslcmop_id,
8243 }
8244 self.logger.warning(
8245 "db_nslcmop={} and timeout_ns_heal={}".format(
8246 db_nslcmop, timeout_ns_heal
8247 )
8248 )
8249 target.update(db_nslcmop.get("operationParams", {}))
8250
8251 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8252 desc = await self.RO.recreate(nsr_id, target)
8253 self.logger.debug("RO return > {}".format(desc))
8254 action_id = desc["action_id"]
8255 # wait for RO to complete; otherwise reinjecting the juju key at RO may find the VM in state Deleted
8256 await self._wait_ng_ro(
8257 nsr_id,
8258 action_id,
8259 nslcmop_id,
8260 start_heal,
8261 timeout_ns_heal,
8262 stage,
8263 operation="healing",
8264 )
8265
8266 # Updating NSR
8267 db_nsr_update = {
8268 "_admin.deployed.RO.operational-status": "running",
8269 "detailed-status": " ".join(stage),
8270 }
8271 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8272 self._write_op_status(nslcmop_id, stage)
8273 self.logger.debug(
8274 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8275 )
8276
8277 except Exception as e:
8278 stage[2] = "ERROR healing at VIM"
8279 # self.set_vnfr_at_error(db_vnfrs, str(e))
8280 self.logger.error(
8281 "Error healing at VIM {}".format(e),
8282 exc_info=not isinstance(
8283 e,
8284 (
8285 ROclient.ROClientException,
8286 LcmException,
8287 DbException,
8288 NgRoException,
8289 ),
8290 ),
8291 )
8292 raise
8293
8294 def _heal_n2vc(
8295 self,
8296 logging_text,
8297 db_nsr,
8298 db_vnfr,
8299 nslcmop_id,
8300 nsr_id,
8301 nsi_id,
8302 vnfd_id,
8303 vdu_id,
8304 kdu_name,
8305 member_vnf_index,
8306 vdu_index,
8307 vdu_name,
8308 deploy_params,
8309 descriptor_config,
8310 base_folder,
8311 task_instantiation_info,
8312 stage,
8313 ):
8314 # launch instantiate_N2VC in an asyncio task and register the task object
8315 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8316 # if not found, create one entry and update database
8317 # fill db_nsr._admin.deployed.VCA.<index>
8318
8319 self.logger.debug(
8320 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
8321 )
8322
8323 charm_name = ""
8324 get_charm_name = False
8325 if "execution-environment-list" in descriptor_config:
8326 ee_list = descriptor_config.get("execution-environment-list", [])
8327 elif "juju" in descriptor_config:
8328 ee_list = [descriptor_config] # ns charms
8329 if "execution-environment-list" not in descriptor_config:
8330 # charm name is only required for ns charms
8331 get_charm_name = True
8332 else: # other types as script are not supported
8333 ee_list = []
8334
8335 for ee_item in ee_list:
8336 self.logger.debug(
8337 logging_text
8338 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8339 ee_item.get("juju"), ee_item.get("helm-chart")
8340 )
8341 )
8342 ee_descriptor_id = ee_item.get("id")
8343 vca_name, charm_name, vca_type = self.get_vca_info(
8344 ee_item, db_nsr, get_charm_name
8345 )
8346 if not vca_type:
8347 self.logger.debug(
8348 logging_text + "skipping, non juju/charm/helm configuration"
8349 )
8350 continue
8351
8352 vca_index = -1
8353 for vca_index, vca_deployed in enumerate(
8354 db_nsr["_admin"]["deployed"]["VCA"]
8355 ):
8356 if not vca_deployed:
8357 continue
8358 if (
8359 vca_deployed.get("member-vnf-index") == member_vnf_index
8360 and vca_deployed.get("vdu_id") == vdu_id
8361 and vca_deployed.get("kdu_name") == kdu_name
8362 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8363 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8364 ):
8365 break
8366 else:
8367 # not found, create one.
8368 target = (
8369 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8370 )
8371 if vdu_id:
8372 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8373 elif kdu_name:
8374 target += "/kdu/{}".format(kdu_name)
8375 vca_deployed = {
8376 "target_element": target,
8377 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8378 "member-vnf-index": member_vnf_index,
8379 "vdu_id": vdu_id,
8380 "kdu_name": kdu_name,
8381 "vdu_count_index": vdu_index,
8382 "operational-status": "init", # TODO revise
8383 "detailed-status": "", # TODO revise
8384 "step": "initial-deploy", # TODO revise
8385 "vnfd_id": vnfd_id,
8386 "vdu_name": vdu_name,
8387 "type": vca_type,
8388 "ee_descriptor_id": ee_descriptor_id,
8389 "charm_name": charm_name,
8390 }
8391 vca_index += 1
8392
8393 # create VCA and configurationStatus in db
8394 db_dict = {
8395 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8396 "configurationStatus.{}".format(vca_index): dict(),
8397 }
8398 self.update_db_2("nsrs", nsr_id, db_dict)
8399
8400 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8401
8402 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8403 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8404 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8405
8406 # Launch task
8407 task_n2vc = asyncio.ensure_future(
8408 self.heal_N2VC(
8409 logging_text=logging_text,
8410 vca_index=vca_index,
8411 nsi_id=nsi_id,
8412 db_nsr=db_nsr,
8413 db_vnfr=db_vnfr,
8414 vdu_id=vdu_id,
8415 kdu_name=kdu_name,
8416 vdu_index=vdu_index,
8417 deploy_params=deploy_params,
8418 config_descriptor=descriptor_config,
8419 base_folder=base_folder,
8420 nslcmop_id=nslcmop_id,
8421 stage=stage,
8422 vca_type=vca_type,
8423 vca_name=vca_name,
8424 ee_config_descriptor=ee_item,
8425 )
8426 )
8427 self.lcm_tasks.register(
8428 "ns",
8429 nsr_id,
8430 nslcmop_id,
8431 "instantiate_N2VC-{}".format(vca_index),
8432 task_n2vc,
8433 )
8434 task_instantiation_info[
8435 task_n2vc
8436 ] = self.task_name_deploy_vca + " {}.{}".format(
8437 member_vnf_index or "", vdu_id or ""
8438 )
8439
8440 async def heal_N2VC(
8441 self,
8442 logging_text,
8443 vca_index,
8444 nsi_id,
8445 db_nsr,
8446 db_vnfr,
8447 vdu_id,
8448 kdu_name,
8449 vdu_index,
8450 config_descriptor,
8451 deploy_params,
8452 base_folder,
8453 nslcmop_id,
8454 stage,
8455 vca_type,
8456 vca_name,
8457 ee_config_descriptor,
8458 ):
8459 nsr_id = db_nsr["_id"]
8460 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
8461 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
8462 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
8463 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
8464 db_dict = {
8465 "collection": "nsrs",
8466 "filter": {"_id": nsr_id},
8467 "path": db_update_entry,
8468 }
8469 step = ""
8470 try:
8471 element_type = "NS"
8472 element_under_configuration = nsr_id
8473
8474 vnfr_id = None
8475 if db_vnfr:
8476 vnfr_id = db_vnfr["_id"]
8477 osm_config["osm"]["vnf_id"] = vnfr_id
8478
8479 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
8480
8481 if vca_type == "native_charm":
8482 index_number = 0
8483 else:
8484 index_number = vdu_index or 0
8485
8486 if vnfr_id:
8487 element_type = "VNF"
8488 element_under_configuration = vnfr_id
8489 namespace += ".{}-{}".format(vnfr_id, index_number)
8490 if vdu_id:
8491 namespace += ".{}-{}".format(vdu_id, index_number)
8492 element_type = "VDU"
8493 element_under_configuration = "{}-{}".format(vdu_id, index_number)
8494 osm_config["osm"]["vdu_id"] = vdu_id
8495 elif kdu_name:
8496 namespace += ".{}".format(kdu_name)
8497 element_type = "KDU"
8498 element_under_configuration = kdu_name
8499 osm_config["osm"]["kdu_name"] = kdu_name
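# Resulting namespace layout (illustrative): "<nsi>.<ns>" for an NS charm,
# "<nsi>.<ns>.<vnfr>-<n>" for a VNF charm, and
# "<nsi>.<ns>.<vnfr>-<n>.<vdu>-<n>" for a VDU charm.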
8500
8501 # Get artifact path
8502 if base_folder["pkg-dir"]:
8503 artifact_path = "{}/{}/{}/{}".format(
8504 base_folder["folder"],
8505 base_folder["pkg-dir"],
8506 "charms"
8507 if vca_type
8508 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8509 else "helm-charts",
8510 vca_name,
8511 )
8512 else:
8513 artifact_path = "{}/Scripts/{}/{}/".format(
8514 base_folder["folder"],
8515 "charms"
8516 if vca_type
8517 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8518 else "helm-charts",
8519 vca_name,
8520 )
8521
8522 self.logger.debug("Artifact path > {}".format(artifact_path))
8523
8524 # get initial_config_primitive_list that applies to this element
8525 initial_config_primitive_list = config_descriptor.get(
8526 "initial-config-primitive"
8527 )
8528
8529 self.logger.debug(
8530 "Initial config primitive list > {}".format(
8531 initial_config_primitive_list
8532 )
8533 )
8534
8535 # add config if not present for NS charm
8536 ee_descriptor_id = ee_config_descriptor.get("id")
8537 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
8538 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
8539 initial_config_primitive_list, vca_deployed, ee_descriptor_id
8540 )
8541
8542 self.logger.debug(
8543 "Initial config primitive list #2 > {}".format(
8544 initial_config_primitive_list
8545 )
8546 )
8547 # n2vc_redesign STEP 3.1
8548 # find old ee_id if exists
8549 ee_id = vca_deployed.get("ee_id")
8550
8551 vca_id = self.get_vca_id(db_vnfr, db_nsr)
8552 # create or register execution environment in VCA. Only for native charms when healing
8553 if vca_type == "native_charm":
8554 step = "Waiting to VM being up and getting IP address"
8555 self.logger.debug(logging_text + step)
8556 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8557 logging_text,
8558 nsr_id,
8559 vnfr_id,
8560 vdu_id,
8561 vdu_index,
8562 user=None,
8563 pub_key=None,
8564 )
8565 credentials = {"hostname": rw_mgmt_ip}
8566 # get username
8567 username = deep_get(
8568 config_descriptor, ("config-access", "ssh-access", "default-user")
8569 )
8570 # TODO remove this when the changes on IM regarding config-access:ssh-access:default-user are
8571 # merged. Meanwhile, get the username from initial-config-primitive
8572 if not username and initial_config_primitive_list:
8573 for config_primitive in initial_config_primitive_list:
8574 for param in config_primitive.get("parameter", ()):
8575 if param["name"] == "ssh-username":
8576 username = param["value"]
8577 break
8578 if not username:
8579 raise LcmException(
8580 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8581 "'config-access.ssh-access.default-user'"
8582 )
8583 credentials["username"] = username
8584
8585 # n2vc_redesign STEP 3.2
8586 # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
8587 self._write_configuration_status(
8588 nsr_id=nsr_id,
8589 vca_index=vca_index,
8590 status="REGISTERING",
8591 element_under_configuration=element_under_configuration,
8592 element_type=element_type,
8593 )
8594
8595 step = "register execution environment {}".format(credentials)
8596 self.logger.debug(logging_text + step)
8597 ee_id = await self.vca_map[vca_type].register_execution_environment(
8598 credentials=credentials,
8599 namespace=namespace,
8600 db_dict=db_dict,
8601 vca_id=vca_id,
8602 )
8603
8604 # update ee_id in db
8605 db_dict_ee_id = {
8606 "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
8607 }
8608 self.update_db_2("nsrs", nsr_id, db_dict_ee_id)
8609
8610 # for compatibility with MON/POL modules, they need the model and application name in the database
8611 # TODO ask MON/POL whether they still assume the format "model_name.application_name"
8612 # Not sure if this needs to be done when healing
8613 """
8614 ee_id_parts = ee_id.split(".")
8615 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8616 if len(ee_id_parts) >= 2:
8617 model_name = ee_id_parts[0]
8618 application_name = ee_id_parts[1]
8619 db_nsr_update[db_update_entry + "model"] = model_name
8620 db_nsr_update[db_update_entry + "application"] = application_name
8621 """
8622
8623 # n2vc_redesign STEP 3.3
8624 # Install configuration software. Only for native charms.
8625 step = "Install configuration Software"
8626
8627 self._write_configuration_status(
8628 nsr_id=nsr_id,
8629 vca_index=vca_index,
8630 status="INSTALLING SW",
8631 element_under_configuration=element_under_configuration,
8632 element_type=element_type,
8633 # other_update=db_nsr_update,
8634 other_update=None,
8635 )
8636
8637 # TODO check if already done
8638 self.logger.debug(logging_text + step)
8639 config = None
8640 if vca_type == "native_charm":
8641 config_primitive = next(
8642 (p for p in initial_config_primitive_list if p["name"] == "config"),
8643 None,
8644 )
8645 if config_primitive:
8646 config = self._map_primitive_params(
8647 config_primitive, {}, deploy_params
8648 )
8649 await self.vca_map[vca_type].install_configuration_sw(
8650 ee_id=ee_id,
8651 artifact_path=artifact_path,
8652 db_dict=db_dict,
8653 config=config,
8654 num_units=1,
8655 vca_id=vca_id,
8656 vca_type=vca_type,
8657 )
8658
8659 # write in db flag of configuration_sw already installed
8660 self.update_db_2(
8661 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
8662 )
8663
8664 # Not sure if this need to be done when healing
8665 """
8666 # add relations for this VCA (wait for other peers related with this VCA)
8667 await self._add_vca_relations(
8668 logging_text=logging_text,
8669 nsr_id=nsr_id,
8670 vca_type=vca_type,
8671 vca_index=vca_index,
8672 )
8673 """
8674
8675 # if SSH access is required, then get the execution environment SSH public key
8676 # for native charms we have already waited for the VM to be up
8677 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
8678 pub_key = None
8679 user = None
8680 # self.logger.debug("get ssh key block")
8681 if deep_get(
8682 config_descriptor, ("config-access", "ssh-access", "required")
8683 ):
8684 # self.logger.debug("ssh key needed")
8685 # Needed to inject a ssh key
8686 user = deep_get(
8687 config_descriptor,
8688 ("config-access", "ssh-access", "default-user"),
8689 )
8690 step = "Install configuration Software, getting public ssh key"
8691 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
8692 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
8693 )
8694
8695 step = "Insert public key into VM user={} ssh_key={}".format(
8696 user, pub_key
8697 )
8698 else:
8699 # self.logger.debug("no need to get ssh key")
8700 step = "Waiting to VM being up and getting IP address"
8701 self.logger.debug(logging_text + step)
8702
8703 # n2vc_redesign STEP 5.1
8704 # wait for RO (ip-address) Insert pub_key into VM
8705 # IMPORTANT: we need to wait for RO to complete the healing operation.
8706 await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
8707 if vnfr_id:
8708 if kdu_name:
8709 rw_mgmt_ip = await self.wait_kdu_up(
8710 logging_text, nsr_id, vnfr_id, kdu_name
8711 )
8712 else:
8713 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8714 logging_text,
8715 nsr_id,
8716 vnfr_id,
8717 vdu_id,
8718 vdu_index,
8719 user=user,
8720 pub_key=pub_key,
8721 )
8722 else:
8723 rw_mgmt_ip = None # This is for a NS configuration
8724
8725 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
8726
8727 # store rw_mgmt_ip in deploy params for later replacement
8728 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
8729
8730 # Day1 operations.
8731 # get run-day1 operation parameter
8732 runDay1 = deploy_params.get("run-day1", False)
8733 self.logger.debug(
8734 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
8735 )
8736 if runDay1:
8737 # n2vc_redesign STEP 6 Execute initial config primitive
8738 step = "execute initial config primitive"
8739
8740 # wait for dependent primitives execution (NS -> VNF -> VDU)
8741 if initial_config_primitive_list:
8742 await self._wait_dependent_n2vc(
8743 nsr_id, vca_deployed_list, vca_index
8744 )
8745
8746 # stage, depending on the element type: vdu, kdu, vnf or ns
8747 my_vca = vca_deployed_list[vca_index]
8748 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
8749 # VDU or KDU
8750 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
8751 elif my_vca.get("member-vnf-index"):
8752 # VNF
8753 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
8754 else:
8755 # NS
8756 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
8757
8758 self._write_configuration_status(
8759 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
8760 )
8761
8762 self._write_op_status(op_id=nslcmop_id, stage=stage)
8763
8764 check_if_terminated_needed = True
8765 for initial_config_primitive in initial_config_primitive_list:
8766 # add ns_config_info to the deploy params if this is an NS-level execution environment
8767 if not vca_deployed["member-vnf-index"]:
8768 deploy_params["ns_config_info"] = json.dumps(
8769 self._get_ns_config_info(nsr_id)
8770 )
8771 # TODO check if already done
8772 primitive_params_ = self._map_primitive_params(
8773 initial_config_primitive, {}, deploy_params
8774 )
8775
8776 step = "execute primitive '{}' params '{}'".format(
8777 initial_config_primitive["name"], primitive_params_
8778 )
8779 self.logger.debug(logging_text + step)
8780 await self.vca_map[vca_type].exec_primitive(
8781 ee_id=ee_id,
8782 primitive_name=initial_config_primitive["name"],
8783 params_dict=primitive_params_,
8784 db_dict=db_dict,
8785 vca_id=vca_id,
8786 vca_type=vca_type,
8787 )
8788 # Once a primitive has been executed, record in the db whether terminate primitives must be executed later
8789 if check_if_terminated_needed:
8790 if config_descriptor.get("terminate-config-primitive"):
8791 self.update_db_2(
8792 "nsrs",
8793 nsr_id,
8794 {db_update_entry + "needed_terminate": True},
8795 )
8796 check_if_terminated_needed = False
8797
8798 # TODO register in database that primitive is done
8799
8800 # STEP 7 Configure metrics
8801 # Not sure if this need to be done when healing
8802 """
8803 if vca_type == "helm" or vca_type == "helm-v3":
8804 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8805 ee_id=ee_id,
8806 artifact_path=artifact_path,
8807 ee_config_descriptor=ee_config_descriptor,
8808 vnfr_id=vnfr_id,
8809 nsr_id=nsr_id,
8810 target_ip=rw_mgmt_ip,
8811 )
8812 if prometheus_jobs:
8813 self.update_db_2(
8814 "nsrs",
8815 nsr_id,
8816 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8817 )
8818
8819 for job in prometheus_jobs:
8820 self.db.set_one(
8821 "prometheus_jobs",
8822 {"job_name": job["job_name"]},
8823 job,
8824 upsert=True,
8825 fail_on_empty=False,
8826 )
8827
8828 """
8829 step = "instantiated at VCA"
8830 self.logger.debug(logging_text + step)
8831
8832 self._write_configuration_status(
8833 nsr_id=nsr_id, vca_index=vca_index, status="READY"
8834 )
8835
8836 except Exception as e: # TODO not use Exception but N2VC exception
8837 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8838 if not isinstance(
8839 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
8840 ):
8841 self.logger.error(
8842 "Exception while {} : {}".format(step, e), exc_info=True
8843 )
8844 self._write_configuration_status(
8845 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
8846 )
8847 raise LcmException("{} {}".format(step, e)) from e
8848
8849 async def _wait_heal_ro(
8850 self,
8851 nsr_id,
8852 timeout=600,
8853 ):
8854 start_time = time()
8855 while time() <= start_time + timeout:
8856 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8857 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8858 "operational-status"
8859 ]
8860 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8861 if operational_status_ro != "healing":
8862 break
8863 await asyncio.sleep(15)
8864 else: # timeout reached
8865 raise NgRoException("Timeout waiting for ns to heal")
8866
8867 async def vertical_scale(self, nsr_id, nslcmop_id):
8868 """
8869 Vertically scale the VDUs in a NS
8870
8871 :param nsr_id: NS Instance ID
8872 :param nslcmop_id: nslcmop ID of the vertical scale operation
8873
8874 """
8875 # Try to lock HA task here
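# (in an HA deployment only one LCM instance may process a given nslcmop;
# if another instance already holds the lock, this coroutine just returns)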
8876 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8877 if not task_is_locked_by_me:
8878 return
8879 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8880 self.logger.debug(logging_text + "Enter")
8881 # get all needed from database
8882 db_nslcmop = None
8883 db_nslcmop_update = {}
8884 nslcmop_operation_state = None
8885 old_db_update = {}
8886 q_filter = {}
8887 old_vdu_index = None
8888 old_flavor_id = None
8889 db_nsr_update = {}
8890 target = {}
8891 exc = None
8892 # in case of error, 'step' indicates which part of the vertical scale failed, so the nsr can be put at error status
8893 start_deploy = time()
8894
8895 try:
8896 # wait for any previous tasks in process
8897 step = "Waiting for previous operations to terminate"
8898 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8899
8900 self._write_ns_status(
8901 nsr_id=nsr_id,
8902 ns_state=None,
8903 current_operation="VerticalScale",
8904 current_operation_id=nslcmop_id,
8905 )
8906 step = "Getting nslcmop from database"
8907 self.logger.debug(
8908 step + " after having waited for previous tasks to be completed"
8909 )
8910 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8911 operationParams = db_nslcmop.get("operationParams")
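# Illustrative shape of operationParams for this operation (keys taken
# from the code below; values are made-up examples):
# {
#     "changeVnfFlavorData": {
#         "vnfInstanceId": "<vnfr _id>",
#         "additionalParams": {
#             "vduid": "<vdu-id-ref>",
#             "vduCountIndex": 0,
#             "virtualMemory": 4096,   # -> flavor memory-mb
#             "sizeOfStorage": 10,     # -> flavor storage-gb
#             "numVirtualCpu": 2,      # -> flavor vcpu-count
#         },
#     },
# }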
8912 # Update the VNFRs and NSRs with the requested flavor details so that RO tasks can function properly
8913 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8914 db_flavor = db_nsr.get("flavor")
8915 db_flavor_index = str(len(db_flavor))
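# The new flavor is appended to the nsr "flavor" list, so its id is the
# current list length (stored as a string, like the "id" field set below).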
8916 change_vnf_flavor_data = operationParams["changeVnfFlavorData"]
8917 flavor_dict = change_vnf_flavor_data["additionalParams"]
8918 count_index = flavor_dict["vduCountIndex"]
8919 vdu_id_ref = flavor_dict["vduid"]
8920 flavor_dict_update = {
8921 "id": db_flavor_index,
8922 "memory-mb": flavor_dict["virtualMemory"],
8923 "name": f"{vdu_id_ref}-{count_index}-flv",
8924 "storage-gb": flavor_dict["sizeOfStorage"],
8925 "vcpu-count": flavor_dict["numVirtualCpu"],
8926 }
8927 db_flavor.append(flavor_dict_update)
8928 db_update = {}
8929 db_update["flavor"] = db_flavor
8930 ns_q_filter = {
8931 "_id": nsr_id,
8932 }
8933 self.db.set_one(
8934 "nsrs",
8935 q_filter=ns_q_filter,
8936 update_dict=db_update,
8937 fail_on_empty=True,
8938 )
8939 db_vnfr = self.db.get_one(
8940 "vnfrs", {"_id": change_vnf_flavor_data["vnfInstanceId"]}
8941 )
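# Locate the target VDU by count-index and vdu-id-ref, remembering its
# current ns-flavor-id and position so the change can be reverted in the
# "finally" block if the operation fails.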
8942 for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
8943 if (
8944 vdur.get("count-index") == count_index
8945 and vdur.get("vdu-id-ref") == vdu_id_ref
8946 ):
8947 old_flavor_id = vdur.get("ns-flavor-id", 0)
8948 old_vdu_index = vdu_index
8949 filter_text = {
8950 "_id": change_vnf_flavor_data["vnfInstanceId"],
8951 "vdur.count-index": count_index,
8952 "vdur.vdu-id-ref": vdu_id_ref,
8953 }
8954 q_filter.update(filter_text)
8955 db_update = {}
8956 db_update[
8957 "vdur.{}.ns-flavor-id".format(vdu_index)
8958 ] = db_flavor_index
8959 self.db.set_one(
8960 "vnfrs",
8961 q_filter=q_filter,
8962 update_dict=db_update,
8963 fail_on_empty=True,
8964 )
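# Hand the full operationParams to RO as the vertical-scale target; RO
# returns an action_id that is polled below via _wait_ng_ro.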
8965 target = {}
8966 target.update(operationParams)
8967 desc = await self.RO.vertical_scale(nsr_id, target)
8968 self.logger.debug("RO return > {}".format(desc))
8969 action_id = desc["action_id"]
8970 await self._wait_ng_ro(
8971 nsr_id,
8972 action_id,
8973 nslcmop_id,
8974 start_deploy,
8975 self.timeout.verticalscale,
8976 operation="verticalscale",
8977 )
8978 except (
8979 NgRoException,
8980 ROclient.ROClientException,
8981 DbException,
8982 LcmException,
8983 ) as e:
8984 self.logger.error("Exit Exception {}".format(e))
8985 exc = e
8986 except asyncio.CancelledError:
8987 self.logger.error("Cancelled Exception while '{}'".format(step))
8988 exc = "Operation was cancelled"
8989 except Exception as e:
8990 exc = traceback.format_exc()
8991 self.logger.critical(
8992 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8993 )
8994 finally:
8995 self._write_ns_status(
8996 nsr_id=nsr_id,
8997 ns_state=None,
8998 current_operation="IDLE",
8999 current_operation_id=None,
9000 )
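# On failure, mark the operation as FAILED and prepare a revert of the
# vdur ns-flavor-id to the value saved before the change.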
9001 if exc:
9002 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
9003 nslcmop_operation_state = "FAILED"
9004 old_db_update[
9005 "vdur.{}.ns-flavor-id".format(old_vdu_index)
9006 ] = old_flavor_id
9007 else:
9008 nslcmop_operation_state = "COMPLETED"
9009 db_nslcmop_update["detailed-status"] = "Done"
9010 db_nsr_update["detailed-status"] = "Done"
9011
9012 self._write_op_status(
9013 op_id=nslcmop_id,
9014 stage="",
9015 error_message="",
9016 operation_state=nslcmop_operation_state,
9017 other_update=db_nslcmop_update,
9018 )
9019 if old_vdu_index is not None and old_db_update:
9020 self.logger.critical(
9021 "Reverting to old flavor: {}".format(old_db_update)
9022 )
9023 self.db.set_one(
9024 "vnfrs",
9025 q_filter=q_filter,
9026 update_dict=old_db_update,
9027 fail_on_empty=True,
9028 )
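# Publish the final operation state on the message bus (topic "ns", key
# "verticalscaled") so that other OSM components (e.g. NBI) can react.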
9029 if nslcmop_operation_state:
9030 try:
9031 msg = {
9032 "nsr_id": nsr_id,
9033 "nslcmop_id": nslcmop_id,
9034 "operationState": nslcmop_operation_state,
9035 }
9036 await self.msg.aiowrite("ns", "verticalscaled", msg)
9037 except Exception as e:
9038 self.logger.error(
9039 logging_text + "kafka_write notification Exception {}".format(e)
9040 )
9041 self.logger.debug(logging_text + "Exit")
9042 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")