ea57400dc6b1fc669c620f440211c9c71afd7a69
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import ipaddress
27 import json
28 from jinja2 import (
29 Environment,
30 TemplateError,
31 TemplateNotFound,
32 StrictUndefined,
33 UndefinedError,
34 select_autoescape,
35 )
36
37 from osm_lcm import ROclient
38 from osm_lcm.data_utils.lcm_config import LcmCfg
39 from osm_lcm.data_utils.nsr import (
40 get_deployed_kdu,
41 get_deployed_vca,
42 get_deployed_vca_list,
43 get_nsd,
44 )
45 from osm_lcm.data_utils.vca import (
46 DeployedComponent,
47 DeployedK8sResource,
48 DeployedVCA,
49 EELevel,
50 Relation,
51 EERelation,
52 safe_get_ee_relation,
53 )
54 from osm_lcm.ng_ro import NgRoClient, NgRoException
55 from osm_lcm.lcm_utils import (
56 LcmException,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import SystemRandom
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
class NsLcm(LcmBase):
    # Sentinel return codes used when looking up a sub-operation inside an
    # operation's internal list: not present / must be created / can be skipped.
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Name used for the execution-environment TLS artifacts.
    EE_TLS_NAME = "ee-tls"
    # Human-readable label for the VCA deployment task.
    task_name_deploy_vca = "Deploying VCA"
    # Maps descriptor relational-operator mnemonics to Python comparison
    # operator strings.
    rel_operation_types = {
        "GE": ">=",
        "LE": "<=",
        "GT": ">",
        "LT": "<",
        "EQ": "==",
        "NE": "!=",
    }
145
    def __init__(self, msg, lcm_tasks, config: LcmCfg):
        """
        Init, Connect to database, filesystem storage, and messaging.

        :param msg: message-bus client, passed through to LcmBase
        :param lcm_tasks: shared LCM task registry
        :param config: LcmCfg object; its timeout, RO and VCA sections are used here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # Database and filesystem are process-wide singletons.
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju-based charms); DB refresh goes through
        # the _on_update_n2vc_db callback
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # Connector for helm-based execution environments; shares the same
        # DB-update callback as N2VC.
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # Helm v3 K8s connector; no DB callback (status is polled instead).
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # Juju-bundle K8s connector; DB refresh through _on_update_k8s_db.
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # Dispatch table: KDU model type -> K8s connector instance.
        self.k8scluster_map = {
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # Dispatch table: VCA type -> connector handling that charm flavour.
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        # Dispatch table: LCM operation type -> RO status-query coroutine.
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
219
220 @staticmethod
221 def increment_ip_mac(ip_mac, vm_index=1):
222 if not isinstance(ip_mac, str):
223 return ip_mac
224 try:
225 next_ipv6 = None
226 next_ipv4 = None
227 dual_ip = ip_mac.split(";")
228 if len(dual_ip) == 2:
229 for ip in dual_ip:
230 if ipaddress.ip_address(ip).version == 6:
231 ipv6 = ipaddress.IPv6Address(ip)
232 next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
233 elif ipaddress.ip_address(ip).version == 4:
234 ipv4 = ipaddress.IPv4Address(ip)
235 next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
236 return [next_ipv4, next_ipv6]
237 # try with ipv4 look for last dot
238 i = ip_mac.rfind(".")
239 if i > 0:
240 i += 1
241 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i = ip_mac.rfind(":")
244 if i > 0:
245 i += 1
246 # format in hex, len can be 2 for mac or 4 for ipv6
247 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
248 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
249 )
250 except Exception:
251 pass
252 return None
253
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by the N2VC/helm connectors when VCA data changes;
        refreshes vcaStatus, configurationStatus and nsState in the nsr record.

        :param table: db table the notification refers to (only "nsrs" data is read)
        :param filter: db filter; its "_id" is taken as the nsr id
        :param path: dotted db path of the change; its last component is parsed
            as the VCA index within _admin.deployed.VCA
        :param updated_data: changed content (not used directly here)
        :param vca_id: optional VCA id forwarded to n2vc queries
        :return: None; all errors except cancellation/timeout are logged, not raised
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:
            # NOTE(review): if filter is None/invalid this line raises before
            # nsr_id is bound, and the final except handler below would then hit
            # a NameError when formatting the log message — confirm callers.
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # First, we need to verify if the current vcaStatus is null, because if that is the case,
            # MongoDB will not be able to create the fields used within the update key in the database
            if not nsr.get("vcaStatus"):
                # Write an empty dictionary to the vcaStatus field, if its value is null
                self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})

            # Get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # Update the vcaStatus
            db_key = f"vcaStatus.{nsr_id}.VNF"
            db_dict = dict()

            db_dict[db_key] = status_dict[nsr_id]
            await self.n2vc.update_vca_status(db_dict[db_key], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of the notification path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so both assignments below raise KeyError, which is
                # swallowed by the except just after — the configurationStatus
                # update therefore appears never to reach the DB. Confirm the
                # intended key (e.g. "configurationStatus.{i}.status") before
                # changing behavior.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
362
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record.

        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id. NOTE(review): despite the None default,
            filter.get("_id") is called unconditionally below, so a None filter
            would raise AttributeError before the try block — confirm callers
            always pass a filter.
        :param vca_id: optional VCA id forwarded to the k8s connector
        :cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            # query the connector matching this cluster type for the KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # First, we need to verify if the current vcaStatus is null, because if that is the case,
            # MongoDB will not be able to create the fields used within the update key in the database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            if not nsr.get("vcaStatus"):
                # Write an empty dictionary to the vcaStatus field, if its value is null
                self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})

            # Update the vcaStatus
            db_key = f"vcaStatus.{nsr_id}.KNF"
            db_dict = dict()

            db_dict[db_key] = vca_status

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict[db_key],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
420
421 @staticmethod
422 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
423 try:
424 env = Environment(
425 undefined=StrictUndefined,
426 autoescape=select_autoescape(default_for_string=True, default=True),
427 )
428 template = env.from_string(cloud_init_text)
429 return template.render(additional_params or {})
430 except UndefinedError as e:
431 raise LcmException(
432 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
433 "file, must be provided in the instantiation parameters inside the "
434 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
435 )
436 except (TemplateError, TemplateNotFound) as e:
437 raise LcmException(
438 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
439 vnfd_id, vdu_id, e
440 )
441 )
442
443 def _get_vdu_cloud_init_content(self, vdu, vnfd):
444 cloud_init_content = cloud_init_file = None
445 try:
446 if vdu.get("cloud-init-file"):
447 base_folder = vnfd["_admin"]["storage"]
448 if base_folder["pkg-dir"]:
449 cloud_init_file = "{}/{}/cloud_init/{}".format(
450 base_folder["folder"],
451 base_folder["pkg-dir"],
452 vdu["cloud-init-file"],
453 )
454 else:
455 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
456 base_folder["folder"],
457 vdu["cloud-init-file"],
458 )
459 with self.fs.file_open(cloud_init_file, "r") as ci_file:
460 cloud_init_content = ci_file.read()
461 elif vdu.get("cloud-init"):
462 cloud_init_content = vdu["cloud-init"]
463
464 return cloud_init_content
465 except FsException as e:
466 raise LcmException(
467 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
468 vnfd["id"], vdu["id"], cloud_init_file, e
469 )
470 )
471
472 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
473 vdur = next(
474 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
475 )
476 additional_params = vdur.get("additionalParams")
477 return parse_yaml_strings(additional_params)
478
479 @staticmethod
480 def ip_profile_2_RO(ip_profile):
481 RO_ip_profile = deepcopy(ip_profile)
482 if "dns-server" in RO_ip_profile:
483 if isinstance(RO_ip_profile["dns-server"], list):
484 RO_ip_profile["dns-address"] = []
485 for ds in RO_ip_profile.pop("dns-server"):
486 RO_ip_profile["dns-address"].append(ds["address"])
487 else:
488 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
489 if RO_ip_profile.get("ip-version") == "ipv4":
490 RO_ip_profile["ip-version"] = "IPv4"
491 if RO_ip_profile.get("ip-version") == "ipv6":
492 RO_ip_profile["ip-version"] = "IPv6"
493 if "dhcp-params" in RO_ip_profile:
494 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
495 return RO_ip_profile
496
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Scale the vdur list of a vnfr in the database (and in db_vnfr itself).

        :param db_vnfr: vnfr record; its 'vdur' list is refreshed from the DB on exit
        :param vdu_create: dict {vdu_id: count} of vdur instances to add
        :param vdu_delete: dict {vdu_id: count} of vdur instances to remove
        :param mark_delete: when True, vdurs are only marked status=DELETING in
            the DB; when False they are pulled from the vdur list one by one
        :raises LcmException: scaling out when neither a vdur nor a vdur-template exists
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the last existing vdur of this vdu_id, if any
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each new replica gets a fresh _id and an incremented count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM assigns new ones
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # single DB write for the new vdurs / template and the modified timestamp
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
607
608 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
609 """
610 Updates database nsr with the RO info for the created vld
611 :param ns_update_nsr: dictionary to be filled with the updated info
612 :param db_nsr: content of db_nsr. This is also modified
613 :param nsr_desc_RO: nsr descriptor from RO
614 :return: Nothing, LcmException is raised on errors
615 """
616
617 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
618 for net_RO in get_iterable(nsr_desc_RO, "nets"):
619 if vld["id"] != net_RO.get("ns_net_osm_id"):
620 continue
621 vld["vim-id"] = net_RO.get("vim_net_id")
622 vld["name"] = net_RO.get("vim_name")
623 vld["status"] = net_RO.get("status")
624 vld["status-detailed"] = net_RO.get("error_msg")
625 ns_update_nsr["vld.{}".format(vld_index)] = vld
626 break
627 else:
628 raise LcmException(
629 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
630 )
631
632 def set_vnfr_at_error(self, db_vnfrs, error_text):
633 try:
634 for db_vnfr in db_vnfrs.values():
635 vnfr_update = {"status": "ERROR"}
636 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
637 if "status" not in vdur:
638 vdur["status"] = "ERROR"
639 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
640 if error_text:
641 vdur["status-detailed"] = str(error_text)
642 vnfr_update[
643 "vdur.{}.status-detailed".format(vdu_index)
644 ] = "ERROR"
645 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
646 except DbException as e:
647 self.logger.error("Cannot update vnf. {}".format(e))
648
649 def _get_ns_config_info(self, nsr_id):
650 """
651 Generates a mapping between vnf,vdu elements and the N2VC id
652 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
653 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
654 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
655 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
656 """
657 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
658 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
659 mapping = {}
660 ns_config_info = {"osm-config-mapping": mapping}
661 for vca in vca_deployed_list:
662 if not vca["member-vnf-index"]:
663 continue
664 if not vca["vdu_id"]:
665 mapping[vca["member-vnf-index"]] = vca["application"]
666 else:
667 mapping[
668 "{}.{}.{}".format(
669 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
670 )
671 ] = vca["application"]
672 return ns_config_info
673
674 async def _instantiate_ng_ro(
675 self,
676 logging_text,
677 nsr_id,
678 nsd,
679 db_nsr,
680 db_nslcmop,
681 db_vnfrs,
682 db_vnfds,
683 n2vc_key_list,
684 stage,
685 start_deploy,
686 timeout_ns_deploy,
687 ):
688 db_vims = {}
689
690 def get_vim_account(vim_account_id):
691 nonlocal db_vims
692 if vim_account_id in db_vims:
693 return db_vims[vim_account_id]
694 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
695 db_vims[vim_account_id] = db_vim
696 return db_vim
697
698 # modify target_vld info with instantiation parameters
699 def parse_vld_instantiation_params(
700 target_vim, target_vld, vld_params, target_sdn
701 ):
702 if vld_params.get("ip-profile"):
703 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
704 vld_params["ip-profile"]
705 )
706 if vld_params.get("provider-network"):
707 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
708 "provider-network"
709 ]
710 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
711 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
712 "provider-network"
713 ]["sdn-ports"]
714
715 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
716 # if wim_account_id is specified in vld_params, validate if it is feasible.
717 wim_account_id, db_wim = select_feasible_wim_account(
718 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
719 )
720
721 if wim_account_id:
722 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
723 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
724 # update vld_params with correct WIM account Id
725 vld_params["wimAccountId"] = wim_account_id
726
727 target_wim = "wim:{}".format(wim_account_id)
728 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
729 sdn_ports = get_sdn_ports(vld_params, db_wim)
730 if len(sdn_ports) > 0:
731 target_vld["vim_info"][target_wim] = target_wim_attrs
732 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
733
734 self.logger.debug(
735 "Target VLD with WIM data: {:s}".format(str(target_vld))
736 )
737
738 for param in ("vim-network-name", "vim-network-id"):
739 if vld_params.get(param):
740 if isinstance(vld_params[param], dict):
741 for vim, vim_net in vld_params[param].items():
742 other_target_vim = "vim:" + vim
743 populate_dict(
744 target_vld["vim_info"],
745 (other_target_vim, param.replace("-", "_")),
746 vim_net,
747 )
748 else: # isinstance str
749 target_vld["vim_info"][target_vim][
750 param.replace("-", "_")
751 ] = vld_params[param]
752 if vld_params.get("common_id"):
753 target_vld["common_id"] = vld_params.get("common_id")
754
755 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
756 def update_ns_vld_target(target, ns_params):
757 for vnf_params in ns_params.get("vnf", ()):
758 if vnf_params.get("vimAccountId"):
759 target_vnf = next(
760 (
761 vnfr
762 for vnfr in db_vnfrs.values()
763 if vnf_params["member-vnf-index"]
764 == vnfr["member-vnf-index-ref"]
765 ),
766 None,
767 )
768 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
769 if not vdur:
770 continue
771 for a_index, a_vld in enumerate(target["ns"]["vld"]):
772 target_vld = find_in_list(
773 get_iterable(vdur, "interfaces"),
774 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
775 )
776
777 vld_params = find_in_list(
778 get_iterable(ns_params, "vld"),
779 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
780 )
781 if target_vld:
782 if vnf_params.get("vimAccountId") not in a_vld.get(
783 "vim_info", {}
784 ):
785 target_vim_network_list = [
786 v for _, v in a_vld.get("vim_info").items()
787 ]
788 target_vim_network_name = next(
789 (
790 item.get("vim_network_name", "")
791 for item in target_vim_network_list
792 ),
793 "",
794 )
795
796 target["ns"]["vld"][a_index].get("vim_info").update(
797 {
798 "vim:{}".format(vnf_params["vimAccountId"]): {
799 "vim_network_name": target_vim_network_name,
800 }
801 }
802 )
803
804 if vld_params:
805 for param in ("vim-network-name", "vim-network-id"):
806 if vld_params.get(param) and isinstance(
807 vld_params[param], dict
808 ):
809 for vim, vim_net in vld_params[
810 param
811 ].items():
812 other_target_vim = "vim:" + vim
813 populate_dict(
814 target["ns"]["vld"][a_index].get(
815 "vim_info"
816 ),
817 (
818 other_target_vim,
819 param.replace("-", "_"),
820 ),
821 vim_net,
822 )
823
824 nslcmop_id = db_nslcmop["_id"]
825 target = {
826 "name": db_nsr["name"],
827 "ns": {"vld": []},
828 "vnf": [],
829 "image": deepcopy(db_nsr["image"]),
830 "flavor": deepcopy(db_nsr["flavor"]),
831 "action_id": nslcmop_id,
832 "cloud_init_content": {},
833 }
834 for image in target["image"]:
835 image["vim_info"] = {}
836 for flavor in target["flavor"]:
837 flavor["vim_info"] = {}
838 if db_nsr.get("shared-volumes"):
839 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
840 for shared_volumes in target["shared-volumes"]:
841 shared_volumes["vim_info"] = {}
842 if db_nsr.get("affinity-or-anti-affinity-group"):
843 target["affinity-or-anti-affinity-group"] = deepcopy(
844 db_nsr["affinity-or-anti-affinity-group"]
845 )
846 for affinity_or_anti_affinity_group in target[
847 "affinity-or-anti-affinity-group"
848 ]:
849 affinity_or_anti_affinity_group["vim_info"] = {}
850
851 if db_nslcmop.get("lcmOperationType") != "instantiate":
852 # get parameters of instantiation:
853 db_nslcmop_instantiate = self.db.get_list(
854 "nslcmops",
855 {
856 "nsInstanceId": db_nslcmop["nsInstanceId"],
857 "lcmOperationType": "instantiate",
858 },
859 )[-1]
860 ns_params = db_nslcmop_instantiate.get("operationParams")
861 else:
862 ns_params = db_nslcmop.get("operationParams")
863 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
864 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
865
866 cp2target = {}
867 for vld_index, vld in enumerate(db_nsr.get("vld")):
868 target_vim = "vim:{}".format(ns_params["vimAccountId"])
869 target_vld = {
870 "id": vld["id"],
871 "name": vld["name"],
872 "mgmt-network": vld.get("mgmt-network", False),
873 "type": vld.get("type"),
874 "vim_info": {
875 target_vim: {
876 "vim_network_name": vld.get("vim-network-name"),
877 "vim_account_id": ns_params["vimAccountId"],
878 }
879 },
880 }
881 # check if this network needs SDN assist
882 if vld.get("pci-interfaces"):
883 db_vim = get_vim_account(ns_params["vimAccountId"])
884 if vim_config := db_vim.get("config"):
885 if sdnc_id := vim_config.get("sdn-controller"):
886 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
887 target_sdn = "sdn:{}".format(sdnc_id)
888 target_vld["vim_info"][target_sdn] = {
889 "sdn": True,
890 "target_vim": target_vim,
891 "vlds": [sdn_vld],
892 "type": vld.get("type"),
893 }
894
895 nsd_vnf_profiles = get_vnf_profiles(nsd)
896 for nsd_vnf_profile in nsd_vnf_profiles:
897 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
898 if cp["virtual-link-profile-id"] == vld["id"]:
899 cp2target[
900 "member_vnf:{}.{}".format(
901 cp["constituent-cpd-id"][0][
902 "constituent-base-element-id"
903 ],
904 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
905 )
906 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
907
908 # check at nsd descriptor, if there is an ip-profile
909 vld_params = {}
910 nsd_vlp = find_in_list(
911 get_virtual_link_profiles(nsd),
912 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
913 == vld["id"],
914 )
915 if (
916 nsd_vlp
917 and nsd_vlp.get("virtual-link-protocol-data")
918 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
919 ):
920 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
921 "l3-protocol-data"
922 ]
923
924 # update vld_params with instantiation params
925 vld_instantiation_params = find_in_list(
926 get_iterable(ns_params, "vld"),
927 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
928 )
929 if vld_instantiation_params:
930 vld_params.update(vld_instantiation_params)
931 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
932 target["ns"]["vld"].append(target_vld)
933 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
934 update_ns_vld_target(target, ns_params)
935
936 for vnfr in db_vnfrs.values():
937 vnfd = find_in_list(
938 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
939 )
940 vnf_params = find_in_list(
941 get_iterable(ns_params, "vnf"),
942 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
943 )
944 target_vnf = deepcopy(vnfr)
945 target_vim = "vim:{}".format(vnfr["vim-account-id"])
946 for vld in target_vnf.get("vld", ()):
947 # check if connected to a ns.vld, to fill target'
948 vnf_cp = find_in_list(
949 vnfd.get("int-virtual-link-desc", ()),
950 lambda cpd: cpd.get("id") == vld["id"],
951 )
952 if vnf_cp:
953 ns_cp = "member_vnf:{}.{}".format(
954 vnfr["member-vnf-index-ref"], vnf_cp["id"]
955 )
956 if cp2target.get(ns_cp):
957 vld["target"] = cp2target[ns_cp]
958
959 vld["vim_info"] = {
960 target_vim: {"vim_network_name": vld.get("vim-network-name")}
961 }
962 # check if this network needs SDN assist
963 target_sdn = None
964 if vld.get("pci-interfaces"):
965 db_vim = get_vim_account(vnfr["vim-account-id"])
966 sdnc_id = db_vim["config"].get("sdn-controller")
967 if sdnc_id:
968 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
969 target_sdn = "sdn:{}".format(sdnc_id)
970 vld["vim_info"][target_sdn] = {
971 "sdn": True,
972 "target_vim": target_vim,
973 "vlds": [sdn_vld],
974 "type": vld.get("type"),
975 }
976
977 # check at vnfd descriptor, if there is an ip-profile
978 vld_params = {}
979 vnfd_vlp = find_in_list(
980 get_virtual_link_profiles(vnfd),
981 lambda a_link_profile: a_link_profile["id"] == vld["id"],
982 )
983 if (
984 vnfd_vlp
985 and vnfd_vlp.get("virtual-link-protocol-data")
986 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
987 ):
988 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
989 "l3-protocol-data"
990 ]
991 # update vld_params with instantiation params
992 if vnf_params:
993 vld_instantiation_params = find_in_list(
994 get_iterable(vnf_params, "internal-vld"),
995 lambda i_vld: i_vld["name"] == vld["id"],
996 )
997 if vld_instantiation_params:
998 vld_params.update(vld_instantiation_params)
999 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1000
1001 vdur_list = []
1002 for vdur in target_vnf.get("vdur", ()):
1003 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1004 continue # This vdu must not be created
1005 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1006
1007 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1008
1009 if ssh_keys_all:
1010 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1011 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1012 if (
1013 vdu_configuration
1014 and vdu_configuration.get("config-access")
1015 and vdu_configuration.get("config-access").get("ssh-access")
1016 ):
1017 vdur["ssh-keys"] = ssh_keys_all
1018 vdur["ssh-access-required"] = vdu_configuration[
1019 "config-access"
1020 ]["ssh-access"]["required"]
1021 elif (
1022 vnf_configuration
1023 and vnf_configuration.get("config-access")
1024 and vnf_configuration.get("config-access").get("ssh-access")
1025 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1026 ):
1027 vdur["ssh-keys"] = ssh_keys_all
1028 vdur["ssh-access-required"] = vnf_configuration[
1029 "config-access"
1030 ]["ssh-access"]["required"]
1031 elif ssh_keys_instantiation and find_in_list(
1032 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1033 ):
1034 vdur["ssh-keys"] = ssh_keys_instantiation
1035
1036 self.logger.debug("NS > vdur > {}".format(vdur))
1037
1038 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1039 # cloud-init
1040 if vdud.get("cloud-init-file"):
1041 vdur["cloud-init"] = "{}:file:{}".format(
1042 vnfd["_id"], vdud.get("cloud-init-file")
1043 )
1044 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1045 if vdur["cloud-init"] not in target["cloud_init_content"]:
1046 base_folder = vnfd["_admin"]["storage"]
1047 if base_folder["pkg-dir"]:
1048 cloud_init_file = "{}/{}/cloud_init/{}".format(
1049 base_folder["folder"],
1050 base_folder["pkg-dir"],
1051 vdud.get("cloud-init-file"),
1052 )
1053 else:
1054 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1055 base_folder["folder"],
1056 vdud.get("cloud-init-file"),
1057 )
1058 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1059 target["cloud_init_content"][
1060 vdur["cloud-init"]
1061 ] = ci_file.read()
1062 elif vdud.get("cloud-init"):
1063 vdur["cloud-init"] = "{}:vdu:{}".format(
1064 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1065 )
1066 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1067 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1068 "cloud-init"
1069 ]
1070 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1071 deploy_params_vdu = self._format_additional_params(
1072 vdur.get("additionalParams") or {}
1073 )
1074 deploy_params_vdu["OSM"] = get_osm_params(
1075 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1076 )
1077 vdur["additionalParams"] = deploy_params_vdu
1078
1079 # flavor
1080 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1081 if target_vim not in ns_flavor["vim_info"]:
1082 ns_flavor["vim_info"][target_vim] = {}
1083
1084 # deal with images
1085 # in case alternative images are provided we must check if they should be applied
1086 # for the vim_type, modify the vim_type taking into account
1087 ns_image_id = int(vdur["ns-image-id"])
1088 if vdur.get("alt-image-ids"):
1089 db_vim = get_vim_account(vnfr["vim-account-id"])
1090 vim_type = db_vim["vim_type"]
1091 for alt_image_id in vdur.get("alt-image-ids"):
1092 ns_alt_image = target["image"][int(alt_image_id)]
1093 if vim_type == ns_alt_image.get("vim-type"):
1094 # must use alternative image
1095 self.logger.debug(
1096 "use alternative image id: {}".format(alt_image_id)
1097 )
1098 ns_image_id = alt_image_id
1099 vdur["ns-image-id"] = ns_image_id
1100 break
1101 ns_image = target["image"][int(ns_image_id)]
1102 if target_vim not in ns_image["vim_info"]:
1103 ns_image["vim_info"][target_vim] = {}
1104
1105 # Affinity groups
1106 if vdur.get("affinity-or-anti-affinity-group-id"):
1107 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1108 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1109 if target_vim not in ns_ags["vim_info"]:
1110 ns_ags["vim_info"][target_vim] = {}
1111
1112 # shared-volumes
1113 if vdur.get("shared-volumes-id"):
1114 for sv_id in vdur["shared-volumes-id"]:
1115 ns_sv = find_in_list(
1116 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1117 )
1118 if ns_sv:
1119 ns_sv["vim_info"][target_vim] = {}
1120
1121 vdur["vim_info"] = {target_vim: {}}
1122 # instantiation parameters
1123 if vnf_params:
1124 vdu_instantiation_params = find_in_list(
1125 get_iterable(vnf_params, "vdu"),
1126 lambda i_vdu: i_vdu["id"] == vdud["id"],
1127 )
1128 if vdu_instantiation_params:
1129 # Parse the vdu_volumes from the instantiation params
1130 vdu_volumes = get_volumes_from_instantiation_params(
1131 vdu_instantiation_params, vdud
1132 )
1133 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1134 vdur["additionalParams"]["OSM"][
1135 "vim_flavor_id"
1136 ] = vdu_instantiation_params.get("vim-flavor-id")
1137 vdur_list.append(vdur)
1138 target_vnf["vdur"] = vdur_list
1139 target["vnf"].append(target_vnf)
1140
1141 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1142 desc = await self.RO.deploy(nsr_id, target)
1143 self.logger.debug("RO return > {}".format(desc))
1144 action_id = desc["action_id"]
1145 await self._wait_ng_ro(
1146 nsr_id,
1147 action_id,
1148 nslcmop_id,
1149 start_deploy,
1150 timeout_ns_deploy,
1151 stage,
1152 operation="instantiation",
1153 )
1154
1155 # Updating NSR
1156 db_nsr_update = {
1157 "_admin.deployed.RO.operational-status": "running",
1158 "detailed-status": " ".join(stage),
1159 }
1160 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1161 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1162 self._write_op_status(nslcmop_id, stage)
1163 self.logger.debug(
1164 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1165 )
1166 return
1167
1168 async def _wait_ng_ro(
1169 self,
1170 nsr_id,
1171 action_id,
1172 nslcmop_id=None,
1173 start_time=None,
1174 timeout=600,
1175 stage=None,
1176 operation=None,
1177 ):
1178 detailed_status_old = None
1179 db_nsr_update = {}
1180 start_time = start_time or time()
1181 while time() <= start_time + timeout:
1182 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1183 self.logger.debug("Wait NG RO > {}".format(desc_status))
1184 if desc_status["status"] == "FAILED":
1185 raise NgRoException(desc_status["details"])
1186 elif desc_status["status"] == "BUILD":
1187 if stage:
1188 stage[2] = "VIM: ({})".format(desc_status["details"])
1189 elif desc_status["status"] == "DONE":
1190 if stage:
1191 stage[2] = "Deployed at VIM"
1192 break
1193 else:
1194 assert False, "ROclient.check_ns_status returns unknown {}".format(
1195 desc_status["status"]
1196 )
1197 if stage and nslcmop_id and stage[2] != detailed_status_old:
1198 detailed_status_old = stage[2]
1199 db_nsr_update["detailed-status"] = " ".join(stage)
1200 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1201 self._write_op_status(nslcmop_id, stage)
1202 await asyncio.sleep(15)
1203 else: # timeout_ns_deploy
1204 raise NgRoException("Timeout waiting ns to deploy")
1205
1206 async def _terminate_ng_ro(
1207 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1208 ):
1209 db_nsr_update = {}
1210 failed_detail = []
1211 action_id = None
1212 start_deploy = time()
1213 try:
1214 target = {
1215 "ns": {"vld": []},
1216 "vnf": [],
1217 "image": [],
1218 "flavor": [],
1219 "action_id": nslcmop_id,
1220 }
1221 desc = await self.RO.deploy(nsr_id, target)
1222 action_id = desc["action_id"]
1223 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1224 self.logger.debug(
1225 logging_text
1226 + "ns terminate action at RO. action_id={}".format(action_id)
1227 )
1228
1229 # wait until done
1230 delete_timeout = 20 * 60 # 20 minutes
1231 await self._wait_ng_ro(
1232 nsr_id,
1233 action_id,
1234 nslcmop_id,
1235 start_deploy,
1236 delete_timeout,
1237 stage,
1238 operation="termination",
1239 )
1240 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1241 # delete all nsr
1242 await self.RO.delete(nsr_id)
1243 except NgRoException as e:
1244 if e.http_code == 404: # not found
1245 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1246 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1247 self.logger.debug(
1248 logging_text + "RO_action_id={} already deleted".format(action_id)
1249 )
1250 elif e.http_code == 409: # conflict
1251 failed_detail.append("delete conflict: {}".format(e))
1252 self.logger.debug(
1253 logging_text
1254 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1255 )
1256 else:
1257 failed_detail.append("delete error: {}".format(e))
1258 self.logger.error(
1259 logging_text
1260 + "RO_action_id={} delete error: {}".format(action_id, e)
1261 )
1262 except Exception as e:
1263 failed_detail.append("delete error: {}".format(e))
1264 self.logger.error(
1265 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1266 )
1267
1268 if failed_detail:
1269 stage[2] = "Error deleting from VIM"
1270 else:
1271 stage[2] = "Deleted from VIM"
1272 db_nsr_update["detailed-status"] = " ".join(stage)
1273 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1274 self._write_op_status(nslcmop_id, stage)
1275
1276 if failed_detail:
1277 raise LcmException("; ".join(failed_detail))
1278 return
1279
1280 async def instantiate_RO(
1281 self,
1282 logging_text,
1283 nsr_id,
1284 nsd,
1285 db_nsr,
1286 db_nslcmop,
1287 db_vnfrs,
1288 db_vnfds,
1289 n2vc_key_list,
1290 stage,
1291 ):
1292 """
1293 Instantiate at RO
1294 :param logging_text: preffix text to use at logging
1295 :param nsr_id: nsr identity
1296 :param nsd: database content of ns descriptor
1297 :param db_nsr: database content of ns record
1298 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1299 :param db_vnfrs:
1300 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1301 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1302 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1303 :return: None or exception
1304 """
1305 try:
1306 start_deploy = time()
1307 ns_params = db_nslcmop.get("operationParams")
1308 if ns_params and ns_params.get("timeout_ns_deploy"):
1309 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1310 else:
1311 timeout_ns_deploy = self.timeout.ns_deploy
1312
1313 # Check for and optionally request placement optimization. Database will be updated if placement activated
1314 stage[2] = "Waiting for Placement."
1315 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1316 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1317 for vnfr in db_vnfrs.values():
1318 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1319 break
1320 else:
1321 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1322
1323 return await self._instantiate_ng_ro(
1324 logging_text,
1325 nsr_id,
1326 nsd,
1327 db_nsr,
1328 db_nslcmop,
1329 db_vnfrs,
1330 db_vnfds,
1331 n2vc_key_list,
1332 stage,
1333 start_deploy,
1334 timeout_ns_deploy,
1335 )
1336 except Exception as e:
1337 stage[2] = "ERROR deploying at VIM"
1338 self.set_vnfr_at_error(db_vnfrs, str(e))
1339 self.logger.error(
1340 "Error deploying at VIM {}".format(e),
1341 exc_info=not isinstance(
1342 e,
1343 (
1344 ROclient.ROClientException,
1345 LcmException,
1346 DbException,
1347 NgRoException,
1348 ),
1349 ),
1350 )
1351 raise
1352
1353 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1354 """
1355 Wait for kdu to be up, get ip address
1356 :param logging_text: prefix use for logging
1357 :param nsr_id:
1358 :param vnfr_id:
1359 :param kdu_name:
1360 :return: IP address, K8s services
1361 """
1362
1363 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1364 nb_tries = 0
1365
1366 while nb_tries < 360:
1367 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1368 kdur = next(
1369 (
1370 x
1371 for x in get_iterable(db_vnfr, "kdur")
1372 if x.get("kdu-name") == kdu_name
1373 ),
1374 None,
1375 )
1376 if not kdur:
1377 raise LcmException(
1378 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1379 )
1380 if kdur.get("status"):
1381 if kdur["status"] in ("READY", "ENABLED"):
1382 return kdur.get("ip-address"), kdur.get("services")
1383 else:
1384 raise LcmException(
1385 "target KDU={} is in error state".format(kdu_name)
1386 )
1387
1388 await asyncio.sleep(10)
1389 nb_tries += 1
1390 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1391
1392 async def wait_vm_up_insert_key_ro(
1393 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1394 ):
1395 """
1396 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1397 :param logging_text: prefix use for logging
1398 :param nsr_id:
1399 :param vnfr_id:
1400 :param vdu_id:
1401 :param vdu_index:
1402 :param pub_key: public ssh key to inject, None to skip
1403 :param user: user to apply the public ssh key
1404 :return: IP address
1405 """
1406
1407 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1408 ip_address = None
1409 target_vdu_id = None
1410 ro_retries = 0
1411
1412 while True:
1413 ro_retries += 1
1414 if ro_retries >= 360: # 1 hour
1415 raise LcmException(
1416 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1417 )
1418
1419 await asyncio.sleep(10)
1420
1421 # get ip address
1422 if not target_vdu_id:
1423 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1424
1425 if not vdu_id: # for the VNF case
1426 if db_vnfr.get("status") == "ERROR":
1427 raise LcmException(
1428 "Cannot inject ssh-key because target VNF is in error state"
1429 )
1430 ip_address = db_vnfr.get("ip-address")
1431 if not ip_address:
1432 continue
1433 vdur = next(
1434 (
1435 x
1436 for x in get_iterable(db_vnfr, "vdur")
1437 if x.get("ip-address") == ip_address
1438 ),
1439 None,
1440 )
1441 else: # VDU case
1442 vdur = next(
1443 (
1444 x
1445 for x in get_iterable(db_vnfr, "vdur")
1446 if x.get("vdu-id-ref") == vdu_id
1447 and x.get("count-index") == vdu_index
1448 ),
1449 None,
1450 )
1451
1452 if (
1453 not vdur and len(db_vnfr.get("vdur", ())) == 1
1454 ): # If only one, this should be the target vdu
1455 vdur = db_vnfr["vdur"][0]
1456 if not vdur:
1457 raise LcmException(
1458 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1459 vnfr_id, vdu_id, vdu_index
1460 )
1461 )
1462 # New generation RO stores information at "vim_info"
1463 ng_ro_status = None
1464 target_vim = None
1465 if vdur.get("vim_info"):
1466 target_vim = next(
1467 t for t in vdur["vim_info"]
1468 ) # there should be only one key
1469 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1470 if (
1471 vdur.get("pdu-type")
1472 or vdur.get("status") == "ACTIVE"
1473 or ng_ro_status == "ACTIVE"
1474 ):
1475 ip_address = vdur.get("ip-address")
1476 if not ip_address:
1477 continue
1478 target_vdu_id = vdur["vdu-id-ref"]
1479 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1480 raise LcmException(
1481 "Cannot inject ssh-key because target VM is in error state"
1482 )
1483
1484 if not target_vdu_id:
1485 continue
1486
1487 # inject public key into machine
1488 if pub_key and user:
1489 self.logger.debug(logging_text + "Inserting RO key")
1490 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1491 if vdur.get("pdu-type"):
1492 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1493 return ip_address
1494 try:
1495 target = {
1496 "action": {
1497 "action": "inject_ssh_key",
1498 "key": pub_key,
1499 "user": user,
1500 },
1501 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1502 }
1503 desc = await self.RO.deploy(nsr_id, target)
1504 action_id = desc["action_id"]
1505 await self._wait_ng_ro(
1506 nsr_id, action_id, timeout=600, operation="instantiation"
1507 )
1508 break
1509 except NgRoException as e:
1510 raise LcmException(
1511 "Reaching max tries injecting key. Error: {}".format(e)
1512 )
1513 else:
1514 break
1515
1516 return ip_address
1517
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: nsr record id, re-read each iteration for fresh status
        :param vca_deployed_list: list of deployed VCAs; entry vca_index is "me"
        :param vca_index: index of the VCA whose dependencies are awaited
        :raises LcmException: a dependent charm is BROKEN, or waiting times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): counts iterations, not seconds; with the 10s sleep below
        # the effective wait is 300 x 10s = 50 minutes — confirm this is intended
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # dependency applies when I am an NS-level VCA (no member-vnf-index)
                # or the other VCA belongs to my same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # dependency still in progress: stop scanning, sleep and retry
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1555
1556 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1557 vca_id = None
1558 if db_vnfr:
1559 vca_id = deep_get(db_vnfr, ("vca-id",))
1560 elif db_nsr:
1561 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1562 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1563 return vca_id
1564
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        kdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Create/register an execution environment at VCA for one element (NS, VNF,
        VDU or KDU), install its configuration software, add relations, optionally
        inject an ssh key into the target VM, and run the Day-1 (initial config)
        primitives. Progress is written to the nsr configurationStatus and to the
        operation record; on any failure the status is set to BROKEN and an
        LcmException is raised.

        :param logging_text: prefix for log messages
        :param vca_index: index of this VCA inside _admin.deployed.VCA of the nsr
        :param nsi_id: ns instance id used to build the namespace (may be None)
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnf record (None for NS level)
        :param vdu_id: vdu id when configuring a VDU, else None
        :param kdu_name: kdu name when configuring a KDU, else None
        :param vdu_index: count-index of the vdu (used in the namespace)
        :param kdu_index: count-index of the kdu (passed to metrics extraction)
        :param config_descriptor: descriptor section with the configuration (day-1/day-2)
        :param deploy_params: params for the primitives; "rw_mgmt_ip" is added here
        :param base_folder: vnfd/nsd _admin.storage info to locate the artifact
        :param nslcmop_id: operation id for status reporting
        :param stage: 3-item status list; item 0 is overwritten with Day-1 stage
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/helm-v3
        :param vca_name: charm or chart name inside the package
        :param ee_config_descriptor: execution-environment descriptor (id, metrics)
        :raises LcmException: wrapping any failure, with the failing step in the message
        """
        nsr_id = db_nsr["_id"]
        # dotted prefix of this VCA entry inside the nsr document
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # 'step' tracks the current action so the except block can report it
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        # charm name is the last path component of the artifact
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=nsr_id,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        chart_model=vca_name,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                # native charm runs inside the VM itself: wait for the VM first
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # native charm may take a "config" primitive applied at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            is_relation_added = await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )

            if not is_relation_added:
                raise LcmException("Relations could not be added to VCA.")

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # default rw_mgmt_ip to None, avoiding the non definition of the variable
                rw_mgmt_ip = None

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip, services = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                        vnfd = self.db.get_one(
                            "vnfds_revisions",
                            {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
                        )
                        kdu = get_kdu(vnfd, kdu_name)
                        # keep only the k8s services declared at the kdu descriptor
                        kdu_services = [
                            service["name"] for service in get_kdu_services(kdu)
                        ]
                        exposed_services = []
                        for service in services:
                            if any(s in service["name"] for s in kdu_services):
                                exposed_services.append(service)
                        await self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name="config",
                            params_dict={
                                "osm-config": json.dumps(
                                    OsmConfigBuilder(
                                        k8s={"services": exposed_services}
                                    ).build()
                                )
                            },
                            vca_id=vca_id,
                        )

                    # This verification is needed in order to avoid trying to add a public key
                    # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
                    # for a KNF and not for its KDUs, the previous verification gives False, and the code
                    # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
                    # or it is a KNF)
                    elif db_vnfr.get("vdur"):
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm-v3":
                # TODO: review for those cases where the helm chart is a reference and
                # is not part of the NF package
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                    element_type=element_type,
                    vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
                    vdu_id=vdu_id,
                    vdu_index=vdu_index,
                    kdu_name=kdu_name,
                    kdu_index=kdu_index,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{}. {}".format(step, e)) from e
2041
2042 def _write_ns_status(
2043 self,
2044 nsr_id: str,
2045 ns_state: str,
2046 current_operation: str,
2047 current_operation_id: str,
2048 error_description: str = None,
2049 error_detail: str = None,
2050 other_update: dict = None,
2051 ):
2052 """
2053 Update db_nsr fields.
2054 :param nsr_id:
2055 :param ns_state:
2056 :param current_operation:
2057 :param current_operation_id:
2058 :param error_description:
2059 :param error_detail:
2060 :param other_update: Other required changes at database if provided, will be cleared
2061 :return:
2062 """
2063 try:
2064 db_dict = other_update or {}
2065 db_dict[
2066 "_admin.nslcmop"
2067 ] = current_operation_id # for backward compatibility
2068 db_dict["_admin.current-operation"] = current_operation_id
2069 db_dict["_admin.operation-type"] = (
2070 current_operation if current_operation != "IDLE" else None
2071 )
2072 db_dict["currentOperation"] = current_operation
2073 db_dict["currentOperationID"] = current_operation_id
2074 db_dict["errorDescription"] = error_description
2075 db_dict["errorDetail"] = error_detail
2076
2077 if ns_state:
2078 db_dict["nsState"] = ns_state
2079 self.update_db_2("nsrs", nsr_id, db_dict)
2080 except DbException as e:
2081 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2082
2083 def _write_op_status(
2084 self,
2085 op_id: str,
2086 stage: list = None,
2087 error_message: str = None,
2088 queuePosition: int = 0,
2089 operation_state: str = None,
2090 other_update: dict = None,
2091 ):
2092 try:
2093 db_dict = other_update or {}
2094 db_dict["queuePosition"] = queuePosition
2095 if isinstance(stage, list):
2096 db_dict["stage"] = stage[0]
2097 db_dict["detailed-status"] = " ".join(stage)
2098 elif stage is not None:
2099 db_dict["stage"] = str(stage)
2100
2101 if error_message is not None:
2102 db_dict["errorMessage"] = error_message
2103 if operation_state is not None:
2104 db_dict["operationState"] = operation_state
2105 db_dict["statusEnteredTime"] = time()
2106 self.update_db_2("nslcmops", op_id, db_dict)
2107 except DbException as e:
2108 self.logger.warn(
2109 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2110 )
2111
2112 def _write_all_config_status(self, db_nsr: dict, status: str):
2113 try:
2114 nsr_id = db_nsr["_id"]
2115 # configurationStatus
2116 config_status = db_nsr.get("configurationStatus")
2117 if config_status:
2118 db_nsr_update = {
2119 "configurationStatus.{}.status".format(index): status
2120 for index, v in enumerate(config_status)
2121 if v
2122 }
2123 # update status
2124 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2125
2126 except DbException as e:
2127 self.logger.warn(
2128 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2129 )
2130
2131 def _write_configuration_status(
2132 self,
2133 nsr_id: str,
2134 vca_index: int,
2135 status: str = None,
2136 element_under_configuration: str = None,
2137 element_type: str = None,
2138 other_update: dict = None,
2139 ):
2140 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2141 # .format(vca_index, status))
2142
2143 try:
2144 db_path = "configurationStatus.{}.".format(vca_index)
2145 db_dict = other_update or {}
2146 if status:
2147 db_dict[db_path + "status"] = status
2148 if element_under_configuration:
2149 db_dict[
2150 db_path + "elementUnderConfiguration"
2151 ] = element_under_configuration
2152 if element_type:
2153 db_dict[db_path + "elementType"] = element_type
2154 self.update_db_2("nsrs", nsr_id, db_dict)
2155 except DbException as e:
2156 self.logger.warn(
2157 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2158 status, nsr_id, vca_index, e
2159 )
2160 )
2161
2162 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2163 """
2164 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2165 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2166 Database is used because the result can be obtained from a different LCM worker in case of HA.
2167 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2168 :param db_nslcmop: database content of nslcmop
2169 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2170 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2171 computed 'vim-account-id'
2172 """
2173 modified = False
2174 nslcmop_id = db_nslcmop["_id"]
2175 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2176 if placement_engine == "PLA":
2177 self.logger.debug(
2178 logging_text + "Invoke and wait for placement optimization"
2179 )
2180 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2181 db_poll_interval = 5
2182 wait = db_poll_interval * 10
2183 pla_result = None
2184 while not pla_result and wait >= 0:
2185 await asyncio.sleep(db_poll_interval)
2186 wait -= db_poll_interval
2187 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2188 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2189
2190 if not pla_result:
2191 raise LcmException(
2192 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2193 )
2194
2195 for pla_vnf in pla_result["vnf"]:
2196 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2197 if not pla_vnf.get("vimAccountId") or not vnfr:
2198 continue
2199 modified = True
2200 self.db.set_one(
2201 "vnfrs",
2202 {"_id": vnfr["_id"]},
2203 {"vim-account-id": pla_vnf["vimAccountId"]},
2204 )
2205 # Modifies db_vnfrs
2206 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2207 return modified
2208
2209 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2210 alerts = []
2211 nsr_id = vnfr["nsr-id-ref"]
2212 df = vnfd.get("df", [{}])[0]
2213 # Checking for auto-healing configuration
2214 if "healing-aspect" in df:
2215 healing_aspects = df["healing-aspect"]
2216 for healing in healing_aspects:
2217 for healing_policy in healing.get("healing-policy", ()):
2218 vdu_id = healing_policy["vdu-id"]
2219 vdur = next(
2220 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2221 {},
2222 )
2223 if not vdur:
2224 continue
2225 metric_name = "vm_status"
2226 vdu_name = vdur.get("name")
2227 vnf_member_index = vnfr["member-vnf-index-ref"]
2228 uuid = str(uuid4())
2229 name = f"healing_{uuid}"
2230 action = healing_policy
2231 # action_on_recovery = healing.get("action-on-recovery")
2232 # cooldown_time = healing.get("cooldown-time")
2233 # day1 = healing.get("day1")
2234 alert = {
2235 "uuid": uuid,
2236 "name": name,
2237 "metric": metric_name,
2238 "tags": {
2239 "ns_id": nsr_id,
2240 "vnf_member_index": vnf_member_index,
2241 "vdu_name": vdu_name,
2242 },
2243 "alarm_status": "ok",
2244 "action_type": "healing",
2245 "action": action,
2246 }
2247 alerts.append(alert)
2248 return alerts
2249
2250 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2251 alerts = []
2252 nsr_id = vnfr["nsr-id-ref"]
2253 df = vnfd.get("df", [{}])[0]
2254 # Checking for auto-scaling configuration
2255 if "scaling-aspect" in df:
2256 scaling_aspects = df["scaling-aspect"]
2257 all_vnfd_monitoring_params = {}
2258 for ivld in vnfd.get("int-virtual-link-desc", ()):
2259 for mp in ivld.get("monitoring-parameters", ()):
2260 all_vnfd_monitoring_params[mp.get("id")] = mp
2261 for vdu in vnfd.get("vdu", ()):
2262 for mp in vdu.get("monitoring-parameter", ()):
2263 all_vnfd_monitoring_params[mp.get("id")] = mp
2264 for df in vnfd.get("df", ()):
2265 for mp in df.get("monitoring-parameter", ()):
2266 all_vnfd_monitoring_params[mp.get("id")] = mp
2267 for scaling_aspect in scaling_aspects:
2268 scaling_group_name = scaling_aspect.get("name", "")
2269 # Get monitored VDUs
2270 all_monitored_vdus = set()
2271 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2272 "deltas", ()
2273 ):
2274 for vdu_delta in delta.get("vdu-delta", ()):
2275 all_monitored_vdus.add(vdu_delta.get("id"))
2276 monitored_vdurs = list(
2277 filter(
2278 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2279 vnfr["vdur"],
2280 )
2281 )
2282 if not monitored_vdurs:
2283 self.logger.error(
2284 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2285 )
2286 continue
2287 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2288 if scaling_policy["scaling-type"] != "automatic":
2289 continue
2290 threshold_time = scaling_policy.get("threshold-time", "1")
2291 cooldown_time = scaling_policy.get("cooldown-time", "0")
2292 for scaling_criteria in scaling_policy["scaling-criteria"]:
2293 monitoring_param_ref = scaling_criteria.get(
2294 "vnf-monitoring-param-ref"
2295 )
2296 vnf_monitoring_param = all_vnfd_monitoring_params[
2297 monitoring_param_ref
2298 ]
2299 for vdur in monitored_vdurs:
2300 vdu_id = vdur["vdu-id-ref"]
2301 metric_name = vnf_monitoring_param.get("performance-metric")
2302 metric_name = f"osm_{metric_name}"
2303 vnf_member_index = vnfr["member-vnf-index-ref"]
2304 scalein_threshold = scaling_criteria.get(
2305 "scale-in-threshold"
2306 )
2307 scaleout_threshold = scaling_criteria.get(
2308 "scale-out-threshold"
2309 )
2310 # Looking for min/max-number-of-instances
2311 instances_min_number = 1
2312 instances_max_number = 1
2313 vdu_profile = df["vdu-profile"]
2314 if vdu_profile:
2315 profile = next(
2316 item for item in vdu_profile if item["id"] == vdu_id
2317 )
2318 instances_min_number = profile.get(
2319 "min-number-of-instances", 1
2320 )
2321 instances_max_number = profile.get(
2322 "max-number-of-instances", 1
2323 )
2324
2325 if scalein_threshold:
2326 uuid = str(uuid4())
2327 name = f"scalein_{uuid}"
2328 operation = scaling_criteria[
2329 "scale-in-relational-operation"
2330 ]
2331 rel_operator = self.rel_operation_types.get(
2332 operation, "<="
2333 )
2334 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2335 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2336 labels = {
2337 "ns_id": nsr_id,
2338 "vnf_member_index": vnf_member_index,
2339 "vdu_id": vdu_id,
2340 }
2341 prom_cfg = {
2342 "alert": name,
2343 "expr": expression,
2344 "for": str(threshold_time) + "m",
2345 "labels": labels,
2346 }
2347 action = scaling_policy
2348 action = {
2349 "scaling-group": scaling_group_name,
2350 "cooldown-time": cooldown_time,
2351 }
2352 alert = {
2353 "uuid": uuid,
2354 "name": name,
2355 "metric": metric_name,
2356 "tags": {
2357 "ns_id": nsr_id,
2358 "vnf_member_index": vnf_member_index,
2359 "vdu_id": vdu_id,
2360 },
2361 "alarm_status": "ok",
2362 "action_type": "scale_in",
2363 "action": action,
2364 "prometheus_config": prom_cfg,
2365 }
2366 alerts.append(alert)
2367
2368 if scaleout_threshold:
2369 uuid = str(uuid4())
2370 name = f"scaleout_{uuid}"
2371 operation = scaling_criteria[
2372 "scale-out-relational-operation"
2373 ]
2374 rel_operator = self.rel_operation_types.get(
2375 operation, "<="
2376 )
2377 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2378 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2379 labels = {
2380 "ns_id": nsr_id,
2381 "vnf_member_index": vnf_member_index,
2382 "vdu_id": vdu_id,
2383 }
2384 prom_cfg = {
2385 "alert": name,
2386 "expr": expression,
2387 "for": str(threshold_time) + "m",
2388 "labels": labels,
2389 }
2390 action = scaling_policy
2391 action = {
2392 "scaling-group": scaling_group_name,
2393 "cooldown-time": cooldown_time,
2394 }
2395 alert = {
2396 "uuid": uuid,
2397 "name": name,
2398 "metric": metric_name,
2399 "tags": {
2400 "ns_id": nsr_id,
2401 "vnf_member_index": vnf_member_index,
2402 "vdu_id": vdu_id,
2403 },
2404 "alarm_status": "ok",
2405 "action_type": "scale_out",
2406 "action": action,
2407 "prometheus_config": prom_cfg,
2408 }
2409 alerts.append(alert)
2410 return alerts
2411
2412 def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
2413 alerts = []
2414 nsr_id = vnfr["nsr-id-ref"]
2415 vnf_member_index = vnfr["member-vnf-index-ref"]
2416
2417 # Checking for VNF alarm configuration
2418 for vdur in vnfr["vdur"]:
2419 vdu_id = vdur["vdu-id-ref"]
2420 vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
2421 if "alarm" in vdu:
2422 # Get VDU monitoring params, since alerts are based on them
2423 vdu_monitoring_params = {}
2424 for mp in vdu.get("monitoring-parameter", []):
2425 vdu_monitoring_params[mp.get("id")] = mp
2426 if not vdu_monitoring_params:
2427 self.logger.error(
2428 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2429 )
2430 continue
2431 # Get alarms in the VDU
2432 alarm_descriptors = vdu["alarm"]
2433 # Create VDU alarms for each alarm in the VDU
2434 for alarm_descriptor in alarm_descriptors:
2435 # Check that the VDU alarm refers to a proper monitoring param
2436 alarm_monitoring_param = alarm_descriptor.get(
2437 "vnf-monitoring-param-ref", ""
2438 )
2439 vdu_specific_monitoring_param = vdu_monitoring_params.get(
2440 alarm_monitoring_param, {}
2441 )
2442 if not vdu_specific_monitoring_param:
2443 self.logger.error(
2444 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2445 )
2446 continue
2447 metric_name = vdu_specific_monitoring_param.get(
2448 "performance-metric"
2449 )
2450 if not metric_name:
2451 self.logger.error(
2452 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2453 )
2454 continue
2455 # Set params of the alarm to be created in Prometheus
2456 metric_name = f"osm_{metric_name}"
2457 metric_threshold = alarm_descriptor.get("value")
2458 uuid = str(uuid4())
2459 alert_name = f"vdu_alarm_{uuid}"
2460 operation = alarm_descriptor["operation"]
2461 rel_operator = self.rel_operation_types.get(operation, "<=")
2462 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2463 expression = f"{metric_selector} {rel_operator} {metric_threshold}"
2464 labels = {
2465 "ns_id": nsr_id,
2466 "vnf_member_index": vnf_member_index,
2467 "vdu_id": vdu_id,
2468 "vdu_name": "{{ $labels.vdu_name }}",
2469 }
2470 prom_cfg = {
2471 "alert": alert_name,
2472 "expr": expression,
2473 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2474 "labels": labels,
2475 }
2476 alarm_action = dict()
2477 for action_type in ["ok", "insufficient-data", "alarm"]:
2478 if (
2479 "actions" in alarm_descriptor
2480 and action_type in alarm_descriptor["actions"]
2481 ):
2482 alarm_action[action_type] = alarm_descriptor["actions"][
2483 action_type
2484 ]
2485 alert = {
2486 "uuid": uuid,
2487 "name": alert_name,
2488 "metric": metric_name,
2489 "tags": {
2490 "ns_id": nsr_id,
2491 "vnf_member_index": vnf_member_index,
2492 "vdu_id": vdu_id,
2493 },
2494 "alarm_status": "ok",
2495 "action_type": "vdu_alarm",
2496 "action": alarm_action,
2497 "prometheus_config": prom_cfg,
2498 }
2499 alerts.append(alert)
2500 return alerts
2501
2502 def update_nsrs_with_pla_result(self, params):
2503 try:
2504 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2505 self.update_db_2(
2506 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2507 )
2508 except Exception as e:
2509 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2510
2511 async def instantiate(self, nsr_id, nslcmop_id):
2512 """
2513
2514 :param nsr_id: ns instance to deploy
2515 :param nslcmop_id: operation to run
2516 :return:
2517 """
2518
2519 # Try to lock HA task here
2520 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2521 if not task_is_locked_by_me:
2522 self.logger.debug(
2523 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2524 )
2525 return
2526
2527 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2528 self.logger.debug(logging_text + "Enter")
2529
2530 # get all needed from database
2531
2532 # database nsrs record
2533 db_nsr = None
2534
2535 # database nslcmops record
2536 db_nslcmop = None
2537
2538 # update operation on nsrs
2539 db_nsr_update = {}
2540 # update operation on nslcmops
2541 db_nslcmop_update = {}
2542
2543 timeout_ns_deploy = self.timeout.ns_deploy
2544
2545 nslcmop_operation_state = None
2546 db_vnfrs = {} # vnf's info indexed by member-index
2547 # n2vc_info = {}
2548 tasks_dict_info = {} # from task to info text
2549 exc = None
2550 error_list = []
2551 stage = [
2552 "Stage 1/5: preparation of the environment.",
2553 "Waiting for previous operations to terminate.",
2554 "",
2555 ]
2556 # ^ stage, step, VIM progress
2557 try:
2558 # wait for any previous tasks in process
2559 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2560
2561 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2562 stage[1] = "Reading from database."
2563 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2564 db_nsr_update["detailed-status"] = "creating"
2565 db_nsr_update["operational-status"] = "init"
2566 self._write_ns_status(
2567 nsr_id=nsr_id,
2568 ns_state="BUILDING",
2569 current_operation="INSTANTIATING",
2570 current_operation_id=nslcmop_id,
2571 other_update=db_nsr_update,
2572 )
2573 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2574
2575 # read from db: operation
2576 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2577 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2578 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2579 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2580 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2581 )
2582 ns_params = db_nslcmop.get("operationParams")
2583 if ns_params and ns_params.get("timeout_ns_deploy"):
2584 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2585
2586 # read from db: ns
2587 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2588 self.logger.debug(logging_text + stage[1])
2589 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2590 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2591 self.logger.debug(logging_text + stage[1])
2592 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2593 self.fs.sync(db_nsr["nsd-id"])
2594 db_nsr["nsd"] = nsd
2595 # nsr_name = db_nsr["name"] # TODO short-name??
2596
2597 # read from db: vnf's of this ns
2598 stage[1] = "Getting vnfrs from db."
2599 self.logger.debug(logging_text + stage[1])
2600 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2601
2602 # read from db: vnfd's for every vnf
2603 db_vnfds = [] # every vnfd data
2604
2605 # for each vnf in ns, read vnfd
2606 for vnfr in db_vnfrs_list:
2607 if vnfr.get("kdur"):
2608 kdur_list = []
2609 for kdur in vnfr["kdur"]:
2610 if kdur.get("additionalParams"):
2611 kdur["additionalParams"] = json.loads(
2612 kdur["additionalParams"]
2613 )
2614 kdur_list.append(kdur)
2615 vnfr["kdur"] = kdur_list
2616
2617 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2618 vnfd_id = vnfr["vnfd-id"]
2619 vnfd_ref = vnfr["vnfd-ref"]
2620 self.fs.sync(vnfd_id)
2621
2622 # if we haven't this vnfd, read it from db
2623 if vnfd_id not in db_vnfds:
2624 # read from db
2625 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2626 vnfd_id, vnfd_ref
2627 )
2628 self.logger.debug(logging_text + stage[1])
2629 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2630
2631 # store vnfd
2632 db_vnfds.append(vnfd)
2633
2634 # Get or generates the _admin.deployed.VCA list
2635 vca_deployed_list = None
2636 if db_nsr["_admin"].get("deployed"):
2637 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2638 if vca_deployed_list is None:
2639 vca_deployed_list = []
2640 configuration_status_list = []
2641 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2642 db_nsr_update["configurationStatus"] = configuration_status_list
2643 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2644 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2645 elif isinstance(vca_deployed_list, dict):
2646 # maintain backward compatibility. Change a dict to list at database
2647 vca_deployed_list = list(vca_deployed_list.values())
2648 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2649 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2650
2651 if not isinstance(
2652 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2653 ):
2654 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2655 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2656
2657 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2658 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2659 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2660 self.db.set_list(
2661 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2662 )
2663
2664 # n2vc_redesign STEP 2 Deploy Network Scenario
2665 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2666 self._write_op_status(op_id=nslcmop_id, stage=stage)
2667
2668 stage[1] = "Deploying KDUs."
2669 # self.logger.debug(logging_text + "Before deploy_kdus")
2670 # Call to deploy_kdus in case exists the "vdu:kdu" param
2671 await self.deploy_kdus(
2672 logging_text=logging_text,
2673 nsr_id=nsr_id,
2674 nslcmop_id=nslcmop_id,
2675 db_vnfrs=db_vnfrs,
2676 db_vnfds=db_vnfds,
2677 task_instantiation_info=tasks_dict_info,
2678 )
2679
2680 stage[1] = "Getting VCA public key."
2681 # n2vc_redesign STEP 1 Get VCA public ssh-key
2682 # feature 1429. Add n2vc public key to needed VMs
2683 n2vc_key = self.n2vc.get_public_key()
2684 n2vc_key_list = [n2vc_key]
2685 if self.vca_config.public_key:
2686 n2vc_key_list.append(self.vca_config.public_key)
2687
2688 stage[1] = "Deploying NS at VIM."
2689 task_ro = asyncio.ensure_future(
2690 self.instantiate_RO(
2691 logging_text=logging_text,
2692 nsr_id=nsr_id,
2693 nsd=nsd,
2694 db_nsr=db_nsr,
2695 db_nslcmop=db_nslcmop,
2696 db_vnfrs=db_vnfrs,
2697 db_vnfds=db_vnfds,
2698 n2vc_key_list=n2vc_key_list,
2699 stage=stage,
2700 )
2701 )
2702 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2703 tasks_dict_info[task_ro] = "Deploying at VIM"
2704
2705 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2706 stage[1] = "Deploying Execution Environments."
2707 self.logger.debug(logging_text + stage[1])
2708
2709 # create namespace and certificate if any helm based EE is present in the NS
2710 if check_helm_ee_in_ns(db_vnfds):
2711 await self.vca_map["helm-v3"].setup_ns_namespace(
2712 name=nsr_id,
2713 )
2714 # create TLS certificates
2715 await self.vca_map["helm-v3"].create_tls_certificate(
2716 secret_name=self.EE_TLS_NAME,
2717 dns_prefix="*",
2718 nsr_id=nsr_id,
2719 usage="server auth",
2720 namespace=nsr_id,
2721 )
2722
2723 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2724 for vnf_profile in get_vnf_profiles(nsd):
2725 vnfd_id = vnf_profile["vnfd-id"]
2726 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2727 member_vnf_index = str(vnf_profile["id"])
2728 db_vnfr = db_vnfrs[member_vnf_index]
2729 base_folder = vnfd["_admin"]["storage"]
2730 vdu_id = None
2731 vdu_index = 0
2732 vdu_name = None
2733 kdu_name = None
2734 kdu_index = None
2735
2736 # Get additional parameters
2737 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2738 if db_vnfr.get("additionalParamsForVnf"):
2739 deploy_params.update(
2740 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2741 )
2742
2743 descriptor_config = get_configuration(vnfd, vnfd["id"])
2744 if descriptor_config:
2745 self._deploy_n2vc(
2746 logging_text=logging_text
2747 + "member_vnf_index={} ".format(member_vnf_index),
2748 db_nsr=db_nsr,
2749 db_vnfr=db_vnfr,
2750 nslcmop_id=nslcmop_id,
2751 nsr_id=nsr_id,
2752 nsi_id=nsi_id,
2753 vnfd_id=vnfd_id,
2754 vdu_id=vdu_id,
2755 kdu_name=kdu_name,
2756 member_vnf_index=member_vnf_index,
2757 vdu_index=vdu_index,
2758 kdu_index=kdu_index,
2759 vdu_name=vdu_name,
2760 deploy_params=deploy_params,
2761 descriptor_config=descriptor_config,
2762 base_folder=base_folder,
2763 task_instantiation_info=tasks_dict_info,
2764 stage=stage,
2765 )
2766
2767 # Deploy charms for each VDU that supports one.
2768 for vdud in get_vdu_list(vnfd):
2769 vdu_id = vdud["id"]
2770 descriptor_config = get_configuration(vnfd, vdu_id)
2771 vdur = find_in_list(
2772 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2773 )
2774
2775 if vdur.get("additionalParams"):
2776 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2777 else:
2778 deploy_params_vdu = deploy_params
2779 deploy_params_vdu["OSM"] = get_osm_params(
2780 db_vnfr, vdu_id, vdu_count_index=0
2781 )
2782 vdud_count = get_number_of_instances(vnfd, vdu_id)
2783
2784 self.logger.debug("VDUD > {}".format(vdud))
2785 self.logger.debug(
2786 "Descriptor config > {}".format(descriptor_config)
2787 )
2788 if descriptor_config:
2789 vdu_name = None
2790 kdu_name = None
2791 kdu_index = None
2792 for vdu_index in range(vdud_count):
2793 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2794 self._deploy_n2vc(
2795 logging_text=logging_text
2796 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2797 member_vnf_index, vdu_id, vdu_index
2798 ),
2799 db_nsr=db_nsr,
2800 db_vnfr=db_vnfr,
2801 nslcmop_id=nslcmop_id,
2802 nsr_id=nsr_id,
2803 nsi_id=nsi_id,
2804 vnfd_id=vnfd_id,
2805 vdu_id=vdu_id,
2806 kdu_name=kdu_name,
2807 kdu_index=kdu_index,
2808 member_vnf_index=member_vnf_index,
2809 vdu_index=vdu_index,
2810 vdu_name=vdu_name,
2811 deploy_params=deploy_params_vdu,
2812 descriptor_config=descriptor_config,
2813 base_folder=base_folder,
2814 task_instantiation_info=tasks_dict_info,
2815 stage=stage,
2816 )
2817 for kdud in get_kdu_list(vnfd):
2818 kdu_name = kdud["name"]
2819 descriptor_config = get_configuration(vnfd, kdu_name)
2820 if descriptor_config:
2821 vdu_id = None
2822 vdu_index = 0
2823 vdu_name = None
2824 kdu_index, kdur = next(
2825 x
2826 for x in enumerate(db_vnfr["kdur"])
2827 if x[1]["kdu-name"] == kdu_name
2828 )
2829 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2830 if kdur.get("additionalParams"):
2831 deploy_params_kdu.update(
2832 parse_yaml_strings(kdur["additionalParams"].copy())
2833 )
2834
2835 self._deploy_n2vc(
2836 logging_text=logging_text,
2837 db_nsr=db_nsr,
2838 db_vnfr=db_vnfr,
2839 nslcmop_id=nslcmop_id,
2840 nsr_id=nsr_id,
2841 nsi_id=nsi_id,
2842 vnfd_id=vnfd_id,
2843 vdu_id=vdu_id,
2844 kdu_name=kdu_name,
2845 member_vnf_index=member_vnf_index,
2846 vdu_index=vdu_index,
2847 kdu_index=kdu_index,
2848 vdu_name=vdu_name,
2849 deploy_params=deploy_params_kdu,
2850 descriptor_config=descriptor_config,
2851 base_folder=base_folder,
2852 task_instantiation_info=tasks_dict_info,
2853 stage=stage,
2854 )
2855
2856 # Check if each vnf has exporter for metric collection if so update prometheus job records
2857 if "exporters-endpoints" in vnfd.get("df")[0]:
2858 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2859 self.logger.debug("exporter config :{}".format(exporter_config))
2860 artifact_path = "{}/{}/{}".format(
2861 base_folder["folder"],
2862 base_folder["pkg-dir"],
2863 "exporter-endpoint",
2864 )
2865 ee_id = None
2866 ee_config_descriptor = exporter_config
2867 vnfr_id = db_vnfr["id"]
2868 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2869 logging_text,
2870 nsr_id,
2871 vnfr_id,
2872 vdu_id=None,
2873 vdu_index=None,
2874 user=None,
2875 pub_key=None,
2876 )
2877 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2878 self.logger.debug("Artifact_path:{}".format(artifact_path))
2879 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2880 vdu_id_for_prom = None
2881 vdu_index_for_prom = None
2882 for x in get_iterable(db_vnfr, "vdur"):
2883 vdu_id_for_prom = x.get("vdu-id-ref")
2884 vdu_index_for_prom = x.get("count-index")
2885 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2886 ee_id=ee_id,
2887 artifact_path=artifact_path,
2888 ee_config_descriptor=ee_config_descriptor,
2889 vnfr_id=vnfr_id,
2890 nsr_id=nsr_id,
2891 target_ip=rw_mgmt_ip,
2892 element_type="VDU",
2893 vdu_id=vdu_id_for_prom,
2894 vdu_index=vdu_index_for_prom,
2895 )
2896
2897 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2898 if prometheus_jobs:
2899 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2900 self.update_db_2(
2901 "nsrs",
2902 nsr_id,
2903 db_nsr_update,
2904 )
2905
2906 for job in prometheus_jobs:
2907 self.db.set_one(
2908 "prometheus_jobs",
2909 {"job_name": job["job_name"]},
2910 job,
2911 upsert=True,
2912 fail_on_empty=False,
2913 )
2914
2915 # Check if this NS has a charm configuration
2916 descriptor_config = nsd.get("ns-configuration")
2917 if descriptor_config and descriptor_config.get("juju"):
2918 vnfd_id = None
2919 db_vnfr = None
2920 member_vnf_index = None
2921 vdu_id = None
2922 kdu_name = None
2923 kdu_index = None
2924 vdu_index = 0
2925 vdu_name = None
2926
2927 # Get additional parameters
2928 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2929 if db_nsr.get("additionalParamsForNs"):
2930 deploy_params.update(
2931 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2932 )
2933 base_folder = nsd["_admin"]["storage"]
2934 self._deploy_n2vc(
2935 logging_text=logging_text,
2936 db_nsr=db_nsr,
2937 db_vnfr=db_vnfr,
2938 nslcmop_id=nslcmop_id,
2939 nsr_id=nsr_id,
2940 nsi_id=nsi_id,
2941 vnfd_id=vnfd_id,
2942 vdu_id=vdu_id,
2943 kdu_name=kdu_name,
2944 member_vnf_index=member_vnf_index,
2945 vdu_index=vdu_index,
2946 kdu_index=kdu_index,
2947 vdu_name=vdu_name,
2948 deploy_params=deploy_params,
2949 descriptor_config=descriptor_config,
2950 base_folder=base_folder,
2951 task_instantiation_info=tasks_dict_info,
2952 stage=stage,
2953 )
2954
2955 # rest of staff will be done at finally
2956
2957 except (
2958 ROclient.ROClientException,
2959 DbException,
2960 LcmException,
2961 N2VCException,
2962 ) as e:
2963 self.logger.error(
2964 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2965 )
2966 exc = e
2967 except asyncio.CancelledError:
2968 self.logger.error(
2969 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2970 )
2971 exc = "Operation was cancelled"
2972 except Exception as e:
2973 exc = traceback.format_exc()
2974 self.logger.critical(
2975 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2976 exc_info=True,
2977 )
2978 finally:
2979 if exc:
2980 error_list.append(str(exc))
2981 try:
2982 # wait for pending tasks
2983 if tasks_dict_info:
2984 stage[1] = "Waiting for instantiate pending tasks."
2985 self.logger.debug(logging_text + stage[1])
2986 error_list += await self._wait_for_tasks(
2987 logging_text,
2988 tasks_dict_info,
2989 timeout_ns_deploy,
2990 stage,
2991 nslcmop_id,
2992 nsr_id=nsr_id,
2993 )
2994 stage[1] = stage[2] = ""
2995 except asyncio.CancelledError:
2996 error_list.append("Cancelled")
2997 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
2998 await self._wait_for_tasks(
2999 logging_text,
3000 tasks_dict_info,
3001 timeout_ns_deploy,
3002 stage,
3003 nslcmop_id,
3004 nsr_id=nsr_id,
3005 )
3006 except Exception as exc:
3007 error_list.append(str(exc))
3008
3009 # update operation-status
3010 db_nsr_update["operational-status"] = "running"
3011 # let's begin with VCA 'configured' status (later we can change it)
3012 db_nsr_update["config-status"] = "configured"
3013 for task, task_name in tasks_dict_info.items():
3014 if not task.done() or task.cancelled() or task.exception():
3015 if task_name.startswith(self.task_name_deploy_vca):
3016 # A N2VC task is pending
3017 db_nsr_update["config-status"] = "failed"
3018 else:
3019 # RO or KDU task is pending
3020 db_nsr_update["operational-status"] = "failed"
3021
3022 # update status at database
3023 if error_list:
3024 error_detail = ". ".join(error_list)
3025 self.logger.error(logging_text + error_detail)
3026 error_description_nslcmop = "{} Detail: {}".format(
3027 stage[0], error_detail
3028 )
3029 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3030 nslcmop_id, stage[0]
3031 )
3032
3033 db_nsr_update["detailed-status"] = (
3034 error_description_nsr + " Detail: " + error_detail
3035 )
3036 db_nslcmop_update["detailed-status"] = error_detail
3037 nslcmop_operation_state = "FAILED"
3038 ns_state = "BROKEN"
3039 else:
3040 error_detail = None
3041 error_description_nsr = error_description_nslcmop = None
3042 ns_state = "READY"
3043 db_nsr_update["detailed-status"] = "Done"
3044 db_nslcmop_update["detailed-status"] = "Done"
3045 nslcmop_operation_state = "COMPLETED"
3046 # Gather auto-healing and auto-scaling alerts for each vnfr
3047 healing_alerts = []
3048 scaling_alerts = []
3049 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3050 vnfd = next(
3051 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3052 )
3053 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3054 for alert in healing_alerts:
3055 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3056 self.db.create("alerts", alert)
3057
3058 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3059 for alert in scaling_alerts:
3060 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3061 self.db.create("alerts", alert)
3062
3063 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3064 for alert in alarm_alerts:
3065 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3066 self.db.create("alerts", alert)
3067 if db_nsr:
3068 self._write_ns_status(
3069 nsr_id=nsr_id,
3070 ns_state=ns_state,
3071 current_operation="IDLE",
3072 current_operation_id=None,
3073 error_description=error_description_nsr,
3074 error_detail=error_detail,
3075 other_update=db_nsr_update,
3076 )
3077 self._write_op_status(
3078 op_id=nslcmop_id,
3079 stage="",
3080 error_message=error_description_nslcmop,
3081 operation_state=nslcmop_operation_state,
3082 other_update=db_nslcmop_update,
3083 )
3084
3085 if nslcmop_operation_state:
3086 try:
3087 await self.msg.aiowrite(
3088 "ns",
3089 "instantiated",
3090 {
3091 "nsr_id": nsr_id,
3092 "nslcmop_id": nslcmop_id,
3093 "operationState": nslcmop_operation_state,
3094 "startTime": db_nslcmop["startTime"],
3095 "links": db_nslcmop["links"],
3096 "operationParams": {
3097 "nsInstanceId": nsr_id,
3098 "nsdId": db_nsr["nsd-id"],
3099 },
3100 },
3101 )
3102 except Exception as e:
3103 self.logger.error(
3104 logging_text + "kafka_write notification Exception {}".format(e)
3105 )
3106
3107 self.logger.debug(logging_text + "Exit")
3108 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3109
3110 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3111 if vnfd_id not in cached_vnfds:
3112 cached_vnfds[vnfd_id] = self.db.get_one(
3113 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3114 )
3115 return cached_vnfds[vnfd_id]
3116
3117 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3118 if vnf_profile_id not in cached_vnfrs:
3119 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3120 "vnfrs",
3121 {
3122 "member-vnf-index-ref": vnf_profile_id,
3123 "nsr-id-ref": nsr_id,
3124 },
3125 )
3126 return cached_vnfrs[vnf_profile_id]
3127
3128 def _is_deployed_vca_in_relation(
3129 self, vca: DeployedVCA, relation: Relation
3130 ) -> bool:
3131 found = False
3132 for endpoint in (relation.provider, relation.requirer):
3133 if endpoint["kdu-resource-profile-id"]:
3134 continue
3135 found = (
3136 vca.vnf_profile_id == endpoint.vnf_profile_id
3137 and vca.vdu_profile_id == endpoint.vdu_profile_id
3138 and vca.execution_environment_ref == endpoint.execution_environment_ref
3139 )
3140 if found:
3141 break
3142 return found
3143
3144 def _update_ee_relation_data_with_implicit_data(
3145 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3146 ):
3147 ee_relation_data = safe_get_ee_relation(
3148 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3149 )
3150 ee_relation_level = EELevel.get_level(ee_relation_data)
3151 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3152 "execution-environment-ref"
3153 ]:
3154 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3155 vnfd_id = vnf_profile["vnfd-id"]
3156 project = nsd["_admin"]["projects_read"][0]
3157 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3158 entity_id = (
3159 vnfd_id
3160 if ee_relation_level == EELevel.VNF
3161 else ee_relation_data["vdu-profile-id"]
3162 )
3163 ee = get_juju_ee_ref(db_vnfd, entity_id)
3164 if not ee:
3165 raise Exception(
3166 f"not execution environments found for ee_relation {ee_relation_data}"
3167 )
3168 ee_relation_data["execution-environment-ref"] = ee["id"]
3169 return ee_relation_data
3170
3171 def _get_ns_relations(
3172 self,
3173 nsr_id: str,
3174 nsd: Dict[str, Any],
3175 vca: DeployedVCA,
3176 cached_vnfds: Dict[str, Any],
3177 ) -> List[Relation]:
3178 relations = []
3179 db_ns_relations = get_ns_configuration_relation_list(nsd)
3180 for r in db_ns_relations:
3181 provider_dict = None
3182 requirer_dict = None
3183 if all(key in r for key in ("provider", "requirer")):
3184 provider_dict = r["provider"]
3185 requirer_dict = r["requirer"]
3186 elif "entities" in r:
3187 provider_id = r["entities"][0]["id"]
3188 provider_dict = {
3189 "nsr-id": nsr_id,
3190 "endpoint": r["entities"][0]["endpoint"],
3191 }
3192 if provider_id != nsd["id"]:
3193 provider_dict["vnf-profile-id"] = provider_id
3194 requirer_id = r["entities"][1]["id"]
3195 requirer_dict = {
3196 "nsr-id": nsr_id,
3197 "endpoint": r["entities"][1]["endpoint"],
3198 }
3199 if requirer_id != nsd["id"]:
3200 requirer_dict["vnf-profile-id"] = requirer_id
3201 else:
3202 raise Exception(
3203 "provider/requirer or entities must be included in the relation."
3204 )
3205 relation_provider = self._update_ee_relation_data_with_implicit_data(
3206 nsr_id, nsd, provider_dict, cached_vnfds
3207 )
3208 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3209 nsr_id, nsd, requirer_dict, cached_vnfds
3210 )
3211 provider = EERelation(relation_provider)
3212 requirer = EERelation(relation_requirer)
3213 relation = Relation(r["name"], provider, requirer)
3214 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3215 if vca_in_relation:
3216 relations.append(relation)
3217 return relations
3218
3219 def _get_vnf_relations(
3220 self,
3221 nsr_id: str,
3222 nsd: Dict[str, Any],
3223 vca: DeployedVCA,
3224 cached_vnfds: Dict[str, Any],
3225 ) -> List[Relation]:
3226 relations = []
3227 if vca.target_element == "ns":
3228 self.logger.debug("VCA is a NS charm, not a VNF.")
3229 return relations
3230 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3231 vnf_profile_id = vnf_profile["id"]
3232 vnfd_id = vnf_profile["vnfd-id"]
3233 project = nsd["_admin"]["projects_read"][0]
3234 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3235 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3236 for r in db_vnf_relations:
3237 provider_dict = None
3238 requirer_dict = None
3239 if all(key in r for key in ("provider", "requirer")):
3240 provider_dict = r["provider"]
3241 requirer_dict = r["requirer"]
3242 elif "entities" in r:
3243 provider_id = r["entities"][0]["id"]
3244 provider_dict = {
3245 "nsr-id": nsr_id,
3246 "vnf-profile-id": vnf_profile_id,
3247 "endpoint": r["entities"][0]["endpoint"],
3248 }
3249 if provider_id != vnfd_id:
3250 provider_dict["vdu-profile-id"] = provider_id
3251 requirer_id = r["entities"][1]["id"]
3252 requirer_dict = {
3253 "nsr-id": nsr_id,
3254 "vnf-profile-id": vnf_profile_id,
3255 "endpoint": r["entities"][1]["endpoint"],
3256 }
3257 if requirer_id != vnfd_id:
3258 requirer_dict["vdu-profile-id"] = requirer_id
3259 else:
3260 raise Exception(
3261 "provider/requirer or entities must be included in the relation."
3262 )
3263 relation_provider = self._update_ee_relation_data_with_implicit_data(
3264 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3265 )
3266 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3267 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3268 )
3269 provider = EERelation(relation_provider)
3270 requirer = EERelation(relation_requirer)
3271 relation = Relation(r["name"], provider, requirer)
3272 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3273 if vca_in_relation:
3274 relations.append(relation)
3275 return relations
3276
3277 def _get_kdu_resource_data(
3278 self,
3279 ee_relation: EERelation,
3280 db_nsr: Dict[str, Any],
3281 cached_vnfds: Dict[str, Any],
3282 ) -> DeployedK8sResource:
3283 nsd = get_nsd(db_nsr)
3284 vnf_profiles = get_vnf_profiles(nsd)
3285 vnfd_id = find_in_list(
3286 vnf_profiles,
3287 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3288 )["vnfd-id"]
3289 project = nsd["_admin"]["projects_read"][0]
3290 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3291 kdu_resource_profile = get_kdu_resource_profile(
3292 db_vnfd, ee_relation.kdu_resource_profile_id
3293 )
3294 kdu_name = kdu_resource_profile["kdu-name"]
3295 deployed_kdu, _ = get_deployed_kdu(
3296 db_nsr.get("_admin", ()).get("deployed", ()),
3297 kdu_name,
3298 ee_relation.vnf_profile_id,
3299 )
3300 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3301 return deployed_kdu
3302
3303 def _get_deployed_component(
3304 self,
3305 ee_relation: EERelation,
3306 db_nsr: Dict[str, Any],
3307 cached_vnfds: Dict[str, Any],
3308 ) -> DeployedComponent:
3309 nsr_id = db_nsr["_id"]
3310 deployed_component = None
3311 ee_level = EELevel.get_level(ee_relation)
3312 if ee_level == EELevel.NS:
3313 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3314 if vca:
3315 deployed_component = DeployedVCA(nsr_id, vca)
3316 elif ee_level == EELevel.VNF:
3317 vca = get_deployed_vca(
3318 db_nsr,
3319 {
3320 "vdu_id": None,
3321 "member-vnf-index": ee_relation.vnf_profile_id,
3322 "ee_descriptor_id": ee_relation.execution_environment_ref,
3323 },
3324 )
3325 if vca:
3326 deployed_component = DeployedVCA(nsr_id, vca)
3327 elif ee_level == EELevel.VDU:
3328 vca = get_deployed_vca(
3329 db_nsr,
3330 {
3331 "vdu_id": ee_relation.vdu_profile_id,
3332 "member-vnf-index": ee_relation.vnf_profile_id,
3333 "ee_descriptor_id": ee_relation.execution_environment_ref,
3334 },
3335 )
3336 if vca:
3337 deployed_component = DeployedVCA(nsr_id, vca)
3338 elif ee_level == EELevel.KDU:
3339 kdu_resource_data = self._get_kdu_resource_data(
3340 ee_relation, db_nsr, cached_vnfds
3341 )
3342 if kdu_resource_data:
3343 deployed_component = DeployedK8sResource(kdu_resource_data)
3344 return deployed_component
3345
3346 async def _add_relation(
3347 self,
3348 relation: Relation,
3349 vca_type: str,
3350 db_nsr: Dict[str, Any],
3351 cached_vnfds: Dict[str, Any],
3352 cached_vnfrs: Dict[str, Any],
3353 ) -> bool:
3354 deployed_provider = self._get_deployed_component(
3355 relation.provider, db_nsr, cached_vnfds
3356 )
3357 deployed_requirer = self._get_deployed_component(
3358 relation.requirer, db_nsr, cached_vnfds
3359 )
3360 if (
3361 deployed_provider
3362 and deployed_requirer
3363 and deployed_provider.config_sw_installed
3364 and deployed_requirer.config_sw_installed
3365 ):
3366 provider_db_vnfr = (
3367 self._get_vnfr(
3368 relation.provider.nsr_id,
3369 relation.provider.vnf_profile_id,
3370 cached_vnfrs,
3371 )
3372 if relation.provider.vnf_profile_id
3373 else None
3374 )
3375 requirer_db_vnfr = (
3376 self._get_vnfr(
3377 relation.requirer.nsr_id,
3378 relation.requirer.vnf_profile_id,
3379 cached_vnfrs,
3380 )
3381 if relation.requirer.vnf_profile_id
3382 else None
3383 )
3384 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3385 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3386 provider_relation_endpoint = RelationEndpoint(
3387 deployed_provider.ee_id,
3388 provider_vca_id,
3389 relation.provider.endpoint,
3390 )
3391 requirer_relation_endpoint = RelationEndpoint(
3392 deployed_requirer.ee_id,
3393 requirer_vca_id,
3394 relation.requirer.endpoint,
3395 )
3396 try:
3397 await self.vca_map[vca_type].add_relation(
3398 provider=provider_relation_endpoint,
3399 requirer=requirer_relation_endpoint,
3400 )
3401 except N2VCException as exception:
3402 self.logger.error(exception)
3403 raise LcmException(exception)
3404 return True
3405 return False
3406
3407 async def _add_vca_relations(
3408 self,
3409 logging_text,
3410 nsr_id,
3411 vca_type: str,
3412 vca_index: int,
3413 timeout: int = 3600,
3414 ) -> bool:
3415 # steps:
3416 # 1. find all relations for this VCA
3417 # 2. wait for other peers related
3418 # 3. add relations
3419
3420 try:
3421 # STEP 1: find all relations for this VCA
3422
3423 # read nsr record
3424 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3425 nsd = get_nsd(db_nsr)
3426
3427 # this VCA data
3428 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3429 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3430
3431 cached_vnfds = {}
3432 cached_vnfrs = {}
3433 relations = []
3434 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3435 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3436
3437 # if no relations, terminate
3438 if not relations:
3439 self.logger.debug(logging_text + " No relations")
3440 return True
3441
3442 self.logger.debug(logging_text + " adding relations {}".format(relations))
3443
3444 # add all relations
3445 start = time()
3446 while True:
3447 # check timeout
3448 now = time()
3449 if now - start >= timeout:
3450 self.logger.error(logging_text + " : timeout adding relations")
3451 return False
3452
3453 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3454 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3455
3456 # for each relation, find the VCA's related
3457 for relation in relations.copy():
3458 added = await self._add_relation(
3459 relation,
3460 vca_type,
3461 db_nsr,
3462 cached_vnfds,
3463 cached_vnfrs,
3464 )
3465 if added:
3466 relations.remove(relation)
3467
3468 if not relations:
3469 self.logger.debug("Relations added")
3470 break
3471 await asyncio.sleep(5.0)
3472
3473 return True
3474
3475 except Exception as e:
3476 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3477 return False
3478
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install a single KDU in its k8s cluster and track its status in the DB.

        :param nsr_id: id of the nsr record that owns this deployment.
        :param nsr_db_path: dot-path inside the nsr (e.g. _admin.deployed.K8s.<n>)
            where this KDU's deployment info is stored.
        :param vnfr_data: vnfr record of the VNF that declares the KDU.
        :param kdu_index: index of the kdur entry inside the vnfr.
        :param kdud: KDU descriptor (from the VNFD).
        :param vnfd: VNFD that declares the KDU.
        :param k8s_instance_info: deployment info: cluster type/uuid, kdu
            name/model, namespace, optional kdu-deployment-name.
        :param k8params: parameters passed to the install call.
        :param timeout: timeout (seconds) for the install and for each
            initial-config-primitive execution.
        :param vca_id: optional VCA id used for install and primitives.
        :return: the kdu_instance name used for the deployment.
        :raises Exception: re-raises any failure after best-effort recording of
            the error in the nsr detailed-status and the kdur status.
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Use the user-provided deployment name when given, otherwise
            # generate an instance name from the kdu model/name
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                # Match each declared mgmt service against the deployed
                # services by name prefix; first match wins (break below)
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial-config-primitives directly on the cluster only when
            # the KDU has no juju execution environment to run them instead
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception as error:
                # ignore to keep original exception
                self.logger.warning(
                    f"An exception occurred while updating DB: {str(error)}"
                )
            # reraise original error
            raise

        return kdu_instance
3677
3678 async def deploy_kdus(
3679 self,
3680 logging_text,
3681 nsr_id,
3682 nslcmop_id,
3683 db_vnfrs,
3684 db_vnfds,
3685 task_instantiation_info,
3686 ):
3687 # Launch kdus if present in the descriptor
3688
3689 k8scluster_id_2_uuic = {
3690 "helm-chart-v3": {},
3691 "juju-bundle": {},
3692 }
3693
3694 async def _get_cluster_id(cluster_id, cluster_type):
3695 nonlocal k8scluster_id_2_uuic
3696 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3697 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3698
3699 # check if K8scluster is creating and wait look if previous tasks in process
3700 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3701 "k8scluster", cluster_id
3702 )
3703 if task_dependency:
3704 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3705 task_name, cluster_id
3706 )
3707 self.logger.debug(logging_text + text)
3708 await asyncio.wait(task_dependency, timeout=3600)
3709
3710 db_k8scluster = self.db.get_one(
3711 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3712 )
3713 if not db_k8scluster:
3714 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3715
3716 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3717 if not k8s_id:
3718 if cluster_type == "helm-chart-v3":
3719 try:
3720 # backward compatibility for existing clusters that have not been initialized for helm v3
3721 k8s_credentials = yaml.safe_dump(
3722 db_k8scluster.get("credentials")
3723 )
3724 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3725 k8s_credentials, reuse_cluster_uuid=cluster_id
3726 )
3727 db_k8scluster_update = {}
3728 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3729 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3730 db_k8scluster_update[
3731 "_admin.helm-chart-v3.created"
3732 ] = uninstall_sw
3733 db_k8scluster_update[
3734 "_admin.helm-chart-v3.operationalState"
3735 ] = "ENABLED"
3736 self.update_db_2(
3737 "k8sclusters", cluster_id, db_k8scluster_update
3738 )
3739 except Exception as e:
3740 self.logger.error(
3741 logging_text
3742 + "error initializing helm-v3 cluster: {}".format(str(e))
3743 )
3744 raise LcmException(
3745 "K8s cluster '{}' has not been initialized for '{}'".format(
3746 cluster_id, cluster_type
3747 )
3748 )
3749 else:
3750 raise LcmException(
3751 "K8s cluster '{}' has not been initialized for '{}'".format(
3752 cluster_id, cluster_type
3753 )
3754 )
3755 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3756 return k8s_id
3757
3758 logging_text += "Deploy kdus: "
3759 step = ""
3760 try:
3761 db_nsr_update = {"_admin.deployed.K8s": []}
3762 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3763
3764 index = 0
3765 updated_cluster_list = []
3766 updated_v3_cluster_list = []
3767
3768 for vnfr_data in db_vnfrs.values():
3769 vca_id = self.get_vca_id(vnfr_data, {})
3770 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3771 # Step 0: Prepare and set parameters
3772 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3773 vnfd_id = vnfr_data.get("vnfd-id")
3774 vnfd_with_id = find_in_list(
3775 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3776 )
3777 kdud = next(
3778 kdud
3779 for kdud in vnfd_with_id["kdu"]
3780 if kdud["name"] == kdur["kdu-name"]
3781 )
3782 namespace = kdur.get("k8s-namespace")
3783 kdu_deployment_name = kdur.get("kdu-deployment-name")
3784 if kdur.get("helm-chart"):
3785 kdumodel = kdur["helm-chart"]
3786 # Default version: helm3, if helm-version is v2 assign v2
3787 k8sclustertype = "helm-chart-v3"
3788 self.logger.debug("kdur: {}".format(kdur))
3789 elif kdur.get("juju-bundle"):
3790 kdumodel = kdur["juju-bundle"]
3791 k8sclustertype = "juju-bundle"
3792 else:
3793 raise LcmException(
3794 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3795 "juju-bundle. Maybe an old NBI version is running".format(
3796 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3797 )
3798 )
3799 # check if kdumodel is a file and exists
3800 try:
3801 vnfd_with_id = find_in_list(
3802 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3803 )
3804 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3805 if storage: # may be not present if vnfd has not artifacts
3806 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3807 if storage["pkg-dir"]:
3808 filename = "{}/{}/{}s/{}".format(
3809 storage["folder"],
3810 storage["pkg-dir"],
3811 k8sclustertype,
3812 kdumodel,
3813 )
3814 else:
3815 filename = "{}/Scripts/{}s/{}".format(
3816 storage["folder"],
3817 k8sclustertype,
3818 kdumodel,
3819 )
3820 if self.fs.file_exists(
3821 filename, mode="file"
3822 ) or self.fs.file_exists(filename, mode="dir"):
3823 kdumodel = self.fs.path + filename
3824 except (asyncio.TimeoutError, asyncio.CancelledError):
3825 raise
3826 except Exception as e: # it is not a file
3827 self.logger.warning(f"An exception occurred: {str(e)}")
3828
3829 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3830 step = "Synchronize repos for k8s cluster '{}'".format(
3831 k8s_cluster_id
3832 )
3833 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3834
3835 # Synchronize repos
3836 if (
3837 k8sclustertype == "helm-chart"
3838 and cluster_uuid not in updated_cluster_list
3839 ) or (
3840 k8sclustertype == "helm-chart-v3"
3841 and cluster_uuid not in updated_v3_cluster_list
3842 ):
3843 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3844 self.k8scluster_map[k8sclustertype].synchronize_repos(
3845 cluster_uuid=cluster_uuid
3846 )
3847 )
3848 if del_repo_list or added_repo_dict:
3849 if k8sclustertype == "helm-chart":
3850 unset = {
3851 "_admin.helm_charts_added." + item: None
3852 for item in del_repo_list
3853 }
3854 updated = {
3855 "_admin.helm_charts_added." + item: name
3856 for item, name in added_repo_dict.items()
3857 }
3858 updated_cluster_list.append(cluster_uuid)
3859 elif k8sclustertype == "helm-chart-v3":
3860 unset = {
3861 "_admin.helm_charts_v3_added." + item: None
3862 for item in del_repo_list
3863 }
3864 updated = {
3865 "_admin.helm_charts_v3_added." + item: name
3866 for item, name in added_repo_dict.items()
3867 }
3868 updated_v3_cluster_list.append(cluster_uuid)
3869 self.logger.debug(
3870 logging_text + "repos synchronized on k8s cluster "
3871 "'{}' to_delete: {}, to_add: {}".format(
3872 k8s_cluster_id, del_repo_list, added_repo_dict
3873 )
3874 )
3875 self.db.set_one(
3876 "k8sclusters",
3877 {"_id": k8s_cluster_id},
3878 updated,
3879 unset=unset,
3880 )
3881
3882 # Instantiate kdu
3883 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3884 vnfr_data["member-vnf-index-ref"],
3885 kdur["kdu-name"],
3886 k8s_cluster_id,
3887 )
3888 k8s_instance_info = {
3889 "kdu-instance": None,
3890 "k8scluster-uuid": cluster_uuid,
3891 "k8scluster-type": k8sclustertype,
3892 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3893 "kdu-name": kdur["kdu-name"],
3894 "kdu-model": kdumodel,
3895 "namespace": namespace,
3896 "kdu-deployment-name": kdu_deployment_name,
3897 }
3898 db_path = "_admin.deployed.K8s.{}".format(index)
3899 db_nsr_update[db_path] = k8s_instance_info
3900 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3901 vnfd_with_id = find_in_list(
3902 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3903 )
3904 task = asyncio.ensure_future(
3905 self._install_kdu(
3906 nsr_id,
3907 db_path,
3908 vnfr_data,
3909 kdu_index,
3910 kdud,
3911 vnfd_with_id,
3912 k8s_instance_info,
3913 k8params=desc_params,
3914 timeout=1800,
3915 vca_id=vca_id,
3916 )
3917 )
3918 self.lcm_tasks.register(
3919 "ns",
3920 nsr_id,
3921 nslcmop_id,
3922 "instantiate_KDU-{}".format(index),
3923 task,
3924 )
3925 task_instantiation_info[task] = "Deploying KDU {}".format(
3926 kdur["kdu-name"]
3927 )
3928
3929 index += 1
3930
3931 except (LcmException, asyncio.CancelledError):
3932 raise
3933 except Exception as e:
3934 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3935 if isinstance(e, (N2VCException, DbException)):
3936 self.logger.error(logging_text + msg)
3937 else:
3938 self.logger.critical(logging_text + msg, exc_info=True)
3939 raise LcmException(msg)
3940 finally:
3941 if db_nsr_update:
3942 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3943
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC asyncio task per execution environment.

        For every execution environment declared in descriptor_config this
        method looks up (or creates) the corresponding record at
        db_nsr._admin.deployed.VCA.<index>, persists it, then spawns and
        registers an instantiate_N2VC task. Spawned tasks are added to
        task_instantiation_info so the caller can wait on them.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            # NOTE: this condition is always true inside this elif branch
            # (kept as-is); charm name is only required for ns charms
            if "execution-environment-list" not in descriptor_config:
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            vca_name, charm_name, vca_type = self.get_vca_info(
                ee_item, db_nsr, get_charm_name
            )
            if not vca_type:
                self.logger.debug(
                    logging_text + "skipping, non juju/charm/helm configuration"
                )
                continue

            # look for an already-existing VCA record matching this EE;
            # the for/else falls through to "else" only when none matched
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4091
4092 def _format_additional_params(self, params):
4093 params = params or {}
4094 for key, value in params.items():
4095 if str(value).startswith("!!yaml "):
4096 params[key] = yaml.safe_load(value[7:])
4097 return params
4098
4099 def _get_terminate_primitive_params(self, seq, vnf_index):
4100 primitive = seq.get("name")
4101 primitive_params = {}
4102 params = {
4103 "member_vnf_index": vnf_index,
4104 "primitive": primitive,
4105 "primitive_params": primitive_params,
4106 }
4107 desc_params = {}
4108 return self._map_primitive_params(seq, params, desc_params)
4109
4110 # sub-operations
4111
4112 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4113 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4114 if op.get("operationState") == "COMPLETED":
4115 # b. Skip sub-operation
4116 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4117 return self.SUBOPERATION_STATUS_SKIP
4118 else:
4119 # c. retry executing sub-operation
4120 # The sub-operation exists, and operationState != 'COMPLETED'
4121 # Update operationState = 'PROCESSING' to indicate a retry.
4122 operationState = "PROCESSING"
4123 detailed_status = "In progress"
4124 self._update_suboperation_status(
4125 db_nslcmop, op_index, operationState, detailed_status
4126 )
4127 # Return the sub-operation index
4128 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4129 # with arguments extracted from the sub-operation
4130 return op_index
4131
4132 # Find a sub-operation where all keys in a matching dictionary must match
4133 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4134 def _find_suboperation(self, db_nslcmop, match):
4135 if db_nslcmop and match:
4136 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4137 for i, op in enumerate(op_list):
4138 if all(op.get(k) == match[k] for k in match):
4139 return i
4140 return self.SUBOPERATION_STATUS_NOT_FOUND
4141
4142 # Update status for a sub-operation given its index
4143 def _update_suboperation_status(
4144 self, db_nslcmop, op_index, operationState, detailed_status
4145 ):
4146 # Update DB for HA tasks
4147 q_filter = {"_id": db_nslcmop["_id"]}
4148 update_dict = {
4149 "_admin.operations.{}.operationState".format(op_index): operationState,
4150 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4151 }
4152 self.db.set_one(
4153 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4154 )
4155
4156 # Add sub-operation, return the index of the added sub-operation
4157 # Optionally, set operationState, detailed-status, and operationType
4158 # Status and type are currently set for 'scale' sub-operations:
4159 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4160 # 'detailed-status' : status message
4161 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4162 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4163 def _add_suboperation(
4164 self,
4165 db_nslcmop,
4166 vnf_index,
4167 vdu_id,
4168 vdu_count_index,
4169 vdu_name,
4170 primitive,
4171 mapped_primitive_params,
4172 operationState=None,
4173 detailed_status=None,
4174 operationType=None,
4175 RO_nsr_id=None,
4176 RO_scaling_info=None,
4177 ):
4178 if not db_nslcmop:
4179 return self.SUBOPERATION_STATUS_NOT_FOUND
4180 # Get the "_admin.operations" list, if it exists
4181 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4182 op_list = db_nslcmop_admin.get("operations")
4183 # Create or append to the "_admin.operations" list
4184 new_op = {
4185 "member_vnf_index": vnf_index,
4186 "vdu_id": vdu_id,
4187 "vdu_count_index": vdu_count_index,
4188 "primitive": primitive,
4189 "primitive_params": mapped_primitive_params,
4190 }
4191 if operationState:
4192 new_op["operationState"] = operationState
4193 if detailed_status:
4194 new_op["detailed-status"] = detailed_status
4195 if operationType:
4196 new_op["lcmOperationType"] = operationType
4197 if RO_nsr_id:
4198 new_op["RO_nsr_id"] = RO_nsr_id
4199 if RO_scaling_info:
4200 new_op["RO_scaling_info"] = RO_scaling_info
4201 if not op_list:
4202 # No existing operations, create key 'operations' with current operation as first list element
4203 db_nslcmop_admin.update({"operations": [new_op]})
4204 op_list = db_nslcmop_admin.get("operations")
4205 else:
4206 # Existing operations, append operation to list
4207 op_list.append(new_op)
4208
4209 db_nslcmop_update = {"_admin.operations": op_list}
4210 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4211 op_index = len(op_list) - 1
4212 return op_index
4213
4214 # Helper methods for scale() sub-operations
4215
4216 # pre-scale/post-scale:
4217 # Check for 3 different cases:
4218 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4219 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4220 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4221 def _check_or_add_scale_suboperation(
4222 self,
4223 db_nslcmop,
4224 vnf_index,
4225 vnf_config_primitive,
4226 primitive_params,
4227 operationType,
4228 RO_nsr_id=None,
4229 RO_scaling_info=None,
4230 ):
4231 # Find this sub-operation
4232 if RO_nsr_id and RO_scaling_info:
4233 operationType = "SCALE-RO"
4234 match = {
4235 "member_vnf_index": vnf_index,
4236 "RO_nsr_id": RO_nsr_id,
4237 "RO_scaling_info": RO_scaling_info,
4238 }
4239 else:
4240 match = {
4241 "member_vnf_index": vnf_index,
4242 "primitive": vnf_config_primitive,
4243 "primitive_params": primitive_params,
4244 "lcmOperationType": operationType,
4245 }
4246 op_index = self._find_suboperation(db_nslcmop, match)
4247 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4248 # a. New sub-operation
4249 # The sub-operation does not exist, add it.
4250 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4251 # The following parameters are set to None for all kind of scaling:
4252 vdu_id = None
4253 vdu_count_index = None
4254 vdu_name = None
4255 if RO_nsr_id and RO_scaling_info:
4256 vnf_config_primitive = None
4257 primitive_params = None
4258 else:
4259 RO_nsr_id = None
4260 RO_scaling_info = None
4261 # Initial status for sub-operation
4262 operationState = "PROCESSING"
4263 detailed_status = "In progress"
4264 # Add sub-operation for pre/post-scaling (zero or more operations)
4265 self._add_suboperation(
4266 db_nslcmop,
4267 vnf_index,
4268 vdu_id,
4269 vdu_count_index,
4270 vdu_name,
4271 vnf_config_primitive,
4272 primitive_params,
4273 operationState,
4274 detailed_status,
4275 operationType,
4276 RO_nsr_id,
4277 RO_scaling_info,
4278 )
4279 return self.SUBOPERATION_STATUS_NEW
4280 else:
4281 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4282 # or op_index (operationState != 'COMPLETED')
4283 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4284
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: nslcmop database record driving this termination
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) identifier, when a dedicated one is registered
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default kept for backward compatibility with records lacking "type"
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4392
4393 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4394 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4395 namespace = "." + db_nsr["_id"]
4396 try:
4397 await self.n2vc.delete_namespace(
4398 namespace=namespace,
4399 total_timeout=self.timeout.charm_delete,
4400 vca_id=vca_id,
4401 )
4402 except N2VCNotFound: # already deleted. Skip
4403 pass
4404 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4405
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a network service.

        Three stages: (1) prepare and read records, (2) run per-VCA terminate
        primitives (destroying helm/native-charm EEs individually), (3) delete
        remaining execution environments, KDUs and the VIM deployment. Final
        status is always written to the databases in the finally block.

        :param nsr_id: NS record identifier
        :param nslcmop_id: NS LCM operation identifier driving this termination
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed: final status is written at finally
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; cache each VNFD by id to avoid duplicate db reads
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching this VCA's scope
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                helm_vca_list = get_deployed_vca(db_nsr, {"type": "helm-v3"})
                if helm_vca_list:
                    # Delete Namespace and Certificates
                    await self.vca_map["helm-v3"].delete_tls_certificate(
                        namespace=db_nslcmop["nsInstanceId"],
                        certificate_name=self.EE_TLS_NAME,
                    )
                    await self.vca_map["helm-v3"].delete_namespace(
                        namespace=db_nslcmop["nsInstanceId"],
                    )
                else:
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_ee = asyncio.ensure_future(
                        asyncio.wait_for(
                            self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                            timeout=self.timeout.charm_delete,
                        )
                    )
                    tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # the rest of the clean-up is done at the finally block below

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # cancel everything still pending and wait for the cancellations
                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
                await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    timeout_ns_terminate,
                    stage,
                    nslcmop_id,
                )
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
                self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
                self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4744
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, reporting progress and collecting errors.

        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict mapping task object -> human-readable name
        :param timeout: overall wall-clock budget (seconds) for all tasks
        :param stage: 3-element [stage, step, VIM-status] list; stage[1] is
            updated in place with "<done>/<total>" progress and error text
        :param nslcmop_id: operation id used to persist the stage progress
        :param nsr_id: when given, errors are also written to the nsr record
        :return: list of error detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget: total timeout minus elapsed time
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected failure types are logged briefly;
                    # anything else gets a full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4821
4822 async def _cancel_pending_tasks(self, logging_text, created_tasks_info):
4823 for task, name in created_tasks_info.items():
4824 self.logger.debug(logging_text + "Cancelling task: " + name)
4825 task.cancel()
4826
4827 @staticmethod
4828 def _map_primitive_params(primitive_desc, params, instantiation_params):
4829 """
4830 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4831 The default-value is used. If it is between < > it look for a value at instantiation_params
4832 :param primitive_desc: portion of VNFD/NSD that describes primitive
4833 :param params: Params provided by user
4834 :param instantiation_params: Instantiation params provided by user
4835 :return: a dictionary with the calculated params
4836 """
4837 calculated_params = {}
4838 for parameter in primitive_desc.get("parameter", ()):
4839 param_name = parameter["name"]
4840 if param_name in params:
4841 calculated_params[param_name] = params[param_name]
4842 elif "default-value" in parameter or "value" in parameter:
4843 if "value" in parameter:
4844 calculated_params[param_name] = parameter["value"]
4845 else:
4846 calculated_params[param_name] = parameter["default-value"]
4847 if (
4848 isinstance(calculated_params[param_name], str)
4849 and calculated_params[param_name].startswith("<")
4850 and calculated_params[param_name].endswith(">")
4851 ):
4852 if calculated_params[param_name][1:-1] in instantiation_params:
4853 calculated_params[param_name] = instantiation_params[
4854 calculated_params[param_name][1:-1]
4855 ]
4856 else:
4857 raise LcmException(
4858 "Parameter {} needed to execute primitive {} not provided".format(
4859 calculated_params[param_name], primitive_desc["name"]
4860 )
4861 )
4862 else:
4863 raise LcmException(
4864 "Parameter {} needed to execute primitive {} not provided".format(
4865 param_name, primitive_desc["name"]
4866 )
4867 )
4868
4869 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4870 calculated_params[param_name] = yaml.safe_dump(
4871 calculated_params[param_name], default_flow_style=True, width=256
4872 )
4873 elif isinstance(calculated_params[param_name], str) and calculated_params[
4874 param_name
4875 ].startswith("!!yaml "):
4876 calculated_params[param_name] = calculated_params[param_name][7:]
4877 if parameter.get("data-type") == "INTEGER":
4878 try:
4879 calculated_params[param_name] = int(calculated_params[param_name])
4880 except ValueError: # error converting string to int
4881 raise LcmException(
4882 "Parameter {} of primitive {} must be integer".format(
4883 param_name, primitive_desc["name"]
4884 )
4885 )
4886 elif parameter.get("data-type") == "BOOLEAN":
4887 calculated_params[param_name] = not (
4888 (str(calculated_params[param_name])).lower() == "false"
4889 )
4890
4891 # add always ns_config_info if primitive name is config
4892 if primitive_desc["name"] == "config":
4893 if "ns_config_info" in instantiation_params:
4894 calculated_params["ns_config_info"] = instantiation_params[
4895 "ns_config_info"
4896 ]
4897 return calculated_params
4898
4899 def _look_for_deployed_vca(
4900 self,
4901 deployed_vca,
4902 member_vnf_index,
4903 vdu_id,
4904 vdu_count_index,
4905 kdu_name=None,
4906 ee_descriptor_id=None,
4907 ):
4908 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4909 for vca in deployed_vca:
4910 if not vca:
4911 continue
4912 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4913 continue
4914 if (
4915 vdu_count_index is not None
4916 and vdu_count_index != vca["vdu_count_index"]
4917 ):
4918 continue
4919 if kdu_name and kdu_name != vca["kdu_name"]:
4920 continue
4921 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4922 continue
4923 break
4924 else:
4925 # vca_deployed not found
4926 raise LcmException(
4927 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4928 " is not deployed".format(
4929 member_vnf_index,
4930 vdu_id,
4931 vdu_count_index,
4932 kdu_name,
4933 ee_descriptor_id,
4934 )
4935 )
4936 # get ee_id
4937 ee_id = vca.get("ee_id")
4938 vca_type = vca.get(
4939 "type", "lxc_proxy_charm"
4940 ) # default value for backward compatibility - proxy charm
4941 if not ee_id:
4942 raise LcmException(
4943 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4944 "execution environment".format(
4945 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4946 )
4947 )
4948 return ee_id, vca_type
4949
4950 async def _ns_execute_primitive(
4951 self,
4952 ee_id,
4953 primitive,
4954 primitive_params,
4955 retries=0,
4956 retries_interval=30,
4957 timeout=None,
4958 vca_type=None,
4959 db_dict=None,
4960 vca_id: str = None,
4961 ) -> (str, str):
4962 try:
4963 if primitive == "config":
4964 primitive_params = {"params": primitive_params}
4965
4966 vca_type = vca_type or "lxc_proxy_charm"
4967
4968 while retries >= 0:
4969 try:
4970 output = await asyncio.wait_for(
4971 self.vca_map[vca_type].exec_primitive(
4972 ee_id=ee_id,
4973 primitive_name=primitive,
4974 params_dict=primitive_params,
4975 progress_timeout=self.timeout.progress_primitive,
4976 total_timeout=self.timeout.primitive,
4977 db_dict=db_dict,
4978 vca_id=vca_id,
4979 vca_type=vca_type,
4980 ),
4981 timeout=timeout or self.timeout.primitive,
4982 )
4983 # execution was OK
4984 break
4985 except asyncio.CancelledError:
4986 raise
4987 except Exception as e:
4988 retries -= 1
4989 if retries >= 0:
4990 self.logger.debug(
4991 "Error executing action {} on {} -> {}".format(
4992 primitive, ee_id, e
4993 )
4994 )
4995 # wait and retry
4996 await asyncio.sleep(retries_interval)
4997 else:
4998 if isinstance(e, asyncio.TimeoutError):
4999 e = N2VCException(
5000 message="Timed out waiting for action to complete"
5001 )
5002 return "FAILED", getattr(e, "message", repr(e))
5003
5004 return "COMPLETED", output
5005
5006 except (LcmException, asyncio.CancelledError):
5007 raise
5008 except Exception as e:
5009 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5010
5011 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5012 """
5013 Updating the vca_status with latest juju information in nsrs record
5014 :param: nsr_id: Id of the nsr
5015 :param: nslcmop_id: Id of the nslcmop
5016 :return: None
5017 """
5018
5019 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5020 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5021 vca_id = self.get_vca_id({}, db_nsr)
5022 if db_nsr["_admin"]["deployed"]["K8s"]:
5023 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5024 cluster_uuid, kdu_instance, cluster_type = (
5025 k8s["k8scluster-uuid"],
5026 k8s["kdu-instance"],
5027 k8s["k8scluster-type"],
5028 )
5029 await self._on_update_k8s_db(
5030 cluster_uuid=cluster_uuid,
5031 kdu_instance=kdu_instance,
5032 filter={"_id": nsr_id},
5033 vca_id=vca_id,
5034 cluster_type=cluster_type,
5035 )
5036 if db_nsr["_admin"]["deployed"]["VCA"]:
5037 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5038 table, filter = "nsrs", {"_id": nsr_id}
5039 path = "_admin.deployed.VCA.{}.".format(vca_index)
5040 await self._on_update_n2vc_db(table, filter, path, {})
5041
5042 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5043 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5044
    async def action(self, nsr_id, nslcmop_id):
        """Execute a primitive (action) on a NS, VNF, VDU or KDU.

        Reads the nslcmop record describing the requested primitive, resolves
        which execution environment handles it (a K8s KDU via
        self.k8scluster_map, or a VCA charm via self._ns_execute_primitive),
        runs it and persists the resulting state to the nsrs/nslcmops
        collections (done in the ``finally`` clause).

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmop record holding operationParams
        :return: (nslcmop_operation_state, detailed_status); returns None
            early when the HA lock is not acquired by this instance
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params is stored as a JSON string; decode it in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # KDU additionalParams are stored JSON-encoded; decode each
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch
            # above; a NS-level action (no vnf_index) would raise NameError
            # here (caught by the generic except below) — confirm intended
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive in the descriptor of the action's target
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU-native operations need no descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                # the descriptor may map the primitive to a different EE-level name
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect the additionalParams matching the action's target level
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): the loop variable below shadows the outer
                # "primitive" read from operationParams; only primitive_name
                # is used afterwards, so behavior is unaffected
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] != "helm-chart-v3"
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model from params takes precedence over the deployed one
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # generic descriptor-declared primitive on the KDU
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                # any truthy connector output counts as success
                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm-based primitive: locate the deployed VCA and run it there
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                # NOTE(review): if no deployed VCA matches vnf_index, db_dict
                # stays unbound and the call below raises NameError — confirm
                # a match is guaranteed after _look_for_deployed_vca succeeded
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the operation outcome regardless of success or failure
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the result on the message bus (best-effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5416
5417 async def terminate_vdus(
5418 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5419 ):
5420 """This method terminates VDUs
5421
5422 Args:
5423 db_vnfr: VNF instance record
5424 member_vnf_index: VNF index to identify the VDUs to be removed
5425 db_nsr: NS instance record
5426 update_db_nslcmops: Nslcmop update record
5427 """
5428 vca_scaling_info = []
5429 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5430 scaling_info["scaling_direction"] = "IN"
5431 scaling_info["vdu-delete"] = {}
5432 scaling_info["kdu-delete"] = {}
5433 db_vdur = db_vnfr.get("vdur")
5434 vdur_list = copy(db_vdur)
5435 count_index = 0
5436 for index, vdu in enumerate(vdur_list):
5437 vca_scaling_info.append(
5438 {
5439 "osm_vdu_id": vdu["vdu-id-ref"],
5440 "member-vnf-index": member_vnf_index,
5441 "type": "delete",
5442 "vdu_index": count_index,
5443 }
5444 )
5445 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5446 scaling_info["vdu"].append(
5447 {
5448 "name": vdu.get("name") or vdu.get("vdu-name"),
5449 "vdu_id": vdu["vdu-id-ref"],
5450 "interface": [],
5451 }
5452 )
5453 for interface in vdu["interfaces"]:
5454 scaling_info["vdu"][index]["interface"].append(
5455 {
5456 "name": interface["name"],
5457 "ip_address": interface["ip-address"],
5458 "mac_address": interface.get("mac-address"),
5459 }
5460 )
5461 self.logger.info("NS update scaling info{}".format(scaling_info))
5462 stage[2] = "Terminating VDUs"
5463 if scaling_info.get("vdu-delete"):
5464 # scale_process = "RO"
5465 if self.ro_config.ng:
5466 await self._scale_ng_ro(
5467 logging_text,
5468 db_nsr,
5469 update_db_nslcmops,
5470 db_vnfr,
5471 scaling_info,
5472 stage,
5473 )
5474
5475 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5476 """This method is to Remove VNF instances from NS.
5477
5478 Args:
5479 nsr_id: NS instance id
5480 nslcmop_id: nslcmop id of update
5481 vnf_instance_id: id of the VNF instance to be removed
5482
5483 Returns:
5484 result: (str, str) COMPLETED/FAILED, details
5485 """
5486 try:
5487 db_nsr_update = {}
5488 logging_text = "Task ns={} update ".format(nsr_id)
5489 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5490 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5491 if check_vnfr_count > 1:
5492 stage = ["", "", ""]
5493 step = "Getting nslcmop from database"
5494 self.logger.debug(
5495 step + " after having waited for previous tasks to be completed"
5496 )
5497 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5498 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5499 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5500 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5501 """ db_vnfr = self.db.get_one(
5502 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5503
5504 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5505 await self.terminate_vdus(
5506 db_vnfr,
5507 member_vnf_index,
5508 db_nsr,
5509 update_db_nslcmops,
5510 stage,
5511 logging_text,
5512 )
5513
5514 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5515 constituent_vnfr.remove(db_vnfr.get("_id"))
5516 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5517 "constituent-vnfr-ref"
5518 )
5519 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5520 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5521 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5522 return "COMPLETED", "Done"
5523 else:
5524 step = "Terminate VNF Failed with"
5525 raise LcmException(
5526 "{} Cannot terminate the last VNF in this NS.".format(
5527 vnf_instance_id
5528 )
5529 )
5530 except (LcmException, asyncio.CancelledError):
5531 raise
5532 except Exception as e:
5533 self.logger.debug("Error removing VNF {}".format(e))
5534 return "FAILED", "Error removing VNF {}".format(e)
5535
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the vnfr with the new
        descriptor revision and connection points, and scales OUT through RO
        to instantiate the new resources.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index is never incremented — every VDU is
            # created at index 0; confirm single-instance assumption
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the new descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is supplied by the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the vnfr so later steps see the persisted update
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is rebuilt per iteration and
                # never consumed afterwards — appears to be dead code
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5661
5662 async def _ns_charm_upgrade(
5663 self,
5664 ee_id,
5665 charm_id,
5666 charm_type,
5667 path,
5668 timeout: float = None,
5669 ) -> (str, str):
5670 """This method upgrade charms in VNF instances
5671
5672 Args:
5673 ee_id: Execution environment id
5674 path: Local path to the charm
5675 charm_id: charm-id
5676 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5677 timeout: (Float) Timeout for the ns update operation
5678
5679 Returns:
5680 result: (str, str) COMPLETED/FAILED, details
5681 """
5682 try:
5683 charm_type = charm_type or "lxc_proxy_charm"
5684 output = await self.vca_map[charm_type].upgrade_charm(
5685 ee_id=ee_id,
5686 path=path,
5687 charm_id=charm_id,
5688 charm_type=charm_type,
5689 timeout=timeout or self.timeout.ns_update,
5690 )
5691
5692 if output:
5693 return "COMPLETED", output
5694
5695 except (LcmException, asyncio.CancelledError):
5696 raise
5697
5698 except Exception as e:
5699 self.logger.debug("Error upgrading charm {}".format(path))
5700
5701 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5702
5703 async def update(self, nsr_id, nslcmop_id):
5704 """Update NS according to different update types
5705
5706 This method performs upgrade of VNF instances then updates the revision
5707 number in VNF record
5708
5709 Args:
5710 nsr_id: Network service will be updated
5711 nslcmop_id: ns lcm operation id
5712
5713 Returns:
5714 It may raise DbException, LcmException, N2VCException, K8sException
5715
5716 """
5717 # Try to lock HA task here
5718 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5719 if not task_is_locked_by_me:
5720 return
5721
5722 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5723 self.logger.debug(logging_text + "Enter")
5724
5725 # Set the required variables to be filled up later
5726 db_nsr = None
5727 db_nslcmop_update = {}
5728 vnfr_update = {}
5729 nslcmop_operation_state = None
5730 db_nsr_update = {}
5731 error_description_nslcmop = ""
5732 exc = None
5733 change_type = "updated"
5734 detailed_status = ""
5735 member_vnf_index = None
5736
5737 try:
5738 # wait for any previous tasks in process
5739 step = "Waiting for previous operations to terminate"
5740 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5741 self._write_ns_status(
5742 nsr_id=nsr_id,
5743 ns_state=None,
5744 current_operation="UPDATING",
5745 current_operation_id=nslcmop_id,
5746 )
5747
5748 step = "Getting nslcmop from database"
5749 db_nslcmop = self.db.get_one(
5750 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5751 )
5752 update_type = db_nslcmop["operationParams"]["updateType"]
5753
5754 step = "Getting nsr from database"
5755 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5756 old_operational_status = db_nsr["operational-status"]
5757 db_nsr_update["operational-status"] = "updating"
5758 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5759 nsr_deployed = db_nsr["_admin"].get("deployed")
5760
5761 if update_type == "CHANGE_VNFPKG":
5762 # Get the input parameters given through update request
5763 vnf_instance_id = db_nslcmop["operationParams"][
5764 "changeVnfPackageData"
5765 ].get("vnfInstanceId")
5766
5767 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5768 "vnfdId"
5769 )
5770 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5771
5772 step = "Getting vnfr from database"
5773 db_vnfr = self.db.get_one(
5774 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5775 )
5776
5777 step = "Getting vnfds from database"
5778 # Latest VNFD
5779 latest_vnfd = self.db.get_one(
5780 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5781 )
5782 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5783
5784 # Current VNFD
5785 current_vnf_revision = db_vnfr.get("revision", 1)
5786 current_vnfd = self.db.get_one(
5787 "vnfds_revisions",
5788 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5789 fail_on_empty=False,
5790 )
5791 # Charm artifact paths will be filled up later
5792 (
5793 current_charm_artifact_path,
5794 target_charm_artifact_path,
5795 charm_artifact_paths,
5796 helm_artifacts,
5797 ) = ([], [], [], [])
5798
5799 step = "Checking if revision has changed in VNFD"
5800 if current_vnf_revision != latest_vnfd_revision:
5801 change_type = "policy_updated"
5802
5803 # There is new revision of VNFD, update operation is required
5804 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5805 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5806
5807 step = "Removing the VNFD packages if they exist in the local path"
5808 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5809 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5810
5811 step = "Get the VNFD packages from FSMongo"
5812 self.fs.sync(from_path=latest_vnfd_path)
5813 self.fs.sync(from_path=current_vnfd_path)
5814
5815 step = (
5816 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5817 )
5818 current_base_folder = current_vnfd["_admin"]["storage"]
5819 latest_base_folder = latest_vnfd["_admin"]["storage"]
5820
5821 for vca_index, vca_deployed in enumerate(
5822 get_iterable(nsr_deployed, "VCA")
5823 ):
5824 vnf_index = db_vnfr.get("member-vnf-index-ref")
5825
5826 # Getting charm-id and charm-type
5827 if vca_deployed.get("member-vnf-index") == vnf_index:
5828 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5829 vca_type = vca_deployed.get("type")
5830 vdu_count_index = vca_deployed.get("vdu_count_index")
5831
5832 # Getting ee-id
5833 ee_id = vca_deployed.get("ee_id")
5834
5835 step = "Getting descriptor config"
5836 if current_vnfd.get("kdu"):
5837 search_key = "kdu_name"
5838 else:
5839 search_key = "vnfd_id"
5840
5841 entity_id = vca_deployed.get(search_key)
5842
5843 descriptor_config = get_configuration(
5844 current_vnfd, entity_id
5845 )
5846
5847 if "execution-environment-list" in descriptor_config:
5848 ee_list = descriptor_config.get(
5849 "execution-environment-list", []
5850 )
5851 else:
5852 ee_list = []
5853
5854 # There could be several charm used in the same VNF
5855 for ee_item in ee_list:
5856 if ee_item.get("juju"):
5857 step = "Getting charm name"
5858 charm_name = ee_item["juju"].get("charm")
5859
5860 step = "Setting Charm artifact paths"
5861 current_charm_artifact_path.append(
5862 get_charm_artifact_path(
5863 current_base_folder,
5864 charm_name,
5865 vca_type,
5866 current_vnf_revision,
5867 )
5868 )
5869 target_charm_artifact_path.append(
5870 get_charm_artifact_path(
5871 latest_base_folder,
5872 charm_name,
5873 vca_type,
5874 latest_vnfd_revision,
5875 )
5876 )
5877 elif ee_item.get("helm-chart"):
5878 # add chart to list and all parameters
5879 step = "Getting helm chart name"
5880 chart_name = ee_item.get("helm-chart")
5881 vca_type = "helm-v3"
5882 step = "Setting Helm chart artifact paths"
5883
5884 helm_artifacts.append(
5885 {
5886 "current_artifact_path": get_charm_artifact_path(
5887 current_base_folder,
5888 chart_name,
5889 vca_type,
5890 current_vnf_revision,
5891 ),
5892 "target_artifact_path": get_charm_artifact_path(
5893 latest_base_folder,
5894 chart_name,
5895 vca_type,
5896 latest_vnfd_revision,
5897 ),
5898 "ee_id": ee_id,
5899 "vca_index": vca_index,
5900 "vdu_index": vdu_count_index,
5901 }
5902 )
5903
5904 charm_artifact_paths = zip(
5905 current_charm_artifact_path, target_charm_artifact_path
5906 )
5907
5908 step = "Checking if software version has changed in VNFD"
5909 if find_software_version(current_vnfd) != find_software_version(
5910 latest_vnfd
5911 ):
5912 step = "Checking if existing VNF has charm"
5913 for current_charm_path, target_charm_path in list(
5914 charm_artifact_paths
5915 ):
5916 if current_charm_path:
5917 raise LcmException(
5918 "Software version change is not supported as VNF instance {} has charm.".format(
5919 vnf_instance_id
5920 )
5921 )
5922
5923 step = "Checking whether the descriptor has SFC"
5924 if db_nsr.get("nsd", {}).get("vnffgd"):
5925 raise LcmException(
5926 "Ns update is not allowed for NS with SFC"
5927 )
5928
5929 # There is no change in the charm package, then redeploy the VNF
5930 # based on new descriptor
5931 step = "Redeploying VNF"
5932 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5933 (result, detailed_status) = await self._ns_redeploy_vnf(
5934 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5935 )
5936 if result == "FAILED":
5937 nslcmop_operation_state = result
5938 error_description_nslcmop = detailed_status
5939 old_operational_status = "failed"
5940 db_nslcmop_update["detailed-status"] = detailed_status
5941 db_nsr_update["detailed-status"] = detailed_status
5942 scaling_aspect = get_scaling_aspect(latest_vnfd)
5943 scaling_group_desc = db_nsr.get("_admin").get(
5944 "scaling-group", None
5945 )
5946 if scaling_group_desc:
5947 for aspect in scaling_aspect:
5948 scaling_group_id = aspect.get("id")
5949 for scale_index, scaling_group in enumerate(
5950 scaling_group_desc
5951 ):
5952 if scaling_group.get("name") == scaling_group_id:
5953 db_nsr_update[
5954 "_admin.scaling-group.{}.nb-scale-op".format(
5955 scale_index
5956 )
5957 ] = 0
5958 self.logger.debug(
5959 logging_text
5960 + " step {} Done with result {} {}".format(
5961 step, nslcmop_operation_state, detailed_status
5962 )
5963 )
5964
5965 else:
5966 step = "Checking if any charm package has changed or not"
5967 for current_charm_path, target_charm_path in list(
5968 charm_artifact_paths
5969 ):
5970 if (
5971 current_charm_path
5972 and target_charm_path
5973 and self.check_charm_hash_changed(
5974 current_charm_path, target_charm_path
5975 )
5976 ):
5977 step = "Checking whether VNF uses juju bundle"
5978 if check_juju_bundle_existence(current_vnfd):
5979 raise LcmException(
5980 "Charm upgrade is not supported for the instance which"
5981 " uses juju-bundle: {}".format(
5982 check_juju_bundle_existence(current_vnfd)
5983 )
5984 )
5985
5986 step = "Upgrading Charm"
5987 (
5988 result,
5989 detailed_status,
5990 ) = await self._ns_charm_upgrade(
5991 ee_id=ee_id,
5992 charm_id=vca_id,
5993 charm_type=vca_type,
5994 path=self.fs.path + target_charm_path,
5995 timeout=timeout_seconds,
5996 )
5997
5998 if result == "FAILED":
5999 nslcmop_operation_state = result
6000 error_description_nslcmop = detailed_status
6001
6002 db_nslcmop_update["detailed-status"] = detailed_status
6003 self.logger.debug(
6004 logging_text
6005 + " step {} Done with result {} {}".format(
6006 step, nslcmop_operation_state, detailed_status
6007 )
6008 )
6009
6010 step = "Updating policies"
6011 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6012 result = "COMPLETED"
6013 detailed_status = "Done"
6014 db_nslcmop_update["detailed-status"] = "Done"
6015
6016 # helm base EE
6017 for item in helm_artifacts:
6018 if not (
6019 item["current_artifact_path"]
6020 and item["target_artifact_path"]
6021 and self.check_charm_hash_changed(
6022 item["current_artifact_path"],
6023 item["target_artifact_path"],
6024 )
6025 ):
6026 continue
6027 db_update_entry = "_admin.deployed.VCA.{}.".format(
6028 item["vca_index"]
6029 )
6030 vnfr_id = db_vnfr["_id"]
6031 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6032 db_dict = {
6033 "collection": "nsrs",
6034 "filter": {"_id": nsr_id},
6035 "path": db_update_entry,
6036 }
6037 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6038 await self.vca_map[vca_type].upgrade_execution_environment(
6039 namespace=namespace,
6040 helm_id=helm_id,
6041 db_dict=db_dict,
6042 config=osm_config,
6043 artifact_path=item["target_artifact_path"],
6044 vca_type=vca_type,
6045 )
6046 vnf_id = db_vnfr.get("vnfd-ref")
6047 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6048 self.logger.debug("get ssh key block")
6049 rw_mgmt_ip = None
6050 if deep_get(
6051 config_descriptor,
6052 ("config-access", "ssh-access", "required"),
6053 ):
6054 # Needed to inject a ssh key
6055 user = deep_get(
6056 config_descriptor,
6057 ("config-access", "ssh-access", "default-user"),
6058 )
6059 step = (
6060 "Install configuration Software, getting public ssh key"
6061 )
6062 pub_key = await self.vca_map[
6063 vca_type
6064 ].get_ee_ssh_public__key(
6065 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6066 )
6067
6068 step = (
6069 "Insert public key into VM user={} ssh_key={}".format(
6070 user, pub_key
6071 )
6072 )
6073 self.logger.debug(logging_text + step)
6074
6075 # wait for RO (ip-address) Insert pub_key into VM
6076 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6077 logging_text,
6078 nsr_id,
6079 vnfr_id,
6080 None,
6081 item["vdu_index"],
6082 user=user,
6083 pub_key=pub_key,
6084 )
6085
6086 initial_config_primitive_list = config_descriptor.get(
6087 "initial-config-primitive"
6088 )
6089 config_primitive = next(
6090 (
6091 p
6092 for p in initial_config_primitive_list
6093 if p["name"] == "config"
6094 ),
6095 None,
6096 )
6097 if not config_primitive:
6098 continue
6099
6100 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6101 if rw_mgmt_ip:
6102 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6103 if db_vnfr.get("additionalParamsForVnf"):
6104 deploy_params.update(
6105 parse_yaml_strings(
6106 db_vnfr["additionalParamsForVnf"].copy()
6107 )
6108 )
6109 primitive_params_ = self._map_primitive_params(
6110 config_primitive, {}, deploy_params
6111 )
6112
6113 step = "execute primitive '{}' params '{}'".format(
6114 config_primitive["name"], primitive_params_
6115 )
6116 self.logger.debug(logging_text + step)
6117 await self.vca_map[vca_type].exec_primitive(
6118 ee_id=ee_id,
6119 primitive_name=config_primitive["name"],
6120 params_dict=primitive_params_,
6121 db_dict=db_dict,
6122 vca_id=vca_id,
6123 vca_type=vca_type,
6124 )
6125
6126 step = "Updating policies"
6127 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6128 detailed_status = "Done"
6129 db_nslcmop_update["detailed-status"] = "Done"
6130
6131 # If nslcmop_operation_state is None, so any operation is not failed.
6132 if not nslcmop_operation_state:
6133 nslcmop_operation_state = "COMPLETED"
6134
6135 # If update CHANGE_VNFPKG nslcmop_operation is successful
6136 # vnf revision need to be updated
6137 vnfr_update["revision"] = latest_vnfd_revision
6138 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6139
6140 self.logger.debug(
6141 logging_text
6142 + " task Done with result {} {}".format(
6143 nslcmop_operation_state, detailed_status
6144 )
6145 )
6146 elif update_type == "REMOVE_VNF":
6147 # This part is included in https://osm.etsi.org/gerrit/11876
6148 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6149 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6150 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6151 step = "Removing VNF"
6152 (result, detailed_status) = await self.remove_vnf(
6153 nsr_id, nslcmop_id, vnf_instance_id
6154 )
6155 if result == "FAILED":
6156 nslcmop_operation_state = result
6157 error_description_nslcmop = detailed_status
6158 db_nslcmop_update["detailed-status"] = detailed_status
6159 change_type = "vnf_terminated"
6160 if not nslcmop_operation_state:
6161 nslcmop_operation_state = "COMPLETED"
6162 self.logger.debug(
6163 logging_text
6164 + " task Done with result {} {}".format(
6165 nslcmop_operation_state, detailed_status
6166 )
6167 )
6168
6169 elif update_type == "OPERATE_VNF":
6170 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6171 "vnfInstanceId"
6172 ]
6173 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6174 "changeStateTo"
6175 ]
6176 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6177 "additionalParam"
6178 ]
6179 (result, detailed_status) = await self.rebuild_start_stop(
6180 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6181 )
6182 if result == "FAILED":
6183 nslcmop_operation_state = result
6184 error_description_nslcmop = detailed_status
6185 db_nslcmop_update["detailed-status"] = detailed_status
6186 if not nslcmop_operation_state:
6187 nslcmop_operation_state = "COMPLETED"
6188 self.logger.debug(
6189 logging_text
6190 + " task Done with result {} {}".format(
6191 nslcmop_operation_state, detailed_status
6192 )
6193 )
6194 elif update_type == "VERTICAL_SCALE":
6195 self.logger.debug(
6196 "Prepare for VERTICAL_SCALE update operation {}".format(db_nslcmop)
6197 )
6198 # Get the input parameters given through update request
6199 vnf_instance_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
6200 "vnfInstanceId"
6201 )
6202
6203 vnfd_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
6204 "vnfdId"
6205 )
6206 step = "Getting vnfr from database"
6207 db_vnfr = self.db.get_one(
6208 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
6209 )
6210 self.logger.debug(step)
6211 step = "Getting vnfds from database"
6212 self.logger.debug("Start" + step)
6213 # Latest VNFD
6214 latest_vnfd = self.db.get_one(
6215 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
6216 )
6217 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
6218 # Current VNFD
6219 current_vnf_revision = db_vnfr.get("revision", 1)
6220 current_vnfd = self.db.get_one(
6221 "vnfds_revisions",
6222 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
6223 fail_on_empty=False,
6224 )
6225 self.logger.debug("End" + step)
6226 # verify flavor changes
6227 step = "Checking for flavor change"
6228 if find_software_version(current_vnfd) != find_software_version(
6229 latest_vnfd
6230 ):
6231 self.logger.debug("Start" + step)
6232 if current_vnfd.get("virtual-compute-desc") == latest_vnfd.get(
6233 "virtual-compute-desc"
6234 ) and current_vnfd.get("virtual-storage-desc") == latest_vnfd.get(
6235 "virtual-storage-desc"
6236 ):
6237 raise LcmException(
6238 "No change in flavor check vnfd {}".format(vnfd_id)
6239 )
6240 else:
6241 raise LcmException(
6242 "No change in software_version of vnfd {}".format(vnfd_id)
6243 )
6244
6245 self.logger.debug("End" + step)
6246
6247 (result, detailed_status) = await self.vertical_scale(
6248 nsr_id, nslcmop_id
6249 )
6250 self.logger.debug(
6251 "vertical_scale result: {} detailed_status :{}".format(
6252 result, detailed_status
6253 )
6254 )
6255 if result == "FAILED":
6256 nslcmop_operation_state = result
6257 error_description_nslcmop = detailed_status
6258 db_nslcmop_update["detailed-status"] = detailed_status
6259 if not nslcmop_operation_state:
6260 nslcmop_operation_state = "COMPLETED"
6261 self.logger.debug(
6262 logging_text
6263 + " task Done with result {} {}".format(
6264 nslcmop_operation_state, detailed_status
6265 )
6266 )
6267
6268 # If nslcmop_operation_state is None, so any operation is not failed.
6269 # All operations are executed in overall.
6270 if not nslcmop_operation_state:
6271 nslcmop_operation_state = "COMPLETED"
6272 db_nsr_update["operational-status"] = old_operational_status
6273
6274 except (DbException, LcmException, N2VCException, K8sException) as e:
6275 self.logger.error(logging_text + "Exit Exception {}".format(e))
6276 exc = e
6277 except asyncio.CancelledError:
6278 self.logger.error(
6279 logging_text + "Cancelled Exception while '{}'".format(step)
6280 )
6281 exc = "Operation was cancelled"
6282 except asyncio.TimeoutError:
6283 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6284 exc = "Timeout"
6285 except Exception as e:
6286 exc = traceback.format_exc()
6287 self.logger.critical(
6288 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6289 exc_info=True,
6290 )
6291 finally:
6292 if exc:
6293 db_nslcmop_update[
6294 "detailed-status"
6295 ] = (
6296 detailed_status
6297 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6298 nslcmop_operation_state = "FAILED"
6299 db_nsr_update["operational-status"] = old_operational_status
6300 if db_nsr:
6301 self._write_ns_status(
6302 nsr_id=nsr_id,
6303 ns_state=db_nsr["nsState"],
6304 current_operation="IDLE",
6305 current_operation_id=None,
6306 other_update=db_nsr_update,
6307 )
6308
6309 self._write_op_status(
6310 op_id=nslcmop_id,
6311 stage="",
6312 error_message=error_description_nslcmop,
6313 operation_state=nslcmop_operation_state,
6314 other_update=db_nslcmop_update,
6315 )
6316
6317 if nslcmop_operation_state:
6318 try:
6319 msg = {
6320 "nsr_id": nsr_id,
6321 "nslcmop_id": nslcmop_id,
6322 "operationState": nslcmop_operation_state,
6323 }
6324 if (
6325 change_type in ("vnf_terminated", "policy_updated")
6326 and member_vnf_index
6327 ):
6328 msg.update({"vnf_member_index": member_vnf_index})
6329 await self.msg.aiowrite("ns", change_type, msg)
6330 except Exception as e:
6331 self.logger.error(
6332 logging_text + "kafka_write notification Exception {}".format(e)
6333 )
6334 self.logger.debug(logging_text + "Exit")
6335 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6336 return nslcmop_operation_state, detailed_status
6337
6338 async def scale(self, nsr_id, nslcmop_id):
6339 # Try to lock HA task here
6340 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6341 if not task_is_locked_by_me:
6342 return
6343
6344 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6345 stage = ["", "", ""]
6346 tasks_dict_info = {}
6347 # ^ stage, step, VIM progress
6348 self.logger.debug(logging_text + "Enter")
6349 # get all needed from database
6350 db_nsr = None
6351 db_nslcmop_update = {}
6352 db_nsr_update = {}
6353 exc = None
6354 # in case of error, indicates what part of scale was failed to put nsr at error status
6355 scale_process = None
6356 old_operational_status = ""
6357 old_config_status = ""
6358 nsi_id = None
6359 prom_job_name = ""
6360 try:
6361 # wait for any previous tasks in process
6362 step = "Waiting for previous operations to terminate"
6363 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6364 self._write_ns_status(
6365 nsr_id=nsr_id,
6366 ns_state=None,
6367 current_operation="SCALING",
6368 current_operation_id=nslcmop_id,
6369 )
6370
6371 step = "Getting nslcmop from database"
6372 self.logger.debug(
6373 step + " after having waited for previous tasks to be completed"
6374 )
6375 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6376
6377 step = "Getting nsr from database"
6378 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6379 old_operational_status = db_nsr["operational-status"]
6380 old_config_status = db_nsr["config-status"]
6381
6382 step = "Checking whether the descriptor has SFC"
6383 if db_nsr.get("nsd", {}).get("vnffgd"):
6384 raise LcmException("Scaling is not allowed for NS with SFC")
6385
6386 step = "Parsing scaling parameters"
6387 db_nsr_update["operational-status"] = "scaling"
6388 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6389 nsr_deployed = db_nsr["_admin"].get("deployed")
6390
6391 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6392 "scaleByStepData"
6393 ]["member-vnf-index"]
6394 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6395 "scaleByStepData"
6396 ]["scaling-group-descriptor"]
6397 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6398 # for backward compatibility
6399 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6400 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6401 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6402 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6403
6404 step = "Getting vnfr from database"
6405 db_vnfr = self.db.get_one(
6406 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6407 )
6408
6409 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6410
6411 step = "Getting vnfd from database"
6412 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6413
6414 base_folder = db_vnfd["_admin"]["storage"]
6415
6416 step = "Getting scaling-group-descriptor"
6417 scaling_descriptor = find_in_list(
6418 get_scaling_aspect(db_vnfd),
6419 lambda scale_desc: scale_desc["name"] == scaling_group,
6420 )
6421 if not scaling_descriptor:
6422 raise LcmException(
6423 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6424 "at vnfd:scaling-group-descriptor".format(scaling_group)
6425 )
6426
6427 step = "Sending scale order to VIM"
6428 # TODO check if ns is in a proper status
6429 nb_scale_op = 0
6430 if not db_nsr["_admin"].get("scaling-group"):
6431 self.update_db_2(
6432 "nsrs",
6433 nsr_id,
6434 {
6435 "_admin.scaling-group": [
6436 {
6437 "name": scaling_group,
6438 "vnf_index": vnf_index,
6439 "nb-scale-op": 0,
6440 }
6441 ]
6442 },
6443 )
6444 admin_scale_index = 0
6445 else:
6446 for admin_scale_index, admin_scale_info in enumerate(
6447 db_nsr["_admin"]["scaling-group"]
6448 ):
6449 if (
6450 admin_scale_info["name"] == scaling_group
6451 and admin_scale_info["vnf_index"] == vnf_index
6452 ):
6453 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6454 break
6455 else: # not found, set index one plus last element and add new entry with the name
6456 admin_scale_index += 1
6457 db_nsr_update[
6458 "_admin.scaling-group.{}.name".format(admin_scale_index)
6459 ] = scaling_group
6460 db_nsr_update[
6461 "_admin.scaling-group.{}.vnf_index".format(admin_scale_index)
6462 ] = vnf_index
6463
6464 vca_scaling_info = []
6465 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6466 if scaling_type == "SCALE_OUT":
6467 if "aspect-delta-details" not in scaling_descriptor:
6468 raise LcmException(
6469 "Aspect delta details not fount in scaling descriptor {}".format(
6470 scaling_descriptor["name"]
6471 )
6472 )
6473 # count if max-instance-count is reached
6474 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6475
6476 scaling_info["scaling_direction"] = "OUT"
6477 scaling_info["vdu-create"] = {}
6478 scaling_info["kdu-create"] = {}
6479 for delta in deltas:
6480 for vdu_delta in delta.get("vdu-delta", {}):
6481 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6482 # vdu_index also provides the number of instance of the targeted vdu
6483 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6484 if vdu_index <= len(db_vnfr["vdur"]):
6485 vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
6486 prom_job_name = (
6487 db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
6488 )
6489 prom_job_name = prom_job_name.replace("_", "")
6490 prom_job_name = prom_job_name.replace("-", "")
6491 else:
6492 prom_job_name = None
6493 cloud_init_text = self._get_vdu_cloud_init_content(
6494 vdud, db_vnfd
6495 )
6496 if cloud_init_text:
6497 additional_params = (
6498 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6499 or {}
6500 )
6501 cloud_init_list = []
6502
6503 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6504 max_instance_count = 10
6505 if vdu_profile and "max-number-of-instances" in vdu_profile:
6506 max_instance_count = vdu_profile.get(
6507 "max-number-of-instances", 10
6508 )
6509
6510 default_instance_num = get_number_of_instances(
6511 db_vnfd, vdud["id"]
6512 )
6513 instances_number = vdu_delta.get("number-of-instances", 1)
6514 nb_scale_op += instances_number
6515
6516 new_instance_count = nb_scale_op + default_instance_num
6517 # Control if new count is over max and vdu count is less than max.
6518 # Then assign new instance count
6519 if new_instance_count > max_instance_count > vdu_count:
6520 instances_number = new_instance_count - max_instance_count
6521 else:
6522 instances_number = instances_number
6523
6524 if new_instance_count > max_instance_count:
6525 raise LcmException(
6526 "reached the limit of {} (max-instance-count) "
6527 "scaling-out operations for the "
6528 "scaling-group-descriptor '{}'".format(
6529 nb_scale_op, scaling_group
6530 )
6531 )
6532 for x in range(vdu_delta.get("number-of-instances", 1)):
6533 if cloud_init_text:
6534 # TODO Information of its own ip is not available because db_vnfr is not updated.
6535 additional_params["OSM"] = get_osm_params(
6536 db_vnfr, vdu_delta["id"], vdu_index + x
6537 )
6538 cloud_init_list.append(
6539 self._parse_cloud_init(
6540 cloud_init_text,
6541 additional_params,
6542 db_vnfd["id"],
6543 vdud["id"],
6544 )
6545 )
6546 vca_scaling_info.append(
6547 {
6548 "osm_vdu_id": vdu_delta["id"],
6549 "member-vnf-index": vnf_index,
6550 "type": "create",
6551 "vdu_index": vdu_index + x,
6552 }
6553 )
6554 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6555 for kdu_delta in delta.get("kdu-resource-delta", {}):
6556 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6557 kdu_name = kdu_profile["kdu-name"]
6558 resource_name = kdu_profile.get("resource-name", "")
6559
6560 # Might have different kdus in the same delta
6561 # Should have list for each kdu
6562 if not scaling_info["kdu-create"].get(kdu_name, None):
6563 scaling_info["kdu-create"][kdu_name] = []
6564
6565 kdur = get_kdur(db_vnfr, kdu_name)
6566 if kdur.get("helm-chart"):
6567 k8s_cluster_type = "helm-chart-v3"
6568 self.logger.debug("kdur: {}".format(kdur))
6569 elif kdur.get("juju-bundle"):
6570 k8s_cluster_type = "juju-bundle"
6571 else:
6572 raise LcmException(
6573 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6574 "juju-bundle. Maybe an old NBI version is running".format(
6575 db_vnfr["member-vnf-index-ref"], kdu_name
6576 )
6577 )
6578
6579 max_instance_count = 10
6580 if kdu_profile and "max-number-of-instances" in kdu_profile:
6581 max_instance_count = kdu_profile.get(
6582 "max-number-of-instances", 10
6583 )
6584
6585 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6586 deployed_kdu, _ = get_deployed_kdu(
6587 nsr_deployed, kdu_name, vnf_index
6588 )
6589 if deployed_kdu is None:
6590 raise LcmException(
6591 "KDU '{}' for vnf '{}' not deployed".format(
6592 kdu_name, vnf_index
6593 )
6594 )
6595 kdu_instance = deployed_kdu.get("kdu-instance")
6596 instance_num = await self.k8scluster_map[
6597 k8s_cluster_type
6598 ].get_scale_count(
6599 resource_name,
6600 kdu_instance,
6601 vca_id=vca_id,
6602 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6603 kdu_model=deployed_kdu.get("kdu-model"),
6604 )
6605 kdu_replica_count = instance_num + kdu_delta.get(
6606 "number-of-instances", 1
6607 )
6608
6609 # Control if new count is over max and instance_num is less than max.
6610 # Then assign max instance number to kdu replica count
6611 if kdu_replica_count > max_instance_count > instance_num:
6612 kdu_replica_count = max_instance_count
6613 if kdu_replica_count > max_instance_count:
6614 raise LcmException(
6615 "reached the limit of {} (max-instance-count) "
6616 "scaling-out operations for the "
6617 "scaling-group-descriptor '{}'".format(
6618 instance_num, scaling_group
6619 )
6620 )
6621
6622 for x in range(kdu_delta.get("number-of-instances", 1)):
6623 vca_scaling_info.append(
6624 {
6625 "osm_kdu_id": kdu_name,
6626 "member-vnf-index": vnf_index,
6627 "type": "create",
6628 "kdu_index": instance_num + x - 1,
6629 }
6630 )
6631 scaling_info["kdu-create"][kdu_name].append(
6632 {
6633 "member-vnf-index": vnf_index,
6634 "type": "create",
6635 "k8s-cluster-type": k8s_cluster_type,
6636 "resource-name": resource_name,
6637 "scale": kdu_replica_count,
6638 }
6639 )
6640 elif scaling_type == "SCALE_IN":
6641 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6642
6643 scaling_info["scaling_direction"] = "IN"
6644 scaling_info["vdu-delete"] = {}
6645 scaling_info["kdu-delete"] = {}
6646
6647 for delta in deltas:
6648 for vdu_delta in delta.get("vdu-delta", {}):
6649 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6650 min_instance_count = 0
6651 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6652 if vdu_profile and "min-number-of-instances" in vdu_profile:
6653 min_instance_count = vdu_profile["min-number-of-instances"]
6654
6655 default_instance_num = get_number_of_instances(
6656 db_vnfd, vdu_delta["id"]
6657 )
6658 instance_num = vdu_delta.get("number-of-instances", 1)
6659 nb_scale_op -= instance_num
6660
6661 new_instance_count = nb_scale_op + default_instance_num
6662
6663 if new_instance_count < min_instance_count < vdu_count:
6664 instances_number = min_instance_count - new_instance_count
6665 else:
6666 instances_number = instance_num
6667
6668 if new_instance_count < min_instance_count:
6669 raise LcmException(
6670 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6671 "scaling-group-descriptor '{}'".format(
6672 nb_scale_op, scaling_group
6673 )
6674 )
6675 for x in range(vdu_delta.get("number-of-instances", 1)):
6676 vca_scaling_info.append(
6677 {
6678 "osm_vdu_id": vdu_delta["id"],
6679 "member-vnf-index": vnf_index,
6680 "type": "delete",
6681 "vdu_index": vdu_index - 1 - x,
6682 }
6683 )
6684 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6685 for kdu_delta in delta.get("kdu-resource-delta", {}):
6686 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6687 kdu_name = kdu_profile["kdu-name"]
6688 resource_name = kdu_profile.get("resource-name", "")
6689
6690 if not scaling_info["kdu-delete"].get(kdu_name, None):
6691 scaling_info["kdu-delete"][kdu_name] = []
6692
6693 kdur = get_kdur(db_vnfr, kdu_name)
6694 if kdur.get("helm-chart"):
6695 k8s_cluster_type = "helm-chart-v3"
6696 self.logger.debug("kdur: {}".format(kdur))
6697 elif kdur.get("juju-bundle"):
6698 k8s_cluster_type = "juju-bundle"
6699 else:
6700 raise LcmException(
6701 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6702 "juju-bundle. Maybe an old NBI version is running".format(
6703 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6704 )
6705 )
6706
6707 min_instance_count = 0
6708 if kdu_profile and "min-number-of-instances" in kdu_profile:
6709 min_instance_count = kdu_profile["min-number-of-instances"]
6710
6711 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6712 deployed_kdu, _ = get_deployed_kdu(
6713 nsr_deployed, kdu_name, vnf_index
6714 )
6715 if deployed_kdu is None:
6716 raise LcmException(
6717 "KDU '{}' for vnf '{}' not deployed".format(
6718 kdu_name, vnf_index
6719 )
6720 )
6721 kdu_instance = deployed_kdu.get("kdu-instance")
6722 instance_num = await self.k8scluster_map[
6723 k8s_cluster_type
6724 ].get_scale_count(
6725 resource_name,
6726 kdu_instance,
6727 vca_id=vca_id,
6728 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6729 kdu_model=deployed_kdu.get("kdu-model"),
6730 )
6731 kdu_replica_count = instance_num - kdu_delta.get(
6732 "number-of-instances", 1
6733 )
6734
6735 if kdu_replica_count < min_instance_count < instance_num:
6736 kdu_replica_count = min_instance_count
6737 if kdu_replica_count < min_instance_count:
6738 raise LcmException(
6739 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6740 "scaling-group-descriptor '{}'".format(
6741 instance_num, scaling_group
6742 )
6743 )
6744
6745 for x in range(kdu_delta.get("number-of-instances", 1)):
6746 vca_scaling_info.append(
6747 {
6748 "osm_kdu_id": kdu_name,
6749 "member-vnf-index": vnf_index,
6750 "type": "delete",
6751 "kdu_index": instance_num - x - 1,
6752 }
6753 )
6754 scaling_info["kdu-delete"][kdu_name].append(
6755 {
6756 "member-vnf-index": vnf_index,
6757 "type": "delete",
6758 "k8s-cluster-type": k8s_cluster_type,
6759 "resource-name": resource_name,
6760 "scale": kdu_replica_count,
6761 }
6762 )
6763
6764 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6765 vdu_delete = copy(scaling_info.get("vdu-delete"))
6766 if scaling_info["scaling_direction"] == "IN":
6767 for vdur in reversed(db_vnfr["vdur"]):
6768 if vdu_delete.get(vdur["vdu-id-ref"]):
6769 vdu_delete[vdur["vdu-id-ref"]] -= 1
6770 scaling_info["vdu"].append(
6771 {
6772 "name": vdur.get("name") or vdur.get("vdu-name"),
6773 "vdu_id": vdur["vdu-id-ref"],
6774 "interface": [],
6775 }
6776 )
6777 for interface in vdur["interfaces"]:
6778 scaling_info["vdu"][-1]["interface"].append(
6779 {
6780 "name": interface["name"],
6781 "ip_address": interface["ip-address"],
6782 "mac_address": interface.get("mac-address"),
6783 }
6784 )
6785 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6786
6787 # PRE-SCALE BEGIN
6788 step = "Executing pre-scale vnf-config-primitive"
6789 if scaling_descriptor.get("scaling-config-action"):
6790 for scaling_config_action in scaling_descriptor[
6791 "scaling-config-action"
6792 ]:
6793 if (
6794 scaling_config_action.get("trigger") == "pre-scale-in"
6795 and scaling_type == "SCALE_IN"
6796 ) or (
6797 scaling_config_action.get("trigger") == "pre-scale-out"
6798 and scaling_type == "SCALE_OUT"
6799 ):
6800 vnf_config_primitive = scaling_config_action[
6801 "vnf-config-primitive-name-ref"
6802 ]
6803 step = db_nslcmop_update[
6804 "detailed-status"
6805 ] = "executing pre-scale scaling-config-action '{}'".format(
6806 vnf_config_primitive
6807 )
6808
6809 # look for primitive
6810 for config_primitive in (
6811 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6812 ).get("config-primitive", ()):
6813 if config_primitive["name"] == vnf_config_primitive:
6814 break
6815 else:
6816 raise LcmException(
6817 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6818 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6819 "primitive".format(scaling_group, vnf_config_primitive)
6820 )
6821
6822 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6823 if db_vnfr.get("additionalParamsForVnf"):
6824 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6825
6826 scale_process = "VCA"
6827 db_nsr_update["config-status"] = "configuring pre-scaling"
6828 primitive_params = self._map_primitive_params(
6829 config_primitive, {}, vnfr_params
6830 )
6831
6832 # Pre-scale retry check: Check if this sub-operation has been executed before
6833 op_index = self._check_or_add_scale_suboperation(
6834 db_nslcmop,
6835 vnf_index,
6836 vnf_config_primitive,
6837 primitive_params,
6838 "PRE-SCALE",
6839 )
6840 if op_index == self.SUBOPERATION_STATUS_SKIP:
6841 # Skip sub-operation
6842 result = "COMPLETED"
6843 result_detail = "Done"
6844 self.logger.debug(
6845 logging_text
6846 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6847 vnf_config_primitive, result, result_detail
6848 )
6849 )
6850 else:
6851 if op_index == self.SUBOPERATION_STATUS_NEW:
6852 # New sub-operation: Get index of this sub-operation
6853 op_index = (
6854 len(db_nslcmop.get("_admin", {}).get("operations"))
6855 - 1
6856 )
6857 self.logger.debug(
6858 logging_text
6859 + "vnf_config_primitive={} New sub-operation".format(
6860 vnf_config_primitive
6861 )
6862 )
6863 else:
6864 # retry: Get registered params for this existing sub-operation
6865 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6866 op_index
6867 ]
6868 vnf_index = op.get("member_vnf_index")
6869 vnf_config_primitive = op.get("primitive")
6870 primitive_params = op.get("primitive_params")
6871 self.logger.debug(
6872 logging_text
6873 + "vnf_config_primitive={} Sub-operation retry".format(
6874 vnf_config_primitive
6875 )
6876 )
6877 # Execute the primitive, either with new (first-time) or registered (reintent) args
6878 ee_descriptor_id = config_primitive.get(
6879 "execution-environment-ref"
6880 )
6881 primitive_name = config_primitive.get(
6882 "execution-environment-primitive", vnf_config_primitive
6883 )
6884 ee_id, vca_type = self._look_for_deployed_vca(
6885 nsr_deployed["VCA"],
6886 member_vnf_index=vnf_index,
6887 vdu_id=None,
6888 vdu_count_index=None,
6889 ee_descriptor_id=ee_descriptor_id,
6890 )
6891 result, result_detail = await self._ns_execute_primitive(
6892 ee_id,
6893 primitive_name,
6894 primitive_params,
6895 vca_type=vca_type,
6896 vca_id=vca_id,
6897 )
6898 self.logger.debug(
6899 logging_text
6900 + "vnf_config_primitive={} Done with result {} {}".format(
6901 vnf_config_primitive, result, result_detail
6902 )
6903 )
6904 # Update operationState = COMPLETED | FAILED
6905 self._update_suboperation_status(
6906 db_nslcmop, op_index, result, result_detail
6907 )
6908
6909 if result == "FAILED":
6910 raise LcmException(result_detail)
6911 db_nsr_update["config-status"] = old_config_status
6912 scale_process = None
6913 # PRE-SCALE END
6914
6915 db_nsr_update[
6916 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6917 ] = nb_scale_op
6918 db_nsr_update[
6919 "_admin.scaling-group.{}.time".format(admin_scale_index)
6920 ] = time()
6921
6922 # SCALE-IN VCA - BEGIN
6923 if vca_scaling_info:
6924 step = db_nslcmop_update[
6925 "detailed-status"
6926 ] = "Deleting the execution environments"
6927 scale_process = "VCA"
6928 for vca_info in vca_scaling_info:
6929 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6930 member_vnf_index = str(vca_info["member-vnf-index"])
6931 self.logger.debug(
6932 logging_text + "vdu info: {}".format(vca_info)
6933 )
6934 if vca_info.get("osm_vdu_id"):
6935 vdu_id = vca_info["osm_vdu_id"]
6936 vdu_index = int(vca_info["vdu_index"])
6937 stage[
6938 1
6939 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6940 member_vnf_index, vdu_id, vdu_index
6941 )
6942 stage[2] = step = "Scaling in VCA"
6943 self._write_op_status(op_id=nslcmop_id, stage=stage)
6944 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6945 config_update = db_nsr["configurationStatus"]
6946 for vca_index, vca in enumerate(vca_update):
6947 if (
6948 (vca or vca.get("ee_id"))
6949 and vca["member-vnf-index"] == member_vnf_index
6950 and vca["vdu_count_index"] == vdu_index
6951 ):
6952 if vca.get("vdu_id"):
6953 config_descriptor = get_configuration(
6954 db_vnfd, vca.get("vdu_id")
6955 )
6956 elif vca.get("kdu_name"):
6957 config_descriptor = get_configuration(
6958 db_vnfd, vca.get("kdu_name")
6959 )
6960 else:
6961 config_descriptor = get_configuration(
6962 db_vnfd, db_vnfd["id"]
6963 )
6964 operation_params = (
6965 db_nslcmop.get("operationParams") or {}
6966 )
6967 exec_terminate_primitives = not operation_params.get(
6968 "skip_terminate_primitives"
6969 ) and vca.get("needed_terminate")
6970 task = asyncio.ensure_future(
6971 asyncio.wait_for(
6972 self.destroy_N2VC(
6973 logging_text,
6974 db_nslcmop,
6975 vca,
6976 config_descriptor,
6977 vca_index,
6978 destroy_ee=True,
6979 exec_primitives=exec_terminate_primitives,
6980 scaling_in=True,
6981 vca_id=vca_id,
6982 ),
6983 timeout=self.timeout.charm_delete,
6984 )
6985 )
6986 tasks_dict_info[task] = "Terminating VCA {}".format(
6987 vca.get("ee_id")
6988 )
6989 del vca_update[vca_index]
6990 del config_update[vca_index]
6991 # wait for pending tasks of terminate primitives
6992 if tasks_dict_info:
6993 self.logger.debug(
6994 logging_text
6995 + "Waiting for tasks {}".format(
6996 list(tasks_dict_info.keys())
6997 )
6998 )
6999 error_list = await self._wait_for_tasks(
7000 logging_text,
7001 tasks_dict_info,
7002 min(
7003 self.timeout.charm_delete, self.timeout.ns_terminate
7004 ),
7005 stage,
7006 nslcmop_id,
7007 )
7008 tasks_dict_info.clear()
7009 if error_list:
7010 raise LcmException("; ".join(error_list))
7011
7012 db_vca_and_config_update = {
7013 "_admin.deployed.VCA": vca_update,
7014 "configurationStatus": config_update,
7015 }
7016 self.update_db_2(
7017 "nsrs", db_nsr["_id"], db_vca_and_config_update
7018 )
7019 scale_process = None
7020 # SCALE-IN VCA - END
7021
7022 # SCALE RO - BEGIN
7023 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
7024 scale_process = "RO"
7025 if self.ro_config.ng:
7026 await self._scale_ng_ro(
7027 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
7028 )
7029 scaling_info.pop("vdu-create", None)
7030 scaling_info.pop("vdu-delete", None)
7031
7032 scale_process = None
7033 # SCALE RO - END
7034
7035 # SCALE KDU - BEGIN
7036 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
7037 scale_process = "KDU"
7038 await self._scale_kdu(
7039 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7040 )
7041 scaling_info.pop("kdu-create", None)
7042 scaling_info.pop("kdu-delete", None)
7043
7044 scale_process = None
7045 # SCALE KDU - END
7046
7047 if db_nsr_update:
7048 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7049
7050 # SCALE-UP VCA - BEGIN
7051 if vca_scaling_info:
7052 step = db_nslcmop_update[
7053 "detailed-status"
7054 ] = "Creating new execution environments"
7055 scale_process = "VCA"
7056 for vca_info in vca_scaling_info:
7057 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7058 member_vnf_index = str(vca_info["member-vnf-index"])
7059 self.logger.debug(
7060 logging_text + "vdu info: {}".format(vca_info)
7061 )
7062 vnfd_id = db_vnfr["vnfd-ref"]
7063 if vca_info.get("osm_vdu_id"):
7064 vdu_index = int(vca_info["vdu_index"])
7065 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7066 if db_vnfr.get("additionalParamsForVnf"):
7067 deploy_params.update(
7068 parse_yaml_strings(
7069 db_vnfr["additionalParamsForVnf"].copy()
7070 )
7071 )
7072 descriptor_config = get_configuration(
7073 db_vnfd, db_vnfd["id"]
7074 )
7075 if descriptor_config:
7076 vdu_id = None
7077 vdu_name = None
7078 kdu_name = None
7079 kdu_index = None
7080 self._deploy_n2vc(
7081 logging_text=logging_text
7082 + "member_vnf_index={} ".format(member_vnf_index),
7083 db_nsr=db_nsr,
7084 db_vnfr=db_vnfr,
7085 nslcmop_id=nslcmop_id,
7086 nsr_id=nsr_id,
7087 nsi_id=nsi_id,
7088 vnfd_id=vnfd_id,
7089 vdu_id=vdu_id,
7090 kdu_name=kdu_name,
7091 kdu_index=kdu_index,
7092 member_vnf_index=member_vnf_index,
7093 vdu_index=vdu_index,
7094 vdu_name=vdu_name,
7095 deploy_params=deploy_params,
7096 descriptor_config=descriptor_config,
7097 base_folder=base_folder,
7098 task_instantiation_info=tasks_dict_info,
7099 stage=stage,
7100 )
7101 vdu_id = vca_info["osm_vdu_id"]
7102 vdur = find_in_list(
7103 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7104 )
7105 descriptor_config = get_configuration(db_vnfd, vdu_id)
7106 if vdur.get("additionalParams"):
7107 deploy_params_vdu = parse_yaml_strings(
7108 vdur["additionalParams"]
7109 )
7110 else:
7111 deploy_params_vdu = deploy_params
7112 deploy_params_vdu["OSM"] = get_osm_params(
7113 db_vnfr, vdu_id, vdu_count_index=vdu_index
7114 )
7115 if descriptor_config:
7116 vdu_name = None
7117 kdu_name = None
7118 kdu_index = None
7119 stage[
7120 1
7121 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7122 member_vnf_index, vdu_id, vdu_index
7123 )
7124 stage[2] = step = "Scaling out VCA"
7125 self._write_op_status(op_id=nslcmop_id, stage=stage)
7126 self._deploy_n2vc(
7127 logging_text=logging_text
7128 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7129 member_vnf_index, vdu_id, vdu_index
7130 ),
7131 db_nsr=db_nsr,
7132 db_vnfr=db_vnfr,
7133 nslcmop_id=nslcmop_id,
7134 nsr_id=nsr_id,
7135 nsi_id=nsi_id,
7136 vnfd_id=vnfd_id,
7137 vdu_id=vdu_id,
7138 kdu_name=kdu_name,
7139 member_vnf_index=member_vnf_index,
7140 vdu_index=vdu_index,
7141 kdu_index=kdu_index,
7142 vdu_name=vdu_name,
7143 deploy_params=deploy_params_vdu,
7144 descriptor_config=descriptor_config,
7145 base_folder=base_folder,
7146 task_instantiation_info=tasks_dict_info,
7147 stage=stage,
7148 )
7149 # SCALE-UP VCA - END
7150 scale_process = None
7151
7152 # POST-SCALE BEGIN
7153 # execute primitive service POST-SCALING
7154 step = "Executing post-scale vnf-config-primitive"
7155 if scaling_descriptor.get("scaling-config-action"):
7156 for scaling_config_action in scaling_descriptor[
7157 "scaling-config-action"
7158 ]:
7159 if (
7160 scaling_config_action.get("trigger") == "post-scale-in"
7161 and scaling_type == "SCALE_IN"
7162 ) or (
7163 scaling_config_action.get("trigger") == "post-scale-out"
7164 and scaling_type == "SCALE_OUT"
7165 ):
7166 vnf_config_primitive = scaling_config_action[
7167 "vnf-config-primitive-name-ref"
7168 ]
7169 step = db_nslcmop_update[
7170 "detailed-status"
7171 ] = "executing post-scale scaling-config-action '{}'".format(
7172 vnf_config_primitive
7173 )
7174
7175 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7176 if db_vnfr.get("additionalParamsForVnf"):
7177 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7178
7179 # look for primitive
7180 for config_primitive in (
7181 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7182 ).get("config-primitive", ()):
7183 if config_primitive["name"] == vnf_config_primitive:
7184 break
7185 else:
7186 raise LcmException(
7187 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7188 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7189 "config-primitive".format(
7190 scaling_group, vnf_config_primitive
7191 )
7192 )
7193 scale_process = "VCA"
7194 db_nsr_update["config-status"] = "configuring post-scaling"
7195 primitive_params = self._map_primitive_params(
7196 config_primitive, {}, vnfr_params
7197 )
7198
7199 # Post-scale retry check: Check if this sub-operation has been executed before
7200 op_index = self._check_or_add_scale_suboperation(
7201 db_nslcmop,
7202 vnf_index,
7203 vnf_config_primitive,
7204 primitive_params,
7205 "POST-SCALE",
7206 )
7207 if op_index == self.SUBOPERATION_STATUS_SKIP:
7208 # Skip sub-operation
7209 result = "COMPLETED"
7210 result_detail = "Done"
7211 self.logger.debug(
7212 logging_text
7213 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7214 vnf_config_primitive, result, result_detail
7215 )
7216 )
7217 else:
7218 if op_index == self.SUBOPERATION_STATUS_NEW:
7219 # New sub-operation: Get index of this sub-operation
7220 op_index = (
7221 len(db_nslcmop.get("_admin", {}).get("operations"))
7222 - 1
7223 )
7224 self.logger.debug(
7225 logging_text
7226 + "vnf_config_primitive={} New sub-operation".format(
7227 vnf_config_primitive
7228 )
7229 )
7230 else:
7231 # retry: Get registered params for this existing sub-operation
7232 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7233 op_index
7234 ]
7235 vnf_index = op.get("member_vnf_index")
7236 vnf_config_primitive = op.get("primitive")
7237 primitive_params = op.get("primitive_params")
7238 self.logger.debug(
7239 logging_text
7240 + "vnf_config_primitive={} Sub-operation retry".format(
7241 vnf_config_primitive
7242 )
7243 )
7244 # Execute the primitive, either with new (first-time) or registered (reintent) args
7245 ee_descriptor_id = config_primitive.get(
7246 "execution-environment-ref"
7247 )
7248 primitive_name = config_primitive.get(
7249 "execution-environment-primitive", vnf_config_primitive
7250 )
7251 ee_id, vca_type = self._look_for_deployed_vca(
7252 nsr_deployed["VCA"],
7253 member_vnf_index=vnf_index,
7254 vdu_id=None,
7255 vdu_count_index=None,
7256 ee_descriptor_id=ee_descriptor_id,
7257 )
7258 result, result_detail = await self._ns_execute_primitive(
7259 ee_id,
7260 primitive_name,
7261 primitive_params,
7262 vca_type=vca_type,
7263 vca_id=vca_id,
7264 )
7265 self.logger.debug(
7266 logging_text
7267 + "vnf_config_primitive={} Done with result {} {}".format(
7268 vnf_config_primitive, result, result_detail
7269 )
7270 )
7271 # Update operationState = COMPLETED | FAILED
7272 self._update_suboperation_status(
7273 db_nslcmop, op_index, result, result_detail
7274 )
7275
7276 if result == "FAILED":
7277 raise LcmException(result_detail)
7278 db_nsr_update["config-status"] = old_config_status
7279 scale_process = None
7280 # POST-SCALE END
7281 # Check if each vnf has exporter for metric collection if so update prometheus job records
7282 if scaling_type == "SCALE_OUT":
7283 if "exporters-endpoints" in db_vnfd.get("df")[0]:
7284 vnfr_id = db_vnfr["id"]
7285 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7286 exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
7287 self.logger.debug("exporter config :{}".format(exporter_config))
7288 artifact_path = "{}/{}/{}".format(
7289 base_folder["folder"],
7290 base_folder["pkg-dir"],
7291 "exporter-endpoint",
7292 )
7293 ee_id = None
7294 ee_config_descriptor = exporter_config
7295 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
7296 logging_text,
7297 nsr_id,
7298 vnfr_id,
7299 vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
7300 vdu_index=db_vnfr["vdur"][-1]["count-index"],
7301 user=None,
7302 pub_key=None,
7303 )
7304 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
7305 self.logger.debug("Artifact_path:{}".format(artifact_path))
7306 vdu_id_for_prom = None
7307 vdu_index_for_prom = None
7308 for x in get_iterable(db_vnfr, "vdur"):
7309 vdu_id_for_prom = x.get("vdu-id-ref")
7310 vdu_index_for_prom = x.get("count-index")
7311 vnfr_id = vnfr_id + vdu_id + str(vdu_index)
7312 vnfr_id = vnfr_id.replace("_", "")
7313 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
7314 ee_id=ee_id,
7315 artifact_path=artifact_path,
7316 ee_config_descriptor=ee_config_descriptor,
7317 vnfr_id=vnfr_id,
7318 nsr_id=nsr_id,
7319 target_ip=rw_mgmt_ip,
7320 element_type="VDU",
7321 vdu_id=vdu_id_for_prom,
7322 vdu_index=vdu_index_for_prom,
7323 )
7324
7325 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
7326 if prometheus_jobs:
7327 db_nsr_update[
7328 "_admin.deployed.prometheus_jobs"
7329 ] = prometheus_jobs
7330 self.update_db_2(
7331 "nsrs",
7332 nsr_id,
7333 db_nsr_update,
7334 )
7335
7336 for job in prometheus_jobs:
7337 self.db.set_one(
7338 "prometheus_jobs",
7339 {"job_name": ""},
7340 job,
7341 upsert=True,
7342 fail_on_empty=False,
7343 )
7344 db_nsr_update[
7345 "detailed-status"
7346 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7347 db_nsr_update["operational-status"] = (
7348 "running"
7349 if old_operational_status == "failed"
7350 else old_operational_status
7351 )
7352 db_nsr_update["config-status"] = old_config_status
7353 return
7354 except (
7355 ROclient.ROClientException,
7356 DbException,
7357 LcmException,
7358 NgRoException,
7359 ) as e:
7360 self.logger.error(logging_text + "Exit Exception {}".format(e))
7361 exc = e
7362 except asyncio.CancelledError:
7363 self.logger.error(
7364 logging_text + "Cancelled Exception while '{}'".format(step)
7365 )
7366 exc = "Operation was cancelled"
7367 except Exception as e:
7368 exc = traceback.format_exc()
7369 self.logger.critical(
7370 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7371 exc_info=True,
7372 )
7373 finally:
7374 error_list = list()
7375 if exc:
7376 error_list.append(str(exc))
7377 self._write_ns_status(
7378 nsr_id=nsr_id,
7379 ns_state=None,
7380 current_operation="IDLE",
7381 current_operation_id=None,
7382 )
7383 try:
7384 if tasks_dict_info:
7385 stage[1] = "Waiting for instantiate pending tasks."
7386 self.logger.debug(logging_text + stage[1])
7387 exc = await self._wait_for_tasks(
7388 logging_text,
7389 tasks_dict_info,
7390 self.timeout.ns_deploy,
7391 stage,
7392 nslcmop_id,
7393 nsr_id=nsr_id,
7394 )
7395 except asyncio.CancelledError:
7396 error_list.append("Cancelled")
7397 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
7398 await self._wait_for_tasks(
7399 logging_text,
7400 tasks_dict_info,
7401 self.timeout.ns_deploy,
7402 stage,
7403 nslcmop_id,
7404 nsr_id=nsr_id,
7405 )
7406 if error_list:
7407 error_detail = "; ".join(error_list)
7408 db_nslcmop_update[
7409 "detailed-status"
7410 ] = error_description_nslcmop = "FAILED {}: {}".format(
7411 step, error_detail
7412 )
7413 nslcmop_operation_state = "FAILED"
7414 if db_nsr:
7415 db_nsr_update["operational-status"] = old_operational_status
7416 db_nsr_update["config-status"] = old_config_status
7417 db_nsr_update["detailed-status"] = ""
7418 if scale_process:
7419 if "VCA" in scale_process:
7420 db_nsr_update["config-status"] = "failed"
7421 if "RO" in scale_process:
7422 db_nsr_update["operational-status"] = "failed"
7423 db_nsr_update[
7424 "detailed-status"
7425 ] = "FAILED scaling nslcmop={} {}: {}".format(
7426 nslcmop_id, step, error_detail
7427 )
7428 else:
7429 error_description_nslcmop = None
7430 nslcmop_operation_state = "COMPLETED"
7431 db_nslcmop_update["detailed-status"] = "Done"
7432 if scaling_type == "SCALE_IN" and prom_job_name is not None:
7433 self.db.del_one(
7434 "prometheus_jobs",
7435 {"job_name": prom_job_name},
7436 fail_on_empty=False,
7437 )
7438
7439 self._write_op_status(
7440 op_id=nslcmop_id,
7441 stage="",
7442 error_message=error_description_nslcmop,
7443 operation_state=nslcmop_operation_state,
7444 other_update=db_nslcmop_update,
7445 )
7446 if db_nsr:
7447 self._write_ns_status(
7448 nsr_id=nsr_id,
7449 ns_state=None,
7450 current_operation="IDLE",
7451 current_operation_id=None,
7452 other_update=db_nsr_update,
7453 )
7454
7455 if nslcmop_operation_state:
7456 try:
7457 msg = {
7458 "nsr_id": nsr_id,
7459 "nslcmop_id": nslcmop_id,
7460 "operationState": nslcmop_operation_state,
7461 }
7462 await self.msg.aiowrite("ns", "scaled", msg)
7463 except Exception as e:
7464 self.logger.error(
7465 logging_text + "kafka_write notification Exception {}".format(e)
7466 )
7467 self.logger.debug(logging_text + "Exit")
7468 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7469
7470 async def _scale_kdu(
7471 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7472 ):
7473 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7474 for kdu_name in _scaling_info:
7475 for kdu_scaling_info in _scaling_info[kdu_name]:
7476 deployed_kdu, index = get_deployed_kdu(
7477 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7478 )
7479 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7480 kdu_instance = deployed_kdu["kdu-instance"]
7481 kdu_model = deployed_kdu.get("kdu-model")
7482 scale = int(kdu_scaling_info["scale"])
7483 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7484
7485 db_dict = {
7486 "collection": "nsrs",
7487 "filter": {"_id": nsr_id},
7488 "path": "_admin.deployed.K8s.{}".format(index),
7489 }
7490
7491 step = "scaling application {}".format(
7492 kdu_scaling_info["resource-name"]
7493 )
7494 self.logger.debug(logging_text + step)
7495
7496 if kdu_scaling_info["type"] == "delete":
7497 kdu_config = get_configuration(db_vnfd, kdu_name)
7498 if (
7499 kdu_config
7500 and kdu_config.get("terminate-config-primitive")
7501 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7502 ):
7503 terminate_config_primitive_list = kdu_config.get(
7504 "terminate-config-primitive"
7505 )
7506 terminate_config_primitive_list.sort(
7507 key=lambda val: int(val["seq"])
7508 )
7509
7510 for (
7511 terminate_config_primitive
7512 ) in terminate_config_primitive_list:
7513 primitive_params_ = self._map_primitive_params(
7514 terminate_config_primitive, {}, {}
7515 )
7516 step = "execute terminate config primitive"
7517 self.logger.debug(logging_text + step)
7518 await asyncio.wait_for(
7519 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7520 cluster_uuid=cluster_uuid,
7521 kdu_instance=kdu_instance,
7522 primitive_name=terminate_config_primitive["name"],
7523 params=primitive_params_,
7524 db_dict=db_dict,
7525 total_timeout=self.timeout.primitive,
7526 vca_id=vca_id,
7527 ),
7528 timeout=self.timeout.primitive
7529 * self.timeout.primitive_outer_factor,
7530 )
7531
7532 await asyncio.wait_for(
7533 self.k8scluster_map[k8s_cluster_type].scale(
7534 kdu_instance=kdu_instance,
7535 scale=scale,
7536 resource_name=kdu_scaling_info["resource-name"],
7537 total_timeout=self.timeout.scale_on_error,
7538 vca_id=vca_id,
7539 cluster_uuid=cluster_uuid,
7540 kdu_model=kdu_model,
7541 atomic=True,
7542 db_dict=db_dict,
7543 ),
7544 timeout=self.timeout.scale_on_error
7545 * self.timeout.scale_on_error_outer_factor,
7546 )
7547
7548 if kdu_scaling_info["type"] == "create":
7549 kdu_config = get_configuration(db_vnfd, kdu_name)
7550 if (
7551 kdu_config
7552 and kdu_config.get("initial-config-primitive")
7553 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7554 ):
7555 initial_config_primitive_list = kdu_config.get(
7556 "initial-config-primitive"
7557 )
7558 initial_config_primitive_list.sort(
7559 key=lambda val: int(val["seq"])
7560 )
7561
7562 for initial_config_primitive in initial_config_primitive_list:
7563 primitive_params_ = self._map_primitive_params(
7564 initial_config_primitive, {}, {}
7565 )
7566 step = "execute initial config primitive"
7567 self.logger.debug(logging_text + step)
7568 await asyncio.wait_for(
7569 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7570 cluster_uuid=cluster_uuid,
7571 kdu_instance=kdu_instance,
7572 primitive_name=initial_config_primitive["name"],
7573 params=primitive_params_,
7574 db_dict=db_dict,
7575 vca_id=vca_id,
7576 ),
7577 timeout=600,
7578 )
7579
    async def _scale_ng_ro(
        self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
    ):
        """Apply the RO (NG-RO) part of a VDU scaling operation.

        Marks the VDUs to create/delete in the VNF record, asks NG-RO to
        instantiate the resulting target and, once RO finishes, removes the
        vdur records that were marked for deletion.

        Args:
            logging_text: prefix for log messages
            db_nsr: NS record (nsrs collection)
            db_nslcmop: current operation record (nslcmops collection)
            db_vnfr: VNF record of the VNF being scaled; updated in place
            vdu_scaling_info: dict with optional "vdu-create"/"vdu-delete" keys
            stage: progress list reported to the operation status in db
        """
        nsr_id = db_nslcmop["nsInstanceId"]
        db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
        db_vnfrs = {}

        # read from db: vnfd's for every vnf
        db_vnfds = []

        # for each vnf in ns, read vnfd
        for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
            db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
            vnfd_id = vnfr["vnfd-id"]  # vnfd uuid for this vnf
            # if we haven't this vnfd, read it from db
            # NOTE(review): vnfd_id holds the db uuid ("_id") while
            # a_vnfd["id"] is the descriptor id-ref, so this membership test
            # may never match and the vnfd would be re-read (and appended)
            # once per vnfr — confirm whether a_vnfd["_id"] was intended.
            if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
                # read from db
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                db_vnfds.append(vnfd)
        n2vc_key = self.n2vc.get_public_key()
        n2vc_key_list = [n2vc_key]
        # mark vdus to create/delete in db_vnfr; deleted vdurs are only
        # flagged here and actually removed after RO completes (below)
        self.scale_vnfr(
            db_vnfr,
            vdu_scaling_info.get("vdu-create"),
            vdu_scaling_info.get("vdu-delete"),
            mark_delete=True,
        )
        # db_vnfr has been updated, update db_vnfrs to use it
        db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
        await self._instantiate_ng_ro(
            logging_text,
            nsr_id,
            db_nsd,
            db_nsr,
            db_nslcmop,
            db_vnfrs,
            db_vnfds,
            n2vc_key_list,
            stage=stage,
            start_deploy=time(),
            timeout_ns_deploy=self.timeout.ns_deploy,
        )
        if vdu_scaling_info.get("vdu-delete"):
            # RO has removed the VMs: drop the previously marked vdur records
            self.scale_vnfr(
                db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
            )
7626
7627 async def extract_prometheus_scrape_jobs(
7628 self,
7629 ee_id: str,
7630 artifact_path: str,
7631 ee_config_descriptor: dict,
7632 vnfr_id: str,
7633 nsr_id: str,
7634 target_ip: str,
7635 element_type: str,
7636 vnf_member_index: str = "",
7637 vdu_id: str = "",
7638 vdu_index: int = None,
7639 kdu_name: str = "",
7640 kdu_index: int = None,
7641 ) -> dict:
7642 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7643 This method will wait until the corresponding VDU or KDU is fully instantiated
7644
7645 Args:
7646 ee_id (str): Execution Environment ID
7647 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7648 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7649 vnfr_id (str): VNFR ID where this EE applies
7650 nsr_id (str): NSR ID where this EE applies
7651 target_ip (str): VDU/KDU instance IP address
7652 element_type (str): NS or VNF or VDU or KDU
7653 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7654 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7655 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7656 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7657 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7658
7659 Raises:
7660 LcmException: When the VDU or KDU instance was not found in an hour
7661
7662 Returns:
7663 _type_: Prometheus jobs
7664 """
7665 # default the vdur and kdur names to an empty string, to avoid any later
7666 # problem with Prometheus when the element type is not VDU or KDU
7667 vdur_name = ""
7668 kdur_name = ""
7669
7670 # look if exist a file called 'prometheus*.j2' and
7671 artifact_content = self.fs.dir_ls(artifact_path)
7672 job_file = next(
7673 (
7674 f
7675 for f in artifact_content
7676 if f.startswith("prometheus") and f.endswith(".j2")
7677 ),
7678 None,
7679 )
7680 if not job_file:
7681 return
7682 self.logger.debug("Artifact path{}".format(artifact_path))
7683 self.logger.debug("job file{}".format(job_file))
7684 with self.fs.file_open((artifact_path, job_file), "r") as f:
7685 job_data = f.read()
7686
7687 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7688 if element_type in ("VDU", "KDU"):
7689 for _ in range(360):
7690 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7691 if vdu_id and vdu_index is not None:
7692 vdur = next(
7693 (
7694 x
7695 for x in get_iterable(db_vnfr, "vdur")
7696 if (
7697 x.get("vdu-id-ref") == vdu_id
7698 and x.get("count-index") == vdu_index
7699 )
7700 ),
7701 {},
7702 )
7703 if vdur.get("name"):
7704 vdur_name = vdur.get("name")
7705 break
7706 if kdu_name and kdu_index is not None:
7707 kdur = next(
7708 (
7709 x
7710 for x in get_iterable(db_vnfr, "kdur")
7711 if (
7712 x.get("kdu-name") == kdu_name
7713 and x.get("count-index") == kdu_index
7714 )
7715 ),
7716 {},
7717 )
7718 if kdur.get("name"):
7719 kdur_name = kdur.get("name")
7720 break
7721
7722 await asyncio.sleep(10)
7723 else:
7724 if vdu_id and vdu_index is not None:
7725 raise LcmException(
7726 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7727 )
7728 if kdu_name and kdu_index is not None:
7729 raise LcmException(
7730 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7731 )
7732
7733 if ee_id is not None:
7734 _, namespace, helm_id = get_ee_id_parts(
7735 ee_id
7736 ) # get namespace and EE gRPC service name
7737 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7738 host_port = "80"
7739 vnfr_id = vnfr_id.replace("-", "")
7740 variables = {
7741 "JOB_NAME": vnfr_id,
7742 "TARGET_IP": target_ip,
7743 "EXPORTER_POD_IP": host_name,
7744 "EXPORTER_POD_PORT": host_port,
7745 "NSR_ID": nsr_id,
7746 "VNF_MEMBER_INDEX": vnf_member_index,
7747 "VDUR_NAME": vdur_name,
7748 "KDUR_NAME": kdur_name,
7749 "ELEMENT_TYPE": element_type,
7750 }
7751 else:
7752 metric_path = ee_config_descriptor["metric-path"]
7753 target_port = ee_config_descriptor["metric-port"]
7754 vnfr_id = vnfr_id.replace("-", "")
7755 variables = {
7756 "JOB_NAME": vnfr_id,
7757 "TARGET_IP": target_ip,
7758 "TARGET_PORT": target_port,
7759 "METRIC_PATH": metric_path,
7760 }
7761
7762 job_list = parse_job(job_data, variables)
7763 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7764 for job in job_list:
7765 if (
7766 not isinstance(job.get("job_name"), str)
7767 or vnfr_id not in job["job_name"]
7768 ):
7769 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7770 job["nsr_id"] = nsr_id
7771 job["vnfr_id"] = vnfr_id
7772 return job_list
7773
7774 async def rebuild_start_stop(
7775 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7776 ):
7777 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7778 self.logger.info(logging_text + "Enter")
7779 stage = ["Preparing the environment", ""]
7780 # database nsrs record
7781 db_nsr_update = {}
7782 vdu_vim_name = None
7783 vim_vm_id = None
7784 # in case of error, indicates what part of scale was failed to put nsr at error status
7785 start_deploy = time()
7786 try:
7787 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7788 vim_account_id = db_vnfr.get("vim-account-id")
7789 vim_info_key = "vim:" + vim_account_id
7790 vdu_id = additional_param["vdu_id"]
7791 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7792 vdur = find_in_list(
7793 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7794 )
7795 if vdur:
7796 vdu_vim_name = vdur["name"]
7797 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7798 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7799 else:
7800 raise LcmException("Target vdu is not found")
7801 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7802 # wait for any previous tasks in process
7803 stage[1] = "Waiting for previous operations to terminate"
7804 self.logger.info(stage[1])
7805 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7806
7807 stage[1] = "Reading from database."
7808 self.logger.info(stage[1])
7809 self._write_ns_status(
7810 nsr_id=nsr_id,
7811 ns_state=None,
7812 current_operation=operation_type.upper(),
7813 current_operation_id=nslcmop_id,
7814 )
7815 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7816
7817 # read from db: ns
7818 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7819 db_nsr_update["operational-status"] = operation_type
7820 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7821 # Payload for RO
7822 desc = {
7823 operation_type: {
7824 "vim_vm_id": vim_vm_id,
7825 "vnf_id": vnf_id,
7826 "vdu_index": additional_param["count-index"],
7827 "vdu_id": vdur["id"],
7828 "target_vim": target_vim,
7829 "vim_account_id": vim_account_id,
7830 }
7831 }
7832 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7833 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7834 self.logger.info("ro nsr id: {}".format(nsr_id))
7835 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7836 self.logger.info("response from RO: {}".format(result_dict))
7837 action_id = result_dict["action_id"]
7838 await self._wait_ng_ro(
7839 nsr_id,
7840 action_id,
7841 nslcmop_id,
7842 start_deploy,
7843 self.timeout.operate,
7844 None,
7845 "start_stop_rebuild",
7846 )
7847 return "COMPLETED", "Done"
7848 except (ROclient.ROClientException, DbException, LcmException) as e:
7849 self.logger.error("Exit Exception {}".format(e))
7850 exc = e
7851 except asyncio.CancelledError:
7852 self.logger.error("Cancelled Exception while '{}'".format(stage))
7853 exc = "Operation was cancelled"
7854 except Exception as e:
7855 exc = traceback.format_exc()
7856 self.logger.critical(
7857 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7858 )
7859 return "FAILED", "Error in operate VNF {}".format(exc)
7860
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        Forwards the operationParams of the nslcmop to RO and waits for the
        resulting RO action to finish, then reports the operation result
        (db update + kafka "migrated" notification).

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return  # another LCM instance owns this operation
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # the whole operationParams content becomes the RO target
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # poll RO until the migrate action finishes or times out
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always release the NS from the MIGRATING state
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                # NOTE(review): db_nsr_update is filled here but never passed
                # to update_db_2/_write_ns_status, so this value does not get
                # persisted — confirm whether an nsr update was intended.
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # best-effort notification: a kafka failure must not mask the
                # operation result
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7963
7964 async def heal(self, nsr_id, nslcmop_id):
7965 """
7966 Heal NS
7967
7968 :param nsr_id: ns instance to heal
7969 :param nslcmop_id: operation to run
7970 :return:
7971 """
7972
7973 # Try to lock HA task here
7974 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7975 if not task_is_locked_by_me:
7976 return
7977
7978 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7979 stage = ["", "", ""]
7980 tasks_dict_info = {}
7981 # ^ stage, step, VIM progress
7982 self.logger.debug(logging_text + "Enter")
7983 # get all needed from database
7984 db_nsr = None
7985 db_nslcmop_update = {}
7986 db_nsr_update = {}
7987 db_vnfrs = {} # vnf's info indexed by _id
7988 exc = None
7989 old_operational_status = ""
7990 old_config_status = ""
7991 nsi_id = None
7992 try:
7993 # wait for any previous tasks in process
7994 step = "Waiting for previous operations to terminate"
7995 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7996 self._write_ns_status(
7997 nsr_id=nsr_id,
7998 ns_state=None,
7999 current_operation="HEALING",
8000 current_operation_id=nslcmop_id,
8001 )
8002
8003 step = "Getting nslcmop from database"
8004 self.logger.debug(
8005 step + " after having waited for previous tasks to be completed"
8006 )
8007 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8008
8009 step = "Getting nsr from database"
8010 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8011 old_operational_status = db_nsr["operational-status"]
8012 old_config_status = db_nsr["config-status"]
8013
8014 db_nsr_update = {
8015 "operational-status": "healing",
8016 "_admin.deployed.RO.operational-status": "healing",
8017 }
8018 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8019
8020 step = "Sending heal order to VIM"
8021 await self.heal_RO(
8022 logging_text=logging_text,
8023 nsr_id=nsr_id,
8024 db_nslcmop=db_nslcmop,
8025 stage=stage,
8026 )
8027 # VCA tasks
8028 # read from db: nsd
8029 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
8030 self.logger.debug(logging_text + stage[1])
8031 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
8032 self.fs.sync(db_nsr["nsd-id"])
8033 db_nsr["nsd"] = nsd
8034 # read from db: vnfr's of this ns
8035 step = "Getting vnfrs from db"
8036 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
8037 for vnfr in db_vnfrs_list:
8038 db_vnfrs[vnfr["_id"]] = vnfr
8039 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
8040
8041 # Check for each target VNF
8042 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
8043 for target_vnf in target_list:
8044 # Find this VNF in the list from DB
8045 vnfr_id = target_vnf.get("vnfInstanceId", None)
8046 if vnfr_id:
8047 db_vnfr = db_vnfrs[vnfr_id]
8048 vnfd_id = db_vnfr.get("vnfd-id")
8049 vnfd_ref = db_vnfr.get("vnfd-ref")
8050 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
8051 base_folder = vnfd["_admin"]["storage"]
8052 vdu_id = None
8053 vdu_index = 0
8054 vdu_name = None
8055 kdu_name = None
8056 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
8057 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
8058
8059 # Check each target VDU and deploy N2VC
8060 target_vdu_list = target_vnf.get("additionalParams", {}).get(
8061 "vdu", []
8062 )
8063 if not target_vdu_list:
8064 # Codigo nuevo para crear diccionario
8065 target_vdu_list = []
8066 for existing_vdu in db_vnfr.get("vdur"):
8067 vdu_name = existing_vdu.get("vdu-name", None)
8068 vdu_index = existing_vdu.get("count-index", 0)
8069 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
8070 "run-day1", False
8071 )
8072 vdu_to_be_healed = {
8073 "vdu-id": vdu_name,
8074 "count-index": vdu_index,
8075 "run-day1": vdu_run_day1,
8076 }
8077 target_vdu_list.append(vdu_to_be_healed)
8078 for target_vdu in target_vdu_list:
8079 deploy_params_vdu = target_vdu
8080 # Set run-day1 vnf level value if not vdu level value exists
8081 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
8082 "additionalParams", {}
8083 ).get("run-day1"):
8084 deploy_params_vdu["run-day1"] = target_vnf[
8085 "additionalParams"
8086 ].get("run-day1")
8087 vdu_name = target_vdu.get("vdu-id", None)
8088 # TODO: Get vdu_id from vdud.
8089 vdu_id = vdu_name
8090 # For multi instance VDU count-index is mandatory
8091 # For single session VDU count-indes is 0
8092 vdu_index = target_vdu.get("count-index", 0)
8093
8094 # n2vc_redesign STEP 3 to 6 Deploy N2VC
8095 stage[1] = "Deploying Execution Environments."
8096 self.logger.debug(logging_text + stage[1])
8097
8098 # VNF Level charm. Normal case when proxy charms.
8099 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
8100 descriptor_config = get_configuration(vnfd, vnfd_ref)
8101 if descriptor_config:
8102 # Continue if healed machine is management machine
8103 vnf_ip_address = db_vnfr.get("ip-address")
8104 target_instance = None
8105 for instance in db_vnfr.get("vdur", None):
8106 if (
8107 instance["vdu-name"] == vdu_name
8108 and instance["count-index"] == vdu_index
8109 ):
8110 target_instance = instance
8111 break
8112 if vnf_ip_address == target_instance.get("ip-address"):
8113 self._heal_n2vc(
8114 logging_text=logging_text
8115 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8116 member_vnf_index, vdu_name, vdu_index
8117 ),
8118 db_nsr=db_nsr,
8119 db_vnfr=db_vnfr,
8120 nslcmop_id=nslcmop_id,
8121 nsr_id=nsr_id,
8122 nsi_id=nsi_id,
8123 vnfd_id=vnfd_ref,
8124 vdu_id=None,
8125 kdu_name=None,
8126 member_vnf_index=member_vnf_index,
8127 vdu_index=0,
8128 vdu_name=None,
8129 deploy_params=deploy_params_vdu,
8130 descriptor_config=descriptor_config,
8131 base_folder=base_folder,
8132 task_instantiation_info=tasks_dict_info,
8133 stage=stage,
8134 )
8135
8136 # VDU Level charm. Normal case with native charms.
8137 descriptor_config = get_configuration(vnfd, vdu_name)
8138 if descriptor_config:
8139 self._heal_n2vc(
8140 logging_text=logging_text
8141 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8142 member_vnf_index, vdu_name, vdu_index
8143 ),
8144 db_nsr=db_nsr,
8145 db_vnfr=db_vnfr,
8146 nslcmop_id=nslcmop_id,
8147 nsr_id=nsr_id,
8148 nsi_id=nsi_id,
8149 vnfd_id=vnfd_ref,
8150 vdu_id=vdu_id,
8151 kdu_name=kdu_name,
8152 member_vnf_index=member_vnf_index,
8153 vdu_index=vdu_index,
8154 vdu_name=vdu_name,
8155 deploy_params=deploy_params_vdu,
8156 descriptor_config=descriptor_config,
8157 base_folder=base_folder,
8158 task_instantiation_info=tasks_dict_info,
8159 stage=stage,
8160 )
8161 except (
8162 ROclient.ROClientException,
8163 DbException,
8164 LcmException,
8165 NgRoException,
8166 ) as e:
8167 self.logger.error(logging_text + "Exit Exception {}".format(e))
8168 exc = e
8169 except asyncio.CancelledError:
8170 self.logger.error(
8171 logging_text + "Cancelled Exception while '{}'".format(step)
8172 )
8173 exc = "Operation was cancelled"
8174 except Exception as e:
8175 exc = traceback.format_exc()
8176 self.logger.critical(
8177 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8178 exc_info=True,
8179 )
8180 finally:
8181 error_list = list()
8182 if db_vnfrs_list and target_list:
8183 for vnfrs in db_vnfrs_list:
8184 for vnf_instance in target_list:
8185 if vnfrs["_id"] == vnf_instance.get("vnfInstanceId"):
8186 self.db.set_list(
8187 "vnfrs",
8188 {"_id": vnfrs["_id"]},
8189 {"_admin.modified": time()},
8190 )
8191 if exc:
8192 error_list.append(str(exc))
8193 try:
8194 if tasks_dict_info:
8195 stage[1] = "Waiting for healing pending tasks."
8196 self.logger.debug(logging_text + stage[1])
8197 exc = await self._wait_for_tasks(
8198 logging_text,
8199 tasks_dict_info,
8200 self.timeout.ns_deploy,
8201 stage,
8202 nslcmop_id,
8203 nsr_id=nsr_id,
8204 )
8205 except asyncio.CancelledError:
8206 error_list.append("Cancelled")
8207 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
8208 await self._wait_for_tasks(
8209 logging_text,
8210 tasks_dict_info,
8211 self.timeout.ns_deploy,
8212 stage,
8213 nslcmop_id,
8214 nsr_id=nsr_id,
8215 )
8216 if error_list:
8217 error_detail = "; ".join(error_list)
8218 db_nslcmop_update[
8219 "detailed-status"
8220 ] = error_description_nslcmop = "FAILED {}: {}".format(
8221 step, error_detail
8222 )
8223 nslcmop_operation_state = "FAILED"
8224 if db_nsr:
8225 db_nsr_update["operational-status"] = old_operational_status
8226 db_nsr_update["config-status"] = old_config_status
8227 db_nsr_update[
8228 "detailed-status"
8229 ] = "FAILED healing nslcmop={} {}: {}".format(
8230 nslcmop_id, step, error_detail
8231 )
8232 for task, task_name in tasks_dict_info.items():
8233 if not task.done() or task.cancelled() or task.exception():
8234 if task_name.startswith(self.task_name_deploy_vca):
8235 # A N2VC task is pending
8236 db_nsr_update["config-status"] = "failed"
8237 else:
8238 # RO task is pending
8239 db_nsr_update["operational-status"] = "failed"
8240 else:
8241 error_description_nslcmop = None
8242 nslcmop_operation_state = "COMPLETED"
8243 db_nslcmop_update["detailed-status"] = "Done"
8244 db_nsr_update["detailed-status"] = "Done"
8245 db_nsr_update["operational-status"] = "running"
8246 db_nsr_update["config-status"] = "configured"
8247
8248 self._write_op_status(
8249 op_id=nslcmop_id,
8250 stage="",
8251 error_message=error_description_nslcmop,
8252 operation_state=nslcmop_operation_state,
8253 other_update=db_nslcmop_update,
8254 )
8255 if db_nsr:
8256 self._write_ns_status(
8257 nsr_id=nsr_id,
8258 ns_state=None,
8259 current_operation="IDLE",
8260 current_operation_id=None,
8261 other_update=db_nsr_update,
8262 )
8263
8264 if nslcmop_operation_state:
8265 try:
8266 msg = {
8267 "nsr_id": nsr_id,
8268 "nslcmop_id": nslcmop_id,
8269 "operationState": nslcmop_operation_state,
8270 }
8271 await self.msg.aiowrite("ns", "healed", msg)
8272 except Exception as e:
8273 self.logger.error(
8274 logging_text + "kafka_write notification Exception {}".format(e)
8275 )
8276 self.logger.debug(logging_text + "Exit")
8277 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8278
8279 async def heal_RO(
8280 self,
8281 logging_text,
8282 nsr_id,
8283 db_nslcmop,
8284 stage,
8285 ):
8286 """
8287 Heal at RO
8288 :param logging_text: preffix text to use at logging
8289 :param nsr_id: nsr identity
8290 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8291 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8292 :return: None or exception
8293 """
8294
8295 def get_vim_account(vim_account_id):
8296 nonlocal db_vims
8297 if vim_account_id in db_vims:
8298 return db_vims[vim_account_id]
8299 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8300 db_vims[vim_account_id] = db_vim
8301 return db_vim
8302
8303 try:
8304 start_heal = time()
8305 ns_params = db_nslcmop.get("operationParams")
8306 if ns_params and ns_params.get("timeout_ns_heal"):
8307 timeout_ns_heal = ns_params["timeout_ns_heal"]
8308 else:
8309 timeout_ns_heal = self.timeout.ns_heal
8310
8311 db_vims = {}
8312
8313 nslcmop_id = db_nslcmop["_id"]
8314 target = {
8315 "action_id": nslcmop_id,
8316 }
8317 self.logger.warning(
8318 "db_nslcmop={} and timeout_ns_heal={}".format(
8319 db_nslcmop, timeout_ns_heal
8320 )
8321 )
8322 target.update(db_nslcmop.get("operationParams", {}))
8323
8324 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8325 desc = await self.RO.recreate(nsr_id, target)
8326 self.logger.debug("RO return > {}".format(desc))
8327 action_id = desc["action_id"]
8328 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8329 await self._wait_ng_ro(
8330 nsr_id,
8331 action_id,
8332 nslcmop_id,
8333 start_heal,
8334 timeout_ns_heal,
8335 stage,
8336 operation="healing",
8337 )
8338
8339 # Updating NSR
8340 db_nsr_update = {
8341 "_admin.deployed.RO.operational-status": "running",
8342 "detailed-status": " ".join(stage),
8343 }
8344 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8345 self._write_op_status(nslcmop_id, stage)
8346 self.logger.debug(
8347 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8348 )
8349
8350 except Exception as e:
8351 stage[2] = "ERROR healing at VIM"
8352 # self.set_vnfr_at_error(db_vnfrs, str(e))
8353 self.logger.error(
8354 "Error healing at VIM {}".format(e),
8355 exc_info=not isinstance(
8356 e,
8357 (
8358 ROclient.ROClientException,
8359 LcmException,
8360 DbException,
8361 NgRoException,
8362 ),
8363 ),
8364 )
8365 raise
8366
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment found in
        descriptor_config, registering each task in self.lcm_tasks and in
        task_instantiation_info (keyed by the task object) for the caller to await.

        If a matching entry already exists in <nsrs>._admin.deployed.VCA it is
        reused; otherwise a new VCA entry is created in the database and appended
        to db_nsr in memory. Synchronous: the spawned tasks run in background.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            vca_name, charm_name, vca_type = self.get_vca_info(
                ee_item, db_nsr, get_charm_name
            )
            if not vca_type:
                self.logger.debug(
                    logging_text + "skipping, non juju/charm/helm configuration"
                )
                continue

            # Look for an already-deployed VCA matching this element; the for/else
            # below creates a new entry only when the scan completes without break.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # vca_index holds the last scanned index (-1 on empty list), so
                # +1 points at the append position used below
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8512
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-establish the execution environment of a healed element and,
        optionally, re-run its Day-1 primitives.

        For native charms a new EE is registered over SSH to the healed VM;
        for proxy/helm types the existing EE is kept and only the SSH key is
        re-injected into the VM. Day-1 primitives run only when
        deploy_params["run-day1"] is truthy. Progress is persisted into
        <nsrs>._admin.deployed.VCA.<vca_index> and configurationStatus.

        :raises LcmException: wrapping any failure, after setting the
            configuration status to BROKEN.
        """
        nsr_id = db_nsr["_id"]
        # dotted DB path prefix for this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # location descriptor handed to VCA connectors for status updates
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive, if present, supplies the charm's
                # initial configuration parameters
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            # NOTE(review): rw_mgmt_ip is only assigned in the native_charm and
            # proxy/helm branches above; other vca_type values would raise here
            # — presumably those are the only types reaching this method.
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8921
8922 async def _wait_heal_ro(
8923 self,
8924 nsr_id,
8925 timeout=600,
8926 ):
8927 start_time = time()
8928 while time() <= start_time + timeout:
8929 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8930 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8931 "operational-status"
8932 ]
8933 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8934 if operational_status_ro != "healing":
8935 break
8936 await asyncio.sleep(15)
8937 else: # timeout_ns_deploy
8938 raise NgRoException("Timeout waiting ns to deploy")
8939
8940 async def vertical_scale(self, nsr_id, nslcmop_id):
8941 """
8942 Vertical Scale the VDUs in a NS
8943
8944 :param: nsr_id: NS Instance ID
8945 :param: nslcmop_id: nslcmop ID of migrate
8946
8947 """
8948 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8949 self.logger.info(logging_text + "Enter")
8950 stage = ["Preparing the environment", ""]
8951 # get all needed from database
8952 db_nslcmop = None
8953 db_nsr_update = {}
8954 target = {}
8955 exc = None
8956 # in case of error, indicates what part of scale was failed to put nsr at error status
8957 start_deploy = time()
8958
8959 try:
8960 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8961 operationParams = db_nslcmop.get("operationParams")
8962 vertical_scale_data = operationParams["verticalScaleVnf"]
8963 vnfd_id = vertical_scale_data["vnfdId"]
8964 count_index = vertical_scale_data["countIndex"]
8965 vdu_id_ref = vertical_scale_data["vduId"]
8966 vnfr_id = vertical_scale_data["vnfInstanceId"]
8967 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8968 db_flavor = db_nsr.get("flavor")
8969 db_flavor_index = str(len(db_flavor))
8970
8971 def set_flavor_refrence_to_vdur(diff=0):
8972 """
8973 Utility function to add and remove the
8974 ref to new ns-flavor-id to vdurs
8975 :param: diff: default 0
8976 """
8977 q_filter = {}
8978 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
8979 for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
8980 if (
8981 vdur.get("count-index") == count_index
8982 and vdur.get("vdu-id-ref") == vdu_id_ref
8983 ):
8984 filter_text = {
8985 "_id": vnfr_id,
8986 "vdur.count-index": count_index,
8987 "vdur.vdu-id-ref": vdu_id_ref,
8988 }
8989 q_filter.update(filter_text)
8990 db_update = {}
8991 db_update["vdur.{}.ns-flavor-id".format(vdu_index)] = str(
8992 int(db_flavor_index) - diff
8993 )
8994 self.db.set_one(
8995 "vnfrs",
8996 q_filter=q_filter,
8997 update_dict=db_update,
8998 fail_on_empty=True,
8999 )
9000
9001 # wait for any previous tasks in process
9002 stage[1] = "Waiting for previous operations to terminate"
9003 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
9004
9005 self._write_ns_status(
9006 nsr_id=nsr_id,
9007 ns_state=None,
9008 current_operation="VERTICALSCALE",
9009 current_operation_id=nslcmop_id,
9010 )
9011 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
9012 self.logger.debug(
9013 stage[1] + " after having waited for previous tasks to be completed"
9014 )
9015 self.update_db_2("nsrs", nsr_id, db_nsr_update)
9016 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
9017 virtual_compute = vnfd["virtual-compute-desc"][0]
9018 virtual_memory = round(
9019 float(virtual_compute["virtual-memory"]["size"]) * 1024
9020 )
9021 virtual_cpu = virtual_compute["virtual-cpu"]["num-virtual-cpu"]
9022 virtual_storage = vnfd["virtual-storage-desc"][0]["size-of-storage"]
9023 flavor_dict_update = {
9024 "id": db_flavor_index,
9025 "memory-mb": virtual_memory,
9026 "name": f"{vdu_id_ref}-{count_index}-flv",
9027 "storage-gb": str(virtual_storage),
9028 "vcpu-count": virtual_cpu,
9029 }
9030 db_flavor.append(flavor_dict_update)
9031 db_update = {}
9032 db_update["flavor"] = db_flavor
9033 q_filter = {
9034 "_id": nsr_id,
9035 }
9036 # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
9037 self.db.set_one(
9038 "nsrs",
9039 q_filter=q_filter,
9040 update_dict=db_update,
9041 fail_on_empty=True,
9042 )
9043 set_flavor_refrence_to_vdur()
9044 target = {}
9045 new_operationParams = {
9046 "lcmOperationType": "verticalscale",
9047 "verticalScale": "CHANGE_VNFFLAVOR",
9048 "nsInstanceId": nsr_id,
9049 "changeVnfFlavorData": {
9050 "vnfInstanceId": vnfr_id,
9051 "additionalParams": {
9052 "vduid": vdu_id_ref,
9053 "vduCountIndex": count_index,
9054 "virtualMemory": virtual_memory,
9055 "numVirtualCpu": int(virtual_cpu),
9056 "sizeOfStorage": int(virtual_storage),
9057 },
9058 },
9059 }
9060 target.update(new_operationParams)
9061
9062 stage[1] = "Sending vertical scale request to RO... {}".format(target)
9063 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
9064 self.logger.info("RO target > {}".format(target))
9065 desc = await self.RO.vertical_scale(nsr_id, target)
9066 self.logger.info("RO.vertical_scale return value - {}".format(desc))
9067 action_id = desc["action_id"]
9068 await self._wait_ng_ro(
9069 nsr_id,
9070 action_id,
9071 nslcmop_id,
9072 start_deploy,
9073 self.timeout.verticalscale,
9074 operation="verticalscale",
9075 )
9076 except (
9077 NgRoException,
9078 ROclient.ROClientException,
9079 DbException,
9080 LcmException,
9081 ) as e:
9082 self.logger.error("Exit Exception {}".format(e))
9083 exc = e
9084 except asyncio.CancelledError:
9085 self.logger.error("Cancelled Exception while '{}'".format(stage))
9086 exc = "Operation was cancelled"
9087 except Exception as e:
9088 exc = traceback.format_exc()
9089 self.logger.critical(
9090 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
9091 )
9092 finally:
9093 if exc:
9094 self.logger.critical(
9095 "Vertical-Scale operation Failed, cleaning up nsrs and vnfrs flavor detail"
9096 )
9097 self.db.set_one(
9098 "nsrs",
9099 {"_id": nsr_id},
9100 None,
9101 pull={"flavor": {"id": db_flavor_index}},
9102 )
9103 set_flavor_refrence_to_vdur(diff=1)
9104 return "FAILED", "Error in verticalscale VNF {}".format(exc)
9105 else:
9106 return "COMPLETED", "Done"