Fix 2280: Random IPs are getting assigned for dual-stack NS when doing scale-out...
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import ipaddress
27 import json
28 from jinja2 import (
29 Environment,
30 TemplateError,
31 TemplateNotFound,
32 StrictUndefined,
33 UndefinedError,
34 select_autoescape,
35 )
36
37 from osm_lcm import ROclient
38 from osm_lcm.data_utils.lcm_config import LcmCfg
39 from osm_lcm.data_utils.nsr import (
40 get_deployed_kdu,
41 get_deployed_vca,
42 get_deployed_vca_list,
43 get_nsd,
44 )
45 from osm_lcm.data_utils.vca import (
46 DeployedComponent,
47 DeployedK8sResource,
48 DeployedVCA,
49 EELevel,
50 Relation,
51 EERelation,
52 safe_get_ee_relation,
53 )
54 from osm_lcm.ng_ro import NgRoClient, NgRoException
55 from osm_lcm.lcm_utils import (
56 LcmException,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import SystemRandom
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
class NsLcm(LcmBase):
    """NS lifecycle manager: instantiation, scaling, termination and status
    tracking of Network Services through RO, N2VC and K8s connectors."""

    # Sentinel codes returned when looking up an existing sub-operation.
    SUBOPERATION_STATUS_NOT_FOUND = -1  # no matching sub-operation found
    SUBOPERATION_STATUS_NEW = -2  # a new sub-operation must be created
    SUBOPERATION_STATUS_SKIP = -3  # the sub-operation can be skipped
    # Name used for the execution-environment TLS element.
    EE_TLS_NAME = "ee-tls"
    # Human-readable label for the VCA deployment task.
    task_name_deploy_vca = "Deploying VCA"
    # Maps descriptor relational-operator mnemonics to Python operator strings.
    rel_operation_types = {
        "GE": ">=",
        "LE": "<=",
        "GT": ">",
        "LT": "<",
        "EQ": "==",
        "NE": "!=",
    }
145
    def __init__(self, msg, lcm_tasks, config: LcmCfg):
        """Initialize the NS lifecycle manager.

        Connects to the database and filesystem singletons, creates the VCA
        connectors (N2VC juju, helm execution environment), the K8s cluster
        connectors (helm3, juju-bundle) and the NG-RO client.

        :param msg: message-bus instance, forwarded to LcmBase
        :param lcm_tasks: task register used to track asyncio tasks
        :param config: LcmCfg object carrying timeout, RO and VCA settings
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju); DB is updated on every VCA change
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution-environment connector (shares the N2VC callback)
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm3 K8s connector; no DB callback is registered for it
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle K8s connector; updates the NSR on KDU status changes
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: KDU model type -> K8s connector
        self.k8scluster_map = {
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: VCA/charm type -> VCA connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        # dispatch table: LCM operation type -> RO status-polling coroutine
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
219
220 @staticmethod
221 def increment_ip_mac(ip_mac, vm_index=1):
222 if not isinstance(ip_mac, str):
223 return ip_mac
224 try:
225 next_ipv6 = None
226 next_ipv4 = None
227 dual_ip = ip_mac.split(";")
228 if len(dual_ip) == 2:
229 for ip in dual_ip:
230 if ipaddress.ip_address(ip).version == 6:
231 ipv6 = ipaddress.IPv6Address(ip)
232 next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
233 elif ipaddress.ip_address(ip).version == 4:
234 ipv4 = ipaddress.IPv4Address(ip)
235 next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
236 return [next_ipv4, next_ipv6]
237 # try with ipv4 look for last dot
238 i = ip_mac.rfind(".")
239 if i > 0:
240 i += 1
241 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i = ip_mac.rfind(":")
244 if i > 0:
245 i += 1
246 # format in hex, len can be 2 for mac or 4 for ipv6
247 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
248 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
249 )
250 except Exception:
251 pass
252 return None
253
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback fired by N2VC on juju model changes; refreshes NSR status.

        Re-reads the NSR, fetches the current VCA status for the NS namespace,
        stores it under "vcaStatus", and flips nsState between READY and
        DEGRADED based on juju machine/application statuses.

        :param table: originating table (only used implicitly; the nsr is
            always re-read from "nsrs")
        :param filter: db filter; must carry the nsr "_id"
        :param path: dotted path of the changed element; its last component is
            expected to be the VCA index
        :param updated_data: changed content (not used directly here)
        :param vca_id: optional VCA identifier to query status from
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #     .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is encoded as the last component of the path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so these assignments appear to raise KeyError, which is
                # swallowed by the except below — confirm whether this update
                # is ever actually applied.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
353
354 async def _on_update_k8s_db(
355 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
356 ):
357 """
358 Updating vca status in NSR record
359 :param cluster_uuid: UUID of a k8s cluster
360 :param kdu_instance: The unique name of the KDU instance
361 :param filter: To get nsr_id
362 :cluster_type: The cluster type (juju, k8s)
363 :return: none
364 """
365
366 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
367 # .format(cluster_uuid, kdu_instance, filter))
368
369 nsr_id = filter.get("_id")
370 try:
371 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
372 cluster_uuid=cluster_uuid,
373 kdu_instance=kdu_instance,
374 yaml_format=False,
375 complete_status=True,
376 vca_id=vca_id,
377 )
378
379 # vcaStatus
380 db_dict = dict()
381 db_dict["vcaStatus"] = {nsr_id: vca_status}
382
383 self.logger.debug(
384 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
385 )
386
387 # write to database
388 self.update_db_2("nsrs", nsr_id, db_dict)
389 except (asyncio.CancelledError, asyncio.TimeoutError):
390 raise
391 except Exception as e:
392 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
393
394 @staticmethod
395 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
396 try:
397 env = Environment(
398 undefined=StrictUndefined,
399 autoescape=select_autoescape(default_for_string=True, default=True),
400 )
401 template = env.from_string(cloud_init_text)
402 return template.render(additional_params or {})
403 except UndefinedError as e:
404 raise LcmException(
405 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
406 "file, must be provided in the instantiation parameters inside the "
407 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
408 )
409 except (TemplateError, TemplateNotFound) as e:
410 raise LcmException(
411 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
412 vnfd_id, vdu_id, e
413 )
414 )
415
416 def _get_vdu_cloud_init_content(self, vdu, vnfd):
417 cloud_init_content = cloud_init_file = None
418 try:
419 if vdu.get("cloud-init-file"):
420 base_folder = vnfd["_admin"]["storage"]
421 if base_folder["pkg-dir"]:
422 cloud_init_file = "{}/{}/cloud_init/{}".format(
423 base_folder["folder"],
424 base_folder["pkg-dir"],
425 vdu["cloud-init-file"],
426 )
427 else:
428 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
429 base_folder["folder"],
430 vdu["cloud-init-file"],
431 )
432 with self.fs.file_open(cloud_init_file, "r") as ci_file:
433 cloud_init_content = ci_file.read()
434 elif vdu.get("cloud-init"):
435 cloud_init_content = vdu["cloud-init"]
436
437 return cloud_init_content
438 except FsException as e:
439 raise LcmException(
440 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
441 vnfd["id"], vdu["id"], cloud_init_file, e
442 )
443 )
444
445 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
446 vdur = next(
447 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
448 )
449 additional_params = vdur.get("additionalParams")
450 return parse_yaml_strings(additional_params)
451
452 @staticmethod
453 def ip_profile_2_RO(ip_profile):
454 RO_ip_profile = deepcopy(ip_profile)
455 if "dns-server" in RO_ip_profile:
456 if isinstance(RO_ip_profile["dns-server"], list):
457 RO_ip_profile["dns-address"] = []
458 for ds in RO_ip_profile.pop("dns-server"):
459 RO_ip_profile["dns-address"].append(ds["address"])
460 else:
461 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
462 if RO_ip_profile.get("ip-version") == "ipv4":
463 RO_ip_profile["ip-version"] = "IPv4"
464 if RO_ip_profile.get("ip-version") == "ipv6":
465 RO_ip_profile["ip-version"] = "IPv6"
466 if "dhcp-params" in RO_ip_profile:
467 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
468 return RO_ip_profile
469
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Update the VNFR in the database when scaling in or out.

        On scale-out, clones the last existing vdur of each vdu-id (or the
        saved vdur-template when scaling from 0) and pushes the new copies.
        On scale-in, either marks vdurs as DELETING or pulls them one by one;
        when scaling to 0 the last vdur is saved as the template.
        Finally re-reads the record so the passed db_vnfr reflects the db.

        :param db_vnfr: VNFR content (dict); its "vdur" list is refreshed
        :param vdu_create: dict {vdu-id-ref: count} of vdus to add
        :param vdu_delete: dict {vdu-id-ref: count} of vdus to remove
        :param mark_delete: when True only mark vdurs as "DELETING" instead of
            removing them from the record
        :raises LcmException: scaling out with neither a vdur nor a template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # the last existing vdur with this vdu-id-ref serves as template
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the template and reset per-instance fields
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are advanced per replica; dynamic
                        # ones are dropped so the VIM assigns fresh values
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
580
581 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
582 """
583 Updates database nsr with the RO info for the created vld
584 :param ns_update_nsr: dictionary to be filled with the updated info
585 :param db_nsr: content of db_nsr. This is also modified
586 :param nsr_desc_RO: nsr descriptor from RO
587 :return: Nothing, LcmException is raised on errors
588 """
589
590 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
591 for net_RO in get_iterable(nsr_desc_RO, "nets"):
592 if vld["id"] != net_RO.get("ns_net_osm_id"):
593 continue
594 vld["vim-id"] = net_RO.get("vim_net_id")
595 vld["name"] = net_RO.get("vim_name")
596 vld["status"] = net_RO.get("status")
597 vld["status-detailed"] = net_RO.get("error_msg")
598 ns_update_nsr["vld.{}".format(vld_index)] = vld
599 break
600 else:
601 raise LcmException(
602 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
603 )
604
605 def set_vnfr_at_error(self, db_vnfrs, error_text):
606 try:
607 for db_vnfr in db_vnfrs.values():
608 vnfr_update = {"status": "ERROR"}
609 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
610 if "status" not in vdur:
611 vdur["status"] = "ERROR"
612 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
613 if error_text:
614 vdur["status-detailed"] = str(error_text)
615 vnfr_update[
616 "vdur.{}.status-detailed".format(vdu_index)
617 ] = "ERROR"
618 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
619 except DbException as e:
620 self.logger.error("Cannot update vnf. {}".format(e))
621
622 def _get_ns_config_info(self, nsr_id):
623 """
624 Generates a mapping between vnf,vdu elements and the N2VC id
625 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
626 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
627 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
628 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
629 """
630 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
631 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
632 mapping = {}
633 ns_config_info = {"osm-config-mapping": mapping}
634 for vca in vca_deployed_list:
635 if not vca["member-vnf-index"]:
636 continue
637 if not vca["vdu_id"]:
638 mapping[vca["member-vnf-index"]] = vca["application"]
639 else:
640 mapping[
641 "{}.{}.{}".format(
642 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
643 )
644 ] = vca["application"]
645 return ns_config_info
646
647 async def _instantiate_ng_ro(
648 self,
649 logging_text,
650 nsr_id,
651 nsd,
652 db_nsr,
653 db_nslcmop,
654 db_vnfrs,
655 db_vnfds,
656 n2vc_key_list,
657 stage,
658 start_deploy,
659 timeout_ns_deploy,
660 ):
661 db_vims = {}
662
663 def get_vim_account(vim_account_id):
664 nonlocal db_vims
665 if vim_account_id in db_vims:
666 return db_vims[vim_account_id]
667 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
668 db_vims[vim_account_id] = db_vim
669 return db_vim
670
671 # modify target_vld info with instantiation parameters
672 def parse_vld_instantiation_params(
673 target_vim, target_vld, vld_params, target_sdn
674 ):
675 if vld_params.get("ip-profile"):
676 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
677 vld_params["ip-profile"]
678 )
679 if vld_params.get("provider-network"):
680 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
681 "provider-network"
682 ]
683 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
684 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
685 "provider-network"
686 ]["sdn-ports"]
687
688 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
689 # if wim_account_id is specified in vld_params, validate if it is feasible.
690 wim_account_id, db_wim = select_feasible_wim_account(
691 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
692 )
693
694 if wim_account_id:
695 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
696 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
697 # update vld_params with correct WIM account Id
698 vld_params["wimAccountId"] = wim_account_id
699
700 target_wim = "wim:{}".format(wim_account_id)
701 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
702 sdn_ports = get_sdn_ports(vld_params, db_wim)
703 if len(sdn_ports) > 0:
704 target_vld["vim_info"][target_wim] = target_wim_attrs
705 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
706
707 self.logger.debug(
708 "Target VLD with WIM data: {:s}".format(str(target_vld))
709 )
710
711 for param in ("vim-network-name", "vim-network-id"):
712 if vld_params.get(param):
713 if isinstance(vld_params[param], dict):
714 for vim, vim_net in vld_params[param].items():
715 other_target_vim = "vim:" + vim
716 populate_dict(
717 target_vld["vim_info"],
718 (other_target_vim, param.replace("-", "_")),
719 vim_net,
720 )
721 else: # isinstance str
722 target_vld["vim_info"][target_vim][
723 param.replace("-", "_")
724 ] = vld_params[param]
725 if vld_params.get("common_id"):
726 target_vld["common_id"] = vld_params.get("common_id")
727
728 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
729 def update_ns_vld_target(target, ns_params):
730 for vnf_params in ns_params.get("vnf", ()):
731 if vnf_params.get("vimAccountId"):
732 target_vnf = next(
733 (
734 vnfr
735 for vnfr in db_vnfrs.values()
736 if vnf_params["member-vnf-index"]
737 == vnfr["member-vnf-index-ref"]
738 ),
739 None,
740 )
741 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
742 if not vdur:
743 continue
744 for a_index, a_vld in enumerate(target["ns"]["vld"]):
745 target_vld = find_in_list(
746 get_iterable(vdur, "interfaces"),
747 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
748 )
749
750 vld_params = find_in_list(
751 get_iterable(ns_params, "vld"),
752 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
753 )
754 if target_vld:
755 if vnf_params.get("vimAccountId") not in a_vld.get(
756 "vim_info", {}
757 ):
758 target_vim_network_list = [
759 v for _, v in a_vld.get("vim_info").items()
760 ]
761 target_vim_network_name = next(
762 (
763 item.get("vim_network_name", "")
764 for item in target_vim_network_list
765 ),
766 "",
767 )
768
769 target["ns"]["vld"][a_index].get("vim_info").update(
770 {
771 "vim:{}".format(vnf_params["vimAccountId"]): {
772 "vim_network_name": target_vim_network_name,
773 }
774 }
775 )
776
777 if vld_params:
778 for param in ("vim-network-name", "vim-network-id"):
779 if vld_params.get(param) and isinstance(
780 vld_params[param], dict
781 ):
782 for vim, vim_net in vld_params[
783 param
784 ].items():
785 other_target_vim = "vim:" + vim
786 populate_dict(
787 target["ns"]["vld"][a_index].get(
788 "vim_info"
789 ),
790 (
791 other_target_vim,
792 param.replace("-", "_"),
793 ),
794 vim_net,
795 )
796
797 nslcmop_id = db_nslcmop["_id"]
798 target = {
799 "name": db_nsr["name"],
800 "ns": {"vld": []},
801 "vnf": [],
802 "image": deepcopy(db_nsr["image"]),
803 "flavor": deepcopy(db_nsr["flavor"]),
804 "action_id": nslcmop_id,
805 "cloud_init_content": {},
806 }
807 for image in target["image"]:
808 image["vim_info"] = {}
809 for flavor in target["flavor"]:
810 flavor["vim_info"] = {}
811 if db_nsr.get("shared-volumes"):
812 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
813 for shared_volumes in target["shared-volumes"]:
814 shared_volumes["vim_info"] = {}
815 if db_nsr.get("affinity-or-anti-affinity-group"):
816 target["affinity-or-anti-affinity-group"] = deepcopy(
817 db_nsr["affinity-or-anti-affinity-group"]
818 )
819 for affinity_or_anti_affinity_group in target[
820 "affinity-or-anti-affinity-group"
821 ]:
822 affinity_or_anti_affinity_group["vim_info"] = {}
823
824 if db_nslcmop.get("lcmOperationType") != "instantiate":
825 # get parameters of instantiation:
826 db_nslcmop_instantiate = self.db.get_list(
827 "nslcmops",
828 {
829 "nsInstanceId": db_nslcmop["nsInstanceId"],
830 "lcmOperationType": "instantiate",
831 },
832 )[-1]
833 ns_params = db_nslcmop_instantiate.get("operationParams")
834 else:
835 ns_params = db_nslcmop.get("operationParams")
836 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
837 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
838
839 cp2target = {}
840 for vld_index, vld in enumerate(db_nsr.get("vld")):
841 target_vim = "vim:{}".format(ns_params["vimAccountId"])
842 target_vld = {
843 "id": vld["id"],
844 "name": vld["name"],
845 "mgmt-network": vld.get("mgmt-network", False),
846 "type": vld.get("type"),
847 "vim_info": {
848 target_vim: {
849 "vim_network_name": vld.get("vim-network-name"),
850 "vim_account_id": ns_params["vimAccountId"],
851 }
852 },
853 }
854 # check if this network needs SDN assist
855 if vld.get("pci-interfaces"):
856 db_vim = get_vim_account(ns_params["vimAccountId"])
857 if vim_config := db_vim.get("config"):
858 if sdnc_id := vim_config.get("sdn-controller"):
859 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
860 target_sdn = "sdn:{}".format(sdnc_id)
861 target_vld["vim_info"][target_sdn] = {
862 "sdn": True,
863 "target_vim": target_vim,
864 "vlds": [sdn_vld],
865 "type": vld.get("type"),
866 }
867
868 nsd_vnf_profiles = get_vnf_profiles(nsd)
869 for nsd_vnf_profile in nsd_vnf_profiles:
870 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
871 if cp["virtual-link-profile-id"] == vld["id"]:
872 cp2target[
873 "member_vnf:{}.{}".format(
874 cp["constituent-cpd-id"][0][
875 "constituent-base-element-id"
876 ],
877 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
878 )
879 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
880
881 # check at nsd descriptor, if there is an ip-profile
882 vld_params = {}
883 nsd_vlp = find_in_list(
884 get_virtual_link_profiles(nsd),
885 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
886 == vld["id"],
887 )
888 if (
889 nsd_vlp
890 and nsd_vlp.get("virtual-link-protocol-data")
891 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
892 ):
893 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
894 "l3-protocol-data"
895 ]
896
897 # update vld_params with instantiation params
898 vld_instantiation_params = find_in_list(
899 get_iterable(ns_params, "vld"),
900 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
901 )
902 if vld_instantiation_params:
903 vld_params.update(vld_instantiation_params)
904 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
905 target["ns"]["vld"].append(target_vld)
906 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
907 update_ns_vld_target(target, ns_params)
908
909 for vnfr in db_vnfrs.values():
910 vnfd = find_in_list(
911 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
912 )
913 vnf_params = find_in_list(
914 get_iterable(ns_params, "vnf"),
915 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
916 )
917 target_vnf = deepcopy(vnfr)
918 target_vim = "vim:{}".format(vnfr["vim-account-id"])
919 for vld in target_vnf.get("vld", ()):
920 # check if connected to a ns.vld, to fill target'
921 vnf_cp = find_in_list(
922 vnfd.get("int-virtual-link-desc", ()),
923 lambda cpd: cpd.get("id") == vld["id"],
924 )
925 if vnf_cp:
926 ns_cp = "member_vnf:{}.{}".format(
927 vnfr["member-vnf-index-ref"], vnf_cp["id"]
928 )
929 if cp2target.get(ns_cp):
930 vld["target"] = cp2target[ns_cp]
931
932 vld["vim_info"] = {
933 target_vim: {"vim_network_name": vld.get("vim-network-name")}
934 }
935 # check if this network needs SDN assist
936 target_sdn = None
937 if vld.get("pci-interfaces"):
938 db_vim = get_vim_account(vnfr["vim-account-id"])
939 sdnc_id = db_vim["config"].get("sdn-controller")
940 if sdnc_id:
941 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
942 target_sdn = "sdn:{}".format(sdnc_id)
943 vld["vim_info"][target_sdn] = {
944 "sdn": True,
945 "target_vim": target_vim,
946 "vlds": [sdn_vld],
947 "type": vld.get("type"),
948 }
949
950 # check at vnfd descriptor, if there is an ip-profile
951 vld_params = {}
952 vnfd_vlp = find_in_list(
953 get_virtual_link_profiles(vnfd),
954 lambda a_link_profile: a_link_profile["id"] == vld["id"],
955 )
956 if (
957 vnfd_vlp
958 and vnfd_vlp.get("virtual-link-protocol-data")
959 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
960 ):
961 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
962 "l3-protocol-data"
963 ]
964 # update vld_params with instantiation params
965 if vnf_params:
966 vld_instantiation_params = find_in_list(
967 get_iterable(vnf_params, "internal-vld"),
968 lambda i_vld: i_vld["name"] == vld["id"],
969 )
970 if vld_instantiation_params:
971 vld_params.update(vld_instantiation_params)
972 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
973
974 vdur_list = []
975 for vdur in target_vnf.get("vdur", ()):
976 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
977 continue # This vdu must not be created
978 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
979
980 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
981
982 if ssh_keys_all:
983 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
984 vnf_configuration = get_configuration(vnfd, vnfd["id"])
985 if (
986 vdu_configuration
987 and vdu_configuration.get("config-access")
988 and vdu_configuration.get("config-access").get("ssh-access")
989 ):
990 vdur["ssh-keys"] = ssh_keys_all
991 vdur["ssh-access-required"] = vdu_configuration[
992 "config-access"
993 ]["ssh-access"]["required"]
994 elif (
995 vnf_configuration
996 and vnf_configuration.get("config-access")
997 and vnf_configuration.get("config-access").get("ssh-access")
998 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
999 ):
1000 vdur["ssh-keys"] = ssh_keys_all
1001 vdur["ssh-access-required"] = vnf_configuration[
1002 "config-access"
1003 ]["ssh-access"]["required"]
1004 elif ssh_keys_instantiation and find_in_list(
1005 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1006 ):
1007 vdur["ssh-keys"] = ssh_keys_instantiation
1008
1009 self.logger.debug("NS > vdur > {}".format(vdur))
1010
1011 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1012 # cloud-init
1013 if vdud.get("cloud-init-file"):
1014 vdur["cloud-init"] = "{}:file:{}".format(
1015 vnfd["_id"], vdud.get("cloud-init-file")
1016 )
1017 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1018 if vdur["cloud-init"] not in target["cloud_init_content"]:
1019 base_folder = vnfd["_admin"]["storage"]
1020 if base_folder["pkg-dir"]:
1021 cloud_init_file = "{}/{}/cloud_init/{}".format(
1022 base_folder["folder"],
1023 base_folder["pkg-dir"],
1024 vdud.get("cloud-init-file"),
1025 )
1026 else:
1027 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1028 base_folder["folder"],
1029 vdud.get("cloud-init-file"),
1030 )
1031 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1032 target["cloud_init_content"][
1033 vdur["cloud-init"]
1034 ] = ci_file.read()
1035 elif vdud.get("cloud-init"):
1036 vdur["cloud-init"] = "{}:vdu:{}".format(
1037 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1038 )
1039 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1040 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1041 "cloud-init"
1042 ]
1043 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1044 deploy_params_vdu = self._format_additional_params(
1045 vdur.get("additionalParams") or {}
1046 )
1047 deploy_params_vdu["OSM"] = get_osm_params(
1048 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1049 )
1050 vdur["additionalParams"] = deploy_params_vdu
1051
1052 # flavor
1053 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1054 if target_vim not in ns_flavor["vim_info"]:
1055 ns_flavor["vim_info"][target_vim] = {}
1056
1057 # deal with images
1058 # in case alternative images are provided we must check if they should be applied
1059 # for the vim_type, modify the vim_type taking into account
1060 ns_image_id = int(vdur["ns-image-id"])
1061 if vdur.get("alt-image-ids"):
1062 db_vim = get_vim_account(vnfr["vim-account-id"])
1063 vim_type = db_vim["vim_type"]
1064 for alt_image_id in vdur.get("alt-image-ids"):
1065 ns_alt_image = target["image"][int(alt_image_id)]
1066 if vim_type == ns_alt_image.get("vim-type"):
1067 # must use alternative image
1068 self.logger.debug(
1069 "use alternative image id: {}".format(alt_image_id)
1070 )
1071 ns_image_id = alt_image_id
1072 vdur["ns-image-id"] = ns_image_id
1073 break
1074 ns_image = target["image"][int(ns_image_id)]
1075 if target_vim not in ns_image["vim_info"]:
1076 ns_image["vim_info"][target_vim] = {}
1077
1078 # Affinity groups
1079 if vdur.get("affinity-or-anti-affinity-group-id"):
1080 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1081 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1082 if target_vim not in ns_ags["vim_info"]:
1083 ns_ags["vim_info"][target_vim] = {}
1084
1085 # shared-volumes
1086 if vdur.get("shared-volumes-id"):
1087 for sv_id in vdur["shared-volumes-id"]:
1088 ns_sv = find_in_list(
1089 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1090 )
1091 if ns_sv:
1092 ns_sv["vim_info"][target_vim] = {}
1093
1094 vdur["vim_info"] = {target_vim: {}}
1095 # instantiation parameters
1096 if vnf_params:
1097 vdu_instantiation_params = find_in_list(
1098 get_iterable(vnf_params, "vdu"),
1099 lambda i_vdu: i_vdu["id"] == vdud["id"],
1100 )
1101 if vdu_instantiation_params:
1102 # Parse the vdu_volumes from the instantiation params
1103 vdu_volumes = get_volumes_from_instantiation_params(
1104 vdu_instantiation_params, vdud
1105 )
1106 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1107 vdur["additionalParams"]["OSM"][
1108 "vim_flavor_id"
1109 ] = vdu_instantiation_params.get("vim-flavor-id")
1110 vdur_list.append(vdur)
1111 target_vnf["vdur"] = vdur_list
1112 target["vnf"].append(target_vnf)
1113
1114 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1115 desc = await self.RO.deploy(nsr_id, target)
1116 self.logger.debug("RO return > {}".format(desc))
1117 action_id = desc["action_id"]
1118 await self._wait_ng_ro(
1119 nsr_id,
1120 action_id,
1121 nslcmop_id,
1122 start_deploy,
1123 timeout_ns_deploy,
1124 stage,
1125 operation="instantiation",
1126 )
1127
1128 # Updating NSR
1129 db_nsr_update = {
1130 "_admin.deployed.RO.operational-status": "running",
1131 "detailed-status": " ".join(stage),
1132 }
1133 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1134 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1135 self._write_op_status(nslcmop_id, stage)
1136 self.logger.debug(
1137 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1138 )
1139 return
1140
1141 async def _wait_ng_ro(
1142 self,
1143 nsr_id,
1144 action_id,
1145 nslcmop_id=None,
1146 start_time=None,
1147 timeout=600,
1148 stage=None,
1149 operation=None,
1150 ):
1151 detailed_status_old = None
1152 db_nsr_update = {}
1153 start_time = start_time or time()
1154 while time() <= start_time + timeout:
1155 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1156 self.logger.debug("Wait NG RO > {}".format(desc_status))
1157 if desc_status["status"] == "FAILED":
1158 raise NgRoException(desc_status["details"])
1159 elif desc_status["status"] == "BUILD":
1160 if stage:
1161 stage[2] = "VIM: ({})".format(desc_status["details"])
1162 elif desc_status["status"] == "DONE":
1163 if stage:
1164 stage[2] = "Deployed at VIM"
1165 break
1166 else:
1167 assert False, "ROclient.check_ns_status returns unknown {}".format(
1168 desc_status["status"]
1169 )
1170 if stage and nslcmop_id and stage[2] != detailed_status_old:
1171 detailed_status_old = stage[2]
1172 db_nsr_update["detailed-status"] = " ".join(stage)
1173 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1174 self._write_op_status(nslcmop_id, stage)
1175 await asyncio.sleep(15)
1176 else: # timeout_ns_deploy
1177 raise NgRoException("Timeout waiting ns to deploy")
1178
1179 async def _terminate_ng_ro(
1180 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1181 ):
1182 db_nsr_update = {}
1183 failed_detail = []
1184 action_id = None
1185 start_deploy = time()
1186 try:
1187 target = {
1188 "ns": {"vld": []},
1189 "vnf": [],
1190 "image": [],
1191 "flavor": [],
1192 "action_id": nslcmop_id,
1193 }
1194 desc = await self.RO.deploy(nsr_id, target)
1195 action_id = desc["action_id"]
1196 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1197 self.logger.debug(
1198 logging_text
1199 + "ns terminate action at RO. action_id={}".format(action_id)
1200 )
1201
1202 # wait until done
1203 delete_timeout = 20 * 60 # 20 minutes
1204 await self._wait_ng_ro(
1205 nsr_id,
1206 action_id,
1207 nslcmop_id,
1208 start_deploy,
1209 delete_timeout,
1210 stage,
1211 operation="termination",
1212 )
1213 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1214 # delete all nsr
1215 await self.RO.delete(nsr_id)
1216 except NgRoException as e:
1217 if e.http_code == 404: # not found
1218 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1219 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1220 self.logger.debug(
1221 logging_text + "RO_action_id={} already deleted".format(action_id)
1222 )
1223 elif e.http_code == 409: # conflict
1224 failed_detail.append("delete conflict: {}".format(e))
1225 self.logger.debug(
1226 logging_text
1227 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1228 )
1229 else:
1230 failed_detail.append("delete error: {}".format(e))
1231 self.logger.error(
1232 logging_text
1233 + "RO_action_id={} delete error: {}".format(action_id, e)
1234 )
1235 except Exception as e:
1236 failed_detail.append("delete error: {}".format(e))
1237 self.logger.error(
1238 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1239 )
1240
1241 if failed_detail:
1242 stage[2] = "Error deleting from VIM"
1243 else:
1244 stage[2] = "Deleted from VIM"
1245 db_nsr_update["detailed-status"] = " ".join(stage)
1246 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1247 self._write_op_status(nslcmop_id, stage)
1248
1249 if failed_detail:
1250 raise LcmException("; ".join(failed_detail))
1251 return
1252
1253 async def instantiate_RO(
1254 self,
1255 logging_text,
1256 nsr_id,
1257 nsd,
1258 db_nsr,
1259 db_nslcmop,
1260 db_vnfrs,
1261 db_vnfds,
1262 n2vc_key_list,
1263 stage,
1264 ):
1265 """
1266 Instantiate at RO
1267 :param logging_text: preffix text to use at logging
1268 :param nsr_id: nsr identity
1269 :param nsd: database content of ns descriptor
1270 :param db_nsr: database content of ns record
1271 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1272 :param db_vnfrs:
1273 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1274 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1275 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1276 :return: None or exception
1277 """
1278 try:
1279 start_deploy = time()
1280 ns_params = db_nslcmop.get("operationParams")
1281 if ns_params and ns_params.get("timeout_ns_deploy"):
1282 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1283 else:
1284 timeout_ns_deploy = self.timeout.ns_deploy
1285
1286 # Check for and optionally request placement optimization. Database will be updated if placement activated
1287 stage[2] = "Waiting for Placement."
1288 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1289 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1290 for vnfr in db_vnfrs.values():
1291 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1292 break
1293 else:
1294 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1295
1296 return await self._instantiate_ng_ro(
1297 logging_text,
1298 nsr_id,
1299 nsd,
1300 db_nsr,
1301 db_nslcmop,
1302 db_vnfrs,
1303 db_vnfds,
1304 n2vc_key_list,
1305 stage,
1306 start_deploy,
1307 timeout_ns_deploy,
1308 )
1309 except Exception as e:
1310 stage[2] = "ERROR deploying at VIM"
1311 self.set_vnfr_at_error(db_vnfrs, str(e))
1312 self.logger.error(
1313 "Error deploying at VIM {}".format(e),
1314 exc_info=not isinstance(
1315 e,
1316 (
1317 ROclient.ROClientException,
1318 LcmException,
1319 DbException,
1320 NgRoException,
1321 ),
1322 ),
1323 )
1324 raise
1325
1326 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1327 """
1328 Wait for kdu to be up, get ip address
1329 :param logging_text: prefix use for logging
1330 :param nsr_id:
1331 :param vnfr_id:
1332 :param kdu_name:
1333 :return: IP address, K8s services
1334 """
1335
1336 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1337 nb_tries = 0
1338
1339 while nb_tries < 360:
1340 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1341 kdur = next(
1342 (
1343 x
1344 for x in get_iterable(db_vnfr, "kdur")
1345 if x.get("kdu-name") == kdu_name
1346 ),
1347 None,
1348 )
1349 if not kdur:
1350 raise LcmException(
1351 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1352 )
1353 if kdur.get("status"):
1354 if kdur["status"] in ("READY", "ENABLED"):
1355 return kdur.get("ip-address"), kdur.get("services")
1356 else:
1357 raise LcmException(
1358 "target KDU={} is in error state".format(kdu_name)
1359 )
1360
1361 await asyncio.sleep(10)
1362 nb_tries += 1
1363 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1364
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id:
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        target_vdu_id = None
        ro_retries = 0

        # Poll every 10 s until the target VDU is ACTIVE with an IP address,
        # then (if pub_key and user are given) ask RO to inject the ssh key.
        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10)

            # get ip address
            if not target_vdu_id:
                # re-read the VNF record: status/ip are updated externally by RO
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # match both the vdu id and the count-index of the instance
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # a PDU is considered always up; otherwise require ACTIVE status
                # either from the legacy field or from the NG-RO vim_info
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is not possible on physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    # delegate the actual injection to NG-RO via an action target
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                break

        return ip_address
1490
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record identifier, re-read on every poll
        :param vca_deployed_list: list of deployed VCAs; entry vca_index is "us"
        :param vca_index: index of the VCA whose dependencies we wait for
        :raises LcmException: if a dependency reports BROKEN, or on timeout
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): "timeout" counts loop iterations, not seconds — with the
        # 10 s sleep below this allows up to ~50 minutes of waiting.
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a dependency is any VCA of the same member-vnf-index, or any
                # VCA at all when we are the NS-level VCA (no member-vnf-index)
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # some dependency still pending: leave the for loop so
                        # the for/else below does NOT return, and poll again
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1528
1529 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1530 vca_id = None
1531 if db_vnfr:
1532 vca_id = deep_get(db_vnfr, ("vca-id",))
1533 elif db_nsr:
1534 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1535 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1536 return vca_id
1537
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        kdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Create/register the execution environment for one VCA, install its
        configuration software, wait for the target element (NS/VNF/VDU/KDU)
        to be reachable, and run the Day-1 (initial config) primitives.

        :param logging_text: prefix used on every log line
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param nsi_id: network slice instance id ("" namespace part when None)
        :param db_nsr: NS record (provides _id and deployed VCA list)
        :param db_vnfr: VNF record, or falsy for an NS-level VCA
        :param vdu_id: VDU id when configuring a VDU, else None
        :param kdu_name: KDU name when configuring a KDU, else None
        :param vdu_index: VDU count-index (also used in the VCA namespace)
        :param kdu_index: KDU index, forwarded to prometheus job extraction
        :param config_descriptor: descriptor section holding config-access and
            initial-config-primitive for this element
        :param deploy_params: additional params for primitives; mutated here
            (rw_mgmt_ip and possibly ns_config_info are added)
        :param base_folder: package storage info used to build the artifact path
        :param nslcmop_id: operation id for status reporting
        :param stage: 3-item status list; item 0 is updated per element type
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/helm-v3
        :param vca_name: charm or chart name inside the package
        :param ee_config_descriptor: execution-environment descriptor entry
        :raises LcmException: wrapping any failure, with the failing step in the message
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current sub-task so the except clause can report it
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF, VDU or KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        # charm name is the last path component of the artifact
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=nsr_id,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        chart_model=vca_name,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                # native charm runs inside the VM itself, so the VM must be up
                # (and its IP known) before the EE can be registered
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive, if present, supplies the charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                # number of charm units may be overridden per NS/VNF/VDU
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            is_relation_added = await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )

            if not is_relation_added:
                raise LcmException("Relations could not be added to VCA.")

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # default rw_mgmt_ip to None, avoiding the non definition of the variable
                rw_mgmt_ip = None

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip, services = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                        vnfd = self.db.get_one(
                            "vnfds_revisions",
                            {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
                        )
                        kdu = get_kdu(vnfd, kdu_name)
                        kdu_services = [
                            service["name"] for service in get_kdu_services(kdu)
                        ]
                        # keep only the K8s services declared in the KDU descriptor
                        exposed_services = []
                        for service in services:
                            if any(s in service["name"] for s in kdu_services):
                                exposed_services.append(service)
                        await self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name="config",
                            params_dict={
                                "osm-config": json.dumps(
                                    OsmConfigBuilder(
                                        k8s={"services": exposed_services}
                                    ).build()
                                )
                            },
                            vca_id=vca_id,
                        )

                    # This verification is needed in order to avoid trying to add a public key
                    # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
                    # for a KNF and not for its KDUs, the previous verification gives False, and the code
                    # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
                    # or it is a KNF)
                    elif db_vnfr.get("vdur"):
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm-v3":
                # TODO: review for those cases where the helm chart is a reference and
                # is not part of the NF package
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                    element_type=element_type,
                    vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
                    vdu_id=vdu_id,
                    vdu_index=vdu_index,
                    kdu_name=kdu_name,
                    kdu_index=kdu_index,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{}. {}".format(step, e)) from e
2014
2015 def _write_ns_status(
2016 self,
2017 nsr_id: str,
2018 ns_state: str,
2019 current_operation: str,
2020 current_operation_id: str,
2021 error_description: str = None,
2022 error_detail: str = None,
2023 other_update: dict = None,
2024 ):
2025 """
2026 Update db_nsr fields.
2027 :param nsr_id:
2028 :param ns_state:
2029 :param current_operation:
2030 :param current_operation_id:
2031 :param error_description:
2032 :param error_detail:
2033 :param other_update: Other required changes at database if provided, will be cleared
2034 :return:
2035 """
2036 try:
2037 db_dict = other_update or {}
2038 db_dict[
2039 "_admin.nslcmop"
2040 ] = current_operation_id # for backward compatibility
2041 db_dict["_admin.current-operation"] = current_operation_id
2042 db_dict["_admin.operation-type"] = (
2043 current_operation if current_operation != "IDLE" else None
2044 )
2045 db_dict["currentOperation"] = current_operation
2046 db_dict["currentOperationID"] = current_operation_id
2047 db_dict["errorDescription"] = error_description
2048 db_dict["errorDetail"] = error_detail
2049
2050 if ns_state:
2051 db_dict["nsState"] = ns_state
2052 self.update_db_2("nsrs", nsr_id, db_dict)
2053 except DbException as e:
2054 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2055
2056 def _write_op_status(
2057 self,
2058 op_id: str,
2059 stage: list = None,
2060 error_message: str = None,
2061 queuePosition: int = 0,
2062 operation_state: str = None,
2063 other_update: dict = None,
2064 ):
2065 try:
2066 db_dict = other_update or {}
2067 db_dict["queuePosition"] = queuePosition
2068 if isinstance(stage, list):
2069 db_dict["stage"] = stage[0]
2070 db_dict["detailed-status"] = " ".join(stage)
2071 elif stage is not None:
2072 db_dict["stage"] = str(stage)
2073
2074 if error_message is not None:
2075 db_dict["errorMessage"] = error_message
2076 if operation_state is not None:
2077 db_dict["operationState"] = operation_state
2078 db_dict["statusEnteredTime"] = time()
2079 self.update_db_2("nslcmops", op_id, db_dict)
2080 except DbException as e:
2081 self.logger.warn(
2082 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2083 )
2084
2085 def _write_all_config_status(self, db_nsr: dict, status: str):
2086 try:
2087 nsr_id = db_nsr["_id"]
2088 # configurationStatus
2089 config_status = db_nsr.get("configurationStatus")
2090 if config_status:
2091 db_nsr_update = {
2092 "configurationStatus.{}.status".format(index): status
2093 for index, v in enumerate(config_status)
2094 if v
2095 }
2096 # update status
2097 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2098
2099 except DbException as e:
2100 self.logger.warn(
2101 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2102 )
2103
2104 def _write_configuration_status(
2105 self,
2106 nsr_id: str,
2107 vca_index: int,
2108 status: str = None,
2109 element_under_configuration: str = None,
2110 element_type: str = None,
2111 other_update: dict = None,
2112 ):
2113 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2114 # .format(vca_index, status))
2115
2116 try:
2117 db_path = "configurationStatus.{}.".format(vca_index)
2118 db_dict = other_update or {}
2119 if status:
2120 db_dict[db_path + "status"] = status
2121 if element_under_configuration:
2122 db_dict[
2123 db_path + "elementUnderConfiguration"
2124 ] = element_under_configuration
2125 if element_type:
2126 db_dict[db_path + "elementType"] = element_type
2127 self.update_db_2("nsrs", nsr_id, db_dict)
2128 except DbException as e:
2129 self.logger.warn(
2130 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2131 status, nsr_id, vca_index, e
2132 )
2133 )
2134
2135 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2136 """
2137 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2138 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2139 Database is used because the result can be obtained from a different LCM worker in case of HA.
2140 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2141 :param db_nslcmop: database content of nslcmop
2142 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2143 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2144 computed 'vim-account-id'
2145 """
2146 modified = False
2147 nslcmop_id = db_nslcmop["_id"]
2148 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2149 if placement_engine == "PLA":
2150 self.logger.debug(
2151 logging_text + "Invoke and wait for placement optimization"
2152 )
2153 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2154 db_poll_interval = 5
2155 wait = db_poll_interval * 10
2156 pla_result = None
2157 while not pla_result and wait >= 0:
2158 await asyncio.sleep(db_poll_interval)
2159 wait -= db_poll_interval
2160 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2161 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2162
2163 if not pla_result:
2164 raise LcmException(
2165 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2166 )
2167
2168 for pla_vnf in pla_result["vnf"]:
2169 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2170 if not pla_vnf.get("vimAccountId") or not vnfr:
2171 continue
2172 modified = True
2173 self.db.set_one(
2174 "vnfrs",
2175 {"_id": vnfr["_id"]},
2176 {"vim-account-id": pla_vnf["vimAccountId"]},
2177 )
2178 # Modifies db_vnfrs
2179 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2180 return modified
2181
2182 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2183 alerts = []
2184 nsr_id = vnfr["nsr-id-ref"]
2185 df = vnfd.get("df", [{}])[0]
2186 # Checking for auto-healing configuration
2187 if "healing-aspect" in df:
2188 healing_aspects = df["healing-aspect"]
2189 for healing in healing_aspects:
2190 for healing_policy in healing.get("healing-policy", ()):
2191 vdu_id = healing_policy["vdu-id"]
2192 vdur = next(
2193 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2194 {},
2195 )
2196 if not vdur:
2197 continue
2198 metric_name = "vm_status"
2199 vdu_name = vdur.get("name")
2200 vnf_member_index = vnfr["member-vnf-index-ref"]
2201 uuid = str(uuid4())
2202 name = f"healing_{uuid}"
2203 action = healing_policy
2204 # action_on_recovery = healing.get("action-on-recovery")
2205 # cooldown_time = healing.get("cooldown-time")
2206 # day1 = healing.get("day1")
2207 alert = {
2208 "uuid": uuid,
2209 "name": name,
2210 "metric": metric_name,
2211 "tags": {
2212 "ns_id": nsr_id,
2213 "vnf_member_index": vnf_member_index,
2214 "vdu_name": vdu_name,
2215 },
2216 "alarm_status": "ok",
2217 "action_type": "healing",
2218 "action": action,
2219 }
2220 alerts.append(alert)
2221 return alerts
2222
2223 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2224 alerts = []
2225 nsr_id = vnfr["nsr-id-ref"]
2226 df = vnfd.get("df", [{}])[0]
2227 # Checking for auto-scaling configuration
2228 if "scaling-aspect" in df:
2229 scaling_aspects = df["scaling-aspect"]
2230 all_vnfd_monitoring_params = {}
2231 for ivld in vnfd.get("int-virtual-link-desc", ()):
2232 for mp in ivld.get("monitoring-parameters", ()):
2233 all_vnfd_monitoring_params[mp.get("id")] = mp
2234 for vdu in vnfd.get("vdu", ()):
2235 for mp in vdu.get("monitoring-parameter", ()):
2236 all_vnfd_monitoring_params[mp.get("id")] = mp
2237 for df in vnfd.get("df", ()):
2238 for mp in df.get("monitoring-parameter", ()):
2239 all_vnfd_monitoring_params[mp.get("id")] = mp
2240 for scaling_aspect in scaling_aspects:
2241 scaling_group_name = scaling_aspect.get("name", "")
2242 # Get monitored VDUs
2243 all_monitored_vdus = set()
2244 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2245 "deltas", ()
2246 ):
2247 for vdu_delta in delta.get("vdu-delta", ()):
2248 all_monitored_vdus.add(vdu_delta.get("id"))
2249 monitored_vdurs = list(
2250 filter(
2251 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2252 vnfr["vdur"],
2253 )
2254 )
2255 if not monitored_vdurs:
2256 self.logger.error(
2257 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2258 )
2259 continue
2260 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2261 if scaling_policy["scaling-type"] != "automatic":
2262 continue
2263 threshold_time = scaling_policy.get("threshold-time", "1")
2264 cooldown_time = scaling_policy.get("cooldown-time", "0")
2265 for scaling_criteria in scaling_policy["scaling-criteria"]:
2266 monitoring_param_ref = scaling_criteria.get(
2267 "vnf-monitoring-param-ref"
2268 )
2269 vnf_monitoring_param = all_vnfd_monitoring_params[
2270 monitoring_param_ref
2271 ]
2272 for vdur in monitored_vdurs:
2273 vdu_id = vdur["vdu-id-ref"]
2274 metric_name = vnf_monitoring_param.get("performance-metric")
2275 metric_name = f"osm_{metric_name}"
2276 vnf_member_index = vnfr["member-vnf-index-ref"]
2277 scalein_threshold = scaling_criteria.get(
2278 "scale-in-threshold"
2279 )
2280 scaleout_threshold = scaling_criteria.get(
2281 "scale-out-threshold"
2282 )
2283 # Looking for min/max-number-of-instances
2284 instances_min_number = 1
2285 instances_max_number = 1
2286 vdu_profile = df["vdu-profile"]
2287 if vdu_profile:
2288 profile = next(
2289 item for item in vdu_profile if item["id"] == vdu_id
2290 )
2291 instances_min_number = profile.get(
2292 "min-number-of-instances", 1
2293 )
2294 instances_max_number = profile.get(
2295 "max-number-of-instances", 1
2296 )
2297
2298 if scalein_threshold:
2299 uuid = str(uuid4())
2300 name = f"scalein_{uuid}"
2301 operation = scaling_criteria[
2302 "scale-in-relational-operation"
2303 ]
2304 rel_operator = self.rel_operation_types.get(
2305 operation, "<="
2306 )
2307 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2308 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2309 labels = {
2310 "ns_id": nsr_id,
2311 "vnf_member_index": vnf_member_index,
2312 "vdu_id": vdu_id,
2313 }
2314 prom_cfg = {
2315 "alert": name,
2316 "expr": expression,
2317 "for": str(threshold_time) + "m",
2318 "labels": labels,
2319 }
2320 action = scaling_policy
2321 action = {
2322 "scaling-group": scaling_group_name,
2323 "cooldown-time": cooldown_time,
2324 }
2325 alert = {
2326 "uuid": uuid,
2327 "name": name,
2328 "metric": metric_name,
2329 "tags": {
2330 "ns_id": nsr_id,
2331 "vnf_member_index": vnf_member_index,
2332 "vdu_id": vdu_id,
2333 },
2334 "alarm_status": "ok",
2335 "action_type": "scale_in",
2336 "action": action,
2337 "prometheus_config": prom_cfg,
2338 }
2339 alerts.append(alert)
2340
2341 if scaleout_threshold:
2342 uuid = str(uuid4())
2343 name = f"scaleout_{uuid}"
2344 operation = scaling_criteria[
2345 "scale-out-relational-operation"
2346 ]
2347 rel_operator = self.rel_operation_types.get(
2348 operation, "<="
2349 )
2350 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2351 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2352 labels = {
2353 "ns_id": nsr_id,
2354 "vnf_member_index": vnf_member_index,
2355 "vdu_id": vdu_id,
2356 }
2357 prom_cfg = {
2358 "alert": name,
2359 "expr": expression,
2360 "for": str(threshold_time) + "m",
2361 "labels": labels,
2362 }
2363 action = scaling_policy
2364 action = {
2365 "scaling-group": scaling_group_name,
2366 "cooldown-time": cooldown_time,
2367 }
2368 alert = {
2369 "uuid": uuid,
2370 "name": name,
2371 "metric": metric_name,
2372 "tags": {
2373 "ns_id": nsr_id,
2374 "vnf_member_index": vnf_member_index,
2375 "vdu_id": vdu_id,
2376 },
2377 "alarm_status": "ok",
2378 "action_type": "scale_out",
2379 "action": action,
2380 "prometheus_config": prom_cfg,
2381 }
2382 alerts.append(alert)
2383 return alerts
2384
    def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
        """Build the VDU alarm alert records declared by the VNFD.

        For each deployed VDU instance that declares "alarm" entries, one
        alert per alarm descriptor is created, including the Prometheus rule
        that evaluates the referenced monitoring parameter against the
        descriptor's threshold. Descriptors referencing missing or incomplete
        monitoring parameters are logged and skipped.

        :param vnfr: vnf record content; "vdur" entries are iterated
        :param vnfd: vnf descriptor content; each vdur must have a matching
            "vdu" entry (otherwise next() raises StopIteration)
        :return: list of alert dictionaries ready to be stored in "alerts"
        """
        alerts = []
        nsr_id = vnfr["nsr-id-ref"]
        vnf_member_index = vnfr["member-vnf-index-ref"]

        # Checking for VNF alarm configuration
        for vdur in vnfr["vdur"]:
            vdu_id = vdur["vdu-id-ref"]
            vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
            if "alarm" in vdu:
                # Get VDU monitoring params, since alerts are based on them
                vdu_monitoring_params = {}
                for mp in vdu.get("monitoring-parameter", []):
                    vdu_monitoring_params[mp.get("id")] = mp
                if not vdu_monitoring_params:
                    self.logger.error(
                        "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
                    )
                    continue
                # Get alarms in the VDU
                alarm_descriptors = vdu["alarm"]
                # Create VDU alarms for each alarm in the VDU
                for alarm_descriptor in alarm_descriptors:
                    # Check that the VDU alarm refers to a proper monitoring param
                    alarm_monitoring_param = alarm_descriptor.get(
                        "vnf-monitoring-param-ref", ""
                    )
                    vdu_specific_monitoring_param = vdu_monitoring_params.get(
                        alarm_monitoring_param, {}
                    )
                    if not vdu_specific_monitoring_param:
                        self.logger.error(
                            "VDU alarm refers to a VDU monitoring param not present in the VDU"
                        )
                        continue
                    metric_name = vdu_specific_monitoring_param.get(
                        "performance-metric"
                    )
                    if not metric_name:
                        self.logger.error(
                            "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
                        )
                        continue
                    # Set params of the alarm to be created in Prometheus
                    # all OSM metrics are exported with the "osm_" prefix
                    metric_name = f"osm_{metric_name}"
                    metric_threshold = alarm_descriptor.get("value")
                    uuid = str(uuid4())
                    alert_name = f"vdu_alarm_{uuid}"
                    operation = alarm_descriptor["operation"]
                    # default to "<=" when the operation is not a known type
                    rel_operator = self.rel_operation_types.get(operation, "<=")
                    metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
                    expression = f"{metric_selector} {rel_operator} {metric_threshold}"
                    labels = {
                        "ns_id": nsr_id,
                        "vnf_member_index": vnf_member_index,
                        "vdu_id": vdu_id,
                        "vdu_name": "{{ $labels.vdu_name }}",
                    }
                    prom_cfg = {
                        "alert": alert_name,
                        "expr": expression,
                        "for": "1m",  # default value. Ideally, this should be related to an IM param, but there is not such param
                        "labels": labels,
                    }
                    # collect only the action lists the descriptor declares
                    alarm_action = dict()
                    for action_type in ["ok", "insufficient-data", "alarm"]:
                        if (
                            "actions" in alarm_descriptor
                            and action_type in alarm_descriptor["actions"]
                        ):
                            alarm_action[action_type] = alarm_descriptor["actions"][
                                action_type
                            ]
                    alert = {
                        "uuid": uuid,
                        "name": alert_name,
                        "metric": metric_name,
                        "tags": {
                            "ns_id": nsr_id,
                            "vnf_member_index": vnf_member_index,
                            "vdu_id": vdu_id,
                        },
                        "alarm_status": "ok",
                        "action_type": "vdu_alarm",
                        "action": alarm_action,
                        "prometheus_config": prom_cfg,
                    }
                    alerts.append(alert)
        return alerts
2474
2475 def update_nsrs_with_pla_result(self, params):
2476 try:
2477 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2478 self.update_db_2(
2479 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2480 )
2481 except Exception as e:
2482 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2483
2484 async def instantiate(self, nsr_id, nslcmop_id):
2485 """
2486
2487 :param nsr_id: ns instance to deploy
2488 :param nslcmop_id: operation to run
2489 :return:
2490 """
2491
2492 # Try to lock HA task here
2493 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2494 if not task_is_locked_by_me:
2495 self.logger.debug(
2496 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2497 )
2498 return
2499
2500 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2501 self.logger.debug(logging_text + "Enter")
2502
2503 # get all needed from database
2504
2505 # database nsrs record
2506 db_nsr = None
2507
2508 # database nslcmops record
2509 db_nslcmop = None
2510
2511 # update operation on nsrs
2512 db_nsr_update = {}
2513 # update operation on nslcmops
2514 db_nslcmop_update = {}
2515
2516 timeout_ns_deploy = self.timeout.ns_deploy
2517
2518 nslcmop_operation_state = None
2519 db_vnfrs = {} # vnf's info indexed by member-index
2520 # n2vc_info = {}
2521 tasks_dict_info = {} # from task to info text
2522 exc = None
2523 error_list = []
2524 stage = [
2525 "Stage 1/5: preparation of the environment.",
2526 "Waiting for previous operations to terminate.",
2527 "",
2528 ]
2529 # ^ stage, step, VIM progress
2530 try:
2531 # wait for any previous tasks in process
2532 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2533
2534 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2535 stage[1] = "Reading from database."
2536 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2537 db_nsr_update["detailed-status"] = "creating"
2538 db_nsr_update["operational-status"] = "init"
2539 self._write_ns_status(
2540 nsr_id=nsr_id,
2541 ns_state="BUILDING",
2542 current_operation="INSTANTIATING",
2543 current_operation_id=nslcmop_id,
2544 other_update=db_nsr_update,
2545 )
2546 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2547
2548 # read from db: operation
2549 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2550 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2551 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2552 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2553 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2554 )
2555 ns_params = db_nslcmop.get("operationParams")
2556 if ns_params and ns_params.get("timeout_ns_deploy"):
2557 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2558
2559 # read from db: ns
2560 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2561 self.logger.debug(logging_text + stage[1])
2562 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2563 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2564 self.logger.debug(logging_text + stage[1])
2565 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2566 self.fs.sync(db_nsr["nsd-id"])
2567 db_nsr["nsd"] = nsd
2568 # nsr_name = db_nsr["name"] # TODO short-name??
2569
2570 # read from db: vnf's of this ns
2571 stage[1] = "Getting vnfrs from db."
2572 self.logger.debug(logging_text + stage[1])
2573 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2574
2575 # read from db: vnfd's for every vnf
2576 db_vnfds = [] # every vnfd data
2577
2578 # for each vnf in ns, read vnfd
2579 for vnfr in db_vnfrs_list:
2580 if vnfr.get("kdur"):
2581 kdur_list = []
2582 for kdur in vnfr["kdur"]:
2583 if kdur.get("additionalParams"):
2584 kdur["additionalParams"] = json.loads(
2585 kdur["additionalParams"]
2586 )
2587 kdur_list.append(kdur)
2588 vnfr["kdur"] = kdur_list
2589
2590 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2591 vnfd_id = vnfr["vnfd-id"]
2592 vnfd_ref = vnfr["vnfd-ref"]
2593 self.fs.sync(vnfd_id)
2594
2595 # if we haven't this vnfd, read it from db
2596 if vnfd_id not in db_vnfds:
2597 # read from db
2598 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2599 vnfd_id, vnfd_ref
2600 )
2601 self.logger.debug(logging_text + stage[1])
2602 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2603
2604 # store vnfd
2605 db_vnfds.append(vnfd)
2606
2607 # Get or generates the _admin.deployed.VCA list
2608 vca_deployed_list = None
2609 if db_nsr["_admin"].get("deployed"):
2610 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2611 if vca_deployed_list is None:
2612 vca_deployed_list = []
2613 configuration_status_list = []
2614 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2615 db_nsr_update["configurationStatus"] = configuration_status_list
2616 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2617 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2618 elif isinstance(vca_deployed_list, dict):
2619 # maintain backward compatibility. Change a dict to list at database
2620 vca_deployed_list = list(vca_deployed_list.values())
2621 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2622 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2623
2624 if not isinstance(
2625 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2626 ):
2627 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2628 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2629
2630 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2631 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2632 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2633 self.db.set_list(
2634 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2635 )
2636
2637 # n2vc_redesign STEP 2 Deploy Network Scenario
2638 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2639 self._write_op_status(op_id=nslcmop_id, stage=stage)
2640
2641 stage[1] = "Deploying KDUs."
2642 # self.logger.debug(logging_text + "Before deploy_kdus")
2643 # Call to deploy_kdus in case exists the "vdu:kdu" param
2644 await self.deploy_kdus(
2645 logging_text=logging_text,
2646 nsr_id=nsr_id,
2647 nslcmop_id=nslcmop_id,
2648 db_vnfrs=db_vnfrs,
2649 db_vnfds=db_vnfds,
2650 task_instantiation_info=tasks_dict_info,
2651 )
2652
2653 stage[1] = "Getting VCA public key."
2654 # n2vc_redesign STEP 1 Get VCA public ssh-key
2655 # feature 1429. Add n2vc public key to needed VMs
2656 n2vc_key = self.n2vc.get_public_key()
2657 n2vc_key_list = [n2vc_key]
2658 if self.vca_config.public_key:
2659 n2vc_key_list.append(self.vca_config.public_key)
2660
2661 stage[1] = "Deploying NS at VIM."
2662 task_ro = asyncio.ensure_future(
2663 self.instantiate_RO(
2664 logging_text=logging_text,
2665 nsr_id=nsr_id,
2666 nsd=nsd,
2667 db_nsr=db_nsr,
2668 db_nslcmop=db_nslcmop,
2669 db_vnfrs=db_vnfrs,
2670 db_vnfds=db_vnfds,
2671 n2vc_key_list=n2vc_key_list,
2672 stage=stage,
2673 )
2674 )
2675 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2676 tasks_dict_info[task_ro] = "Deploying at VIM"
2677
2678 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2679 stage[1] = "Deploying Execution Environments."
2680 self.logger.debug(logging_text + stage[1])
2681
2682 # create namespace and certificate if any helm based EE is present in the NS
2683 if check_helm_ee_in_ns(db_vnfds):
2684 await self.vca_map["helm-v3"].setup_ns_namespace(
2685 name=nsr_id,
2686 )
2687 # create TLS certificates
2688 await self.vca_map["helm-v3"].create_tls_certificate(
2689 secret_name=self.EE_TLS_NAME,
2690 dns_prefix="*",
2691 nsr_id=nsr_id,
2692 usage="server auth",
2693 namespace=nsr_id,
2694 )
2695
2696 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2697 for vnf_profile in get_vnf_profiles(nsd):
2698 vnfd_id = vnf_profile["vnfd-id"]
2699 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2700 member_vnf_index = str(vnf_profile["id"])
2701 db_vnfr = db_vnfrs[member_vnf_index]
2702 base_folder = vnfd["_admin"]["storage"]
2703 vdu_id = None
2704 vdu_index = 0
2705 vdu_name = None
2706 kdu_name = None
2707 kdu_index = None
2708
2709 # Get additional parameters
2710 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2711 if db_vnfr.get("additionalParamsForVnf"):
2712 deploy_params.update(
2713 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2714 )
2715
2716 descriptor_config = get_configuration(vnfd, vnfd["id"])
2717 if descriptor_config:
2718 self._deploy_n2vc(
2719 logging_text=logging_text
2720 + "member_vnf_index={} ".format(member_vnf_index),
2721 db_nsr=db_nsr,
2722 db_vnfr=db_vnfr,
2723 nslcmop_id=nslcmop_id,
2724 nsr_id=nsr_id,
2725 nsi_id=nsi_id,
2726 vnfd_id=vnfd_id,
2727 vdu_id=vdu_id,
2728 kdu_name=kdu_name,
2729 member_vnf_index=member_vnf_index,
2730 vdu_index=vdu_index,
2731 kdu_index=kdu_index,
2732 vdu_name=vdu_name,
2733 deploy_params=deploy_params,
2734 descriptor_config=descriptor_config,
2735 base_folder=base_folder,
2736 task_instantiation_info=tasks_dict_info,
2737 stage=stage,
2738 )
2739
2740 # Deploy charms for each VDU that supports one.
2741 for vdud in get_vdu_list(vnfd):
2742 vdu_id = vdud["id"]
2743 descriptor_config = get_configuration(vnfd, vdu_id)
2744 vdur = find_in_list(
2745 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2746 )
2747
2748 if vdur.get("additionalParams"):
2749 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2750 else:
2751 deploy_params_vdu = deploy_params
2752 deploy_params_vdu["OSM"] = get_osm_params(
2753 db_vnfr, vdu_id, vdu_count_index=0
2754 )
2755 vdud_count = get_number_of_instances(vnfd, vdu_id)
2756
2757 self.logger.debug("VDUD > {}".format(vdud))
2758 self.logger.debug(
2759 "Descriptor config > {}".format(descriptor_config)
2760 )
2761 if descriptor_config:
2762 vdu_name = None
2763 kdu_name = None
2764 kdu_index = None
2765 for vdu_index in range(vdud_count):
2766 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2767 self._deploy_n2vc(
2768 logging_text=logging_text
2769 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2770 member_vnf_index, vdu_id, vdu_index
2771 ),
2772 db_nsr=db_nsr,
2773 db_vnfr=db_vnfr,
2774 nslcmop_id=nslcmop_id,
2775 nsr_id=nsr_id,
2776 nsi_id=nsi_id,
2777 vnfd_id=vnfd_id,
2778 vdu_id=vdu_id,
2779 kdu_name=kdu_name,
2780 kdu_index=kdu_index,
2781 member_vnf_index=member_vnf_index,
2782 vdu_index=vdu_index,
2783 vdu_name=vdu_name,
2784 deploy_params=deploy_params_vdu,
2785 descriptor_config=descriptor_config,
2786 base_folder=base_folder,
2787 task_instantiation_info=tasks_dict_info,
2788 stage=stage,
2789 )
2790 for kdud in get_kdu_list(vnfd):
2791 kdu_name = kdud["name"]
2792 descriptor_config = get_configuration(vnfd, kdu_name)
2793 if descriptor_config:
2794 vdu_id = None
2795 vdu_index = 0
2796 vdu_name = None
2797 kdu_index, kdur = next(
2798 x
2799 for x in enumerate(db_vnfr["kdur"])
2800 if x[1]["kdu-name"] == kdu_name
2801 )
2802 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2803 if kdur.get("additionalParams"):
2804 deploy_params_kdu.update(
2805 parse_yaml_strings(kdur["additionalParams"].copy())
2806 )
2807
2808 self._deploy_n2vc(
2809 logging_text=logging_text,
2810 db_nsr=db_nsr,
2811 db_vnfr=db_vnfr,
2812 nslcmop_id=nslcmop_id,
2813 nsr_id=nsr_id,
2814 nsi_id=nsi_id,
2815 vnfd_id=vnfd_id,
2816 vdu_id=vdu_id,
2817 kdu_name=kdu_name,
2818 member_vnf_index=member_vnf_index,
2819 vdu_index=vdu_index,
2820 kdu_index=kdu_index,
2821 vdu_name=vdu_name,
2822 deploy_params=deploy_params_kdu,
2823 descriptor_config=descriptor_config,
2824 base_folder=base_folder,
2825 task_instantiation_info=tasks_dict_info,
2826 stage=stage,
2827 )
2828
2829 # Check if each vnf has exporter for metric collection if so update prometheus job records
2830 if "exporters-endpoints" in vnfd.get("df")[0]:
2831 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2832 self.logger.debug("exporter config :{}".format(exporter_config))
2833 artifact_path = "{}/{}/{}".format(
2834 base_folder["folder"],
2835 base_folder["pkg-dir"],
2836 "exporter-endpoint",
2837 )
2838 ee_id = None
2839 ee_config_descriptor = exporter_config
2840 vnfr_id = db_vnfr["id"]
2841 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2842 logging_text,
2843 nsr_id,
2844 vnfr_id,
2845 vdu_id=None,
2846 vdu_index=None,
2847 user=None,
2848 pub_key=None,
2849 )
2850 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2851 self.logger.debug("Artifact_path:{}".format(artifact_path))
2852 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2853 vdu_id_for_prom = None
2854 vdu_index_for_prom = None
2855 for x in get_iterable(db_vnfr, "vdur"):
2856 vdu_id_for_prom = x.get("vdu-id-ref")
2857 vdu_index_for_prom = x.get("count-index")
2858 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2859 ee_id=ee_id,
2860 artifact_path=artifact_path,
2861 ee_config_descriptor=ee_config_descriptor,
2862 vnfr_id=vnfr_id,
2863 nsr_id=nsr_id,
2864 target_ip=rw_mgmt_ip,
2865 element_type="VDU",
2866 vdu_id=vdu_id_for_prom,
2867 vdu_index=vdu_index_for_prom,
2868 )
2869
2870 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2871 if prometheus_jobs:
2872 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2873 self.update_db_2(
2874 "nsrs",
2875 nsr_id,
2876 db_nsr_update,
2877 )
2878
2879 for job in prometheus_jobs:
2880 self.db.set_one(
2881 "prometheus_jobs",
2882 {"job_name": job["job_name"]},
2883 job,
2884 upsert=True,
2885 fail_on_empty=False,
2886 )
2887
2888 # Check if this NS has a charm configuration
2889 descriptor_config = nsd.get("ns-configuration")
2890 if descriptor_config and descriptor_config.get("juju"):
2891 vnfd_id = None
2892 db_vnfr = None
2893 member_vnf_index = None
2894 vdu_id = None
2895 kdu_name = None
2896 kdu_index = None
2897 vdu_index = 0
2898 vdu_name = None
2899
2900 # Get additional parameters
2901 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2902 if db_nsr.get("additionalParamsForNs"):
2903 deploy_params.update(
2904 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2905 )
2906 base_folder = nsd["_admin"]["storage"]
2907 self._deploy_n2vc(
2908 logging_text=logging_text,
2909 db_nsr=db_nsr,
2910 db_vnfr=db_vnfr,
2911 nslcmop_id=nslcmop_id,
2912 nsr_id=nsr_id,
2913 nsi_id=nsi_id,
2914 vnfd_id=vnfd_id,
2915 vdu_id=vdu_id,
2916 kdu_name=kdu_name,
2917 member_vnf_index=member_vnf_index,
2918 vdu_index=vdu_index,
2919 kdu_index=kdu_index,
2920 vdu_name=vdu_name,
2921 deploy_params=deploy_params,
2922 descriptor_config=descriptor_config,
2923 base_folder=base_folder,
2924 task_instantiation_info=tasks_dict_info,
2925 stage=stage,
2926 )
2927
2928 # rest of staff will be done at finally
2929
2930 except (
2931 ROclient.ROClientException,
2932 DbException,
2933 LcmException,
2934 N2VCException,
2935 ) as e:
2936 self.logger.error(
2937 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2938 )
2939 exc = e
2940 except asyncio.CancelledError:
2941 self.logger.error(
2942 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2943 )
2944 exc = "Operation was cancelled"
2945 except Exception as e:
2946 exc = traceback.format_exc()
2947 self.logger.critical(
2948 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2949 exc_info=True,
2950 )
2951 finally:
2952 if exc:
2953 error_list.append(str(exc))
2954 try:
2955 # wait for pending tasks
2956 if tasks_dict_info:
2957 stage[1] = "Waiting for instantiate pending tasks."
2958 self.logger.debug(logging_text + stage[1])
2959 error_list += await self._wait_for_tasks(
2960 logging_text,
2961 tasks_dict_info,
2962 timeout_ns_deploy,
2963 stage,
2964 nslcmop_id,
2965 nsr_id=nsr_id,
2966 )
2967 stage[1] = stage[2] = ""
2968 except asyncio.CancelledError:
2969 error_list.append("Cancelled")
2970 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
2971 await self._wait_for_tasks(
2972 logging_text,
2973 tasks_dict_info,
2974 timeout_ns_deploy,
2975 stage,
2976 nslcmop_id,
2977 nsr_id=nsr_id,
2978 )
2979 except Exception as exc:
2980 error_list.append(str(exc))
2981
2982 # update operation-status
2983 db_nsr_update["operational-status"] = "running"
2984 # let's begin with VCA 'configured' status (later we can change it)
2985 db_nsr_update["config-status"] = "configured"
2986 for task, task_name in tasks_dict_info.items():
2987 if not task.done() or task.cancelled() or task.exception():
2988 if task_name.startswith(self.task_name_deploy_vca):
2989 # A N2VC task is pending
2990 db_nsr_update["config-status"] = "failed"
2991 else:
2992 # RO or KDU task is pending
2993 db_nsr_update["operational-status"] = "failed"
2994
2995 # update status at database
2996 if error_list:
2997 error_detail = ". ".join(error_list)
2998 self.logger.error(logging_text + error_detail)
2999 error_description_nslcmop = "{} Detail: {}".format(
3000 stage[0], error_detail
3001 )
3002 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3003 nslcmop_id, stage[0]
3004 )
3005
3006 db_nsr_update["detailed-status"] = (
3007 error_description_nsr + " Detail: " + error_detail
3008 )
3009 db_nslcmop_update["detailed-status"] = error_detail
3010 nslcmop_operation_state = "FAILED"
3011 ns_state = "BROKEN"
3012 else:
3013 error_detail = None
3014 error_description_nsr = error_description_nslcmop = None
3015 ns_state = "READY"
3016 db_nsr_update["detailed-status"] = "Done"
3017 db_nslcmop_update["detailed-status"] = "Done"
3018 nslcmop_operation_state = "COMPLETED"
3019 # Gather auto-healing and auto-scaling alerts for each vnfr
3020 healing_alerts = []
3021 scaling_alerts = []
3022 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3023 vnfd = next(
3024 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3025 )
3026 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3027 for alert in healing_alerts:
3028 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3029 self.db.create("alerts", alert)
3030
3031 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3032 for alert in scaling_alerts:
3033 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3034 self.db.create("alerts", alert)
3035
3036 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3037 for alert in alarm_alerts:
3038 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3039 self.db.create("alerts", alert)
3040 if db_nsr:
3041 self._write_ns_status(
3042 nsr_id=nsr_id,
3043 ns_state=ns_state,
3044 current_operation="IDLE",
3045 current_operation_id=None,
3046 error_description=error_description_nsr,
3047 error_detail=error_detail,
3048 other_update=db_nsr_update,
3049 )
3050 self._write_op_status(
3051 op_id=nslcmop_id,
3052 stage="",
3053 error_message=error_description_nslcmop,
3054 operation_state=nslcmop_operation_state,
3055 other_update=db_nslcmop_update,
3056 )
3057
3058 if nslcmop_operation_state:
3059 try:
3060 await self.msg.aiowrite(
3061 "ns",
3062 "instantiated",
3063 {
3064 "nsr_id": nsr_id,
3065 "nslcmop_id": nslcmop_id,
3066 "operationState": nslcmop_operation_state,
3067 "startTime": db_nslcmop["startTime"],
3068 "links": db_nslcmop["links"],
3069 "operationParams": {
3070 "nsInstanceId": nsr_id,
3071 "nsdId": db_nsr["nsd-id"],
3072 },
3073 },
3074 )
3075 except Exception as e:
3076 self.logger.error(
3077 logging_text + "kafka_write notification Exception {}".format(e)
3078 )
3079
3080 self.logger.debug(logging_text + "Exit")
3081 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3082
3083 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3084 if vnfd_id not in cached_vnfds:
3085 cached_vnfds[vnfd_id] = self.db.get_one(
3086 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3087 )
3088 return cached_vnfds[vnfd_id]
3089
3090 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3091 if vnf_profile_id not in cached_vnfrs:
3092 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3093 "vnfrs",
3094 {
3095 "member-vnf-index-ref": vnf_profile_id,
3096 "nsr-id-ref": nsr_id,
3097 },
3098 )
3099 return cached_vnfrs[vnf_profile_id]
3100
3101 def _is_deployed_vca_in_relation(
3102 self, vca: DeployedVCA, relation: Relation
3103 ) -> bool:
3104 found = False
3105 for endpoint in (relation.provider, relation.requirer):
3106 if endpoint["kdu-resource-profile-id"]:
3107 continue
3108 found = (
3109 vca.vnf_profile_id == endpoint.vnf_profile_id
3110 and vca.vdu_profile_id == endpoint.vdu_profile_id
3111 and vca.execution_environment_ref == endpoint.execution_environment_ref
3112 )
3113 if found:
3114 break
3115 return found
3116
3117 def _update_ee_relation_data_with_implicit_data(
3118 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3119 ):
3120 ee_relation_data = safe_get_ee_relation(
3121 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3122 )
3123 ee_relation_level = EELevel.get_level(ee_relation_data)
3124 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3125 "execution-environment-ref"
3126 ]:
3127 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3128 vnfd_id = vnf_profile["vnfd-id"]
3129 project = nsd["_admin"]["projects_read"][0]
3130 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3131 entity_id = (
3132 vnfd_id
3133 if ee_relation_level == EELevel.VNF
3134 else ee_relation_data["vdu-profile-id"]
3135 )
3136 ee = get_juju_ee_ref(db_vnfd, entity_id)
3137 if not ee:
3138 raise Exception(
3139 f"not execution environments found for ee_relation {ee_relation_data}"
3140 )
3141 ee_relation_data["execution-environment-ref"] = ee["id"]
3142 return ee_relation_data
3143
3144 def _get_ns_relations(
3145 self,
3146 nsr_id: str,
3147 nsd: Dict[str, Any],
3148 vca: DeployedVCA,
3149 cached_vnfds: Dict[str, Any],
3150 ) -> List[Relation]:
3151 relations = []
3152 db_ns_relations = get_ns_configuration_relation_list(nsd)
3153 for r in db_ns_relations:
3154 provider_dict = None
3155 requirer_dict = None
3156 if all(key in r for key in ("provider", "requirer")):
3157 provider_dict = r["provider"]
3158 requirer_dict = r["requirer"]
3159 elif "entities" in r:
3160 provider_id = r["entities"][0]["id"]
3161 provider_dict = {
3162 "nsr-id": nsr_id,
3163 "endpoint": r["entities"][0]["endpoint"],
3164 }
3165 if provider_id != nsd["id"]:
3166 provider_dict["vnf-profile-id"] = provider_id
3167 requirer_id = r["entities"][1]["id"]
3168 requirer_dict = {
3169 "nsr-id": nsr_id,
3170 "endpoint": r["entities"][1]["endpoint"],
3171 }
3172 if requirer_id != nsd["id"]:
3173 requirer_dict["vnf-profile-id"] = requirer_id
3174 else:
3175 raise Exception(
3176 "provider/requirer or entities must be included in the relation."
3177 )
3178 relation_provider = self._update_ee_relation_data_with_implicit_data(
3179 nsr_id, nsd, provider_dict, cached_vnfds
3180 )
3181 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3182 nsr_id, nsd, requirer_dict, cached_vnfds
3183 )
3184 provider = EERelation(relation_provider)
3185 requirer = EERelation(relation_requirer)
3186 relation = Relation(r["name"], provider, requirer)
3187 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3188 if vca_in_relation:
3189 relations.append(relation)
3190 return relations
3191
3192 def _get_vnf_relations(
3193 self,
3194 nsr_id: str,
3195 nsd: Dict[str, Any],
3196 vca: DeployedVCA,
3197 cached_vnfds: Dict[str, Any],
3198 ) -> List[Relation]:
3199 relations = []
3200 if vca.target_element == "ns":
3201 self.logger.debug("VCA is a NS charm, not a VNF.")
3202 return relations
3203 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3204 vnf_profile_id = vnf_profile["id"]
3205 vnfd_id = vnf_profile["vnfd-id"]
3206 project = nsd["_admin"]["projects_read"][0]
3207 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3208 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3209 for r in db_vnf_relations:
3210 provider_dict = None
3211 requirer_dict = None
3212 if all(key in r for key in ("provider", "requirer")):
3213 provider_dict = r["provider"]
3214 requirer_dict = r["requirer"]
3215 elif "entities" in r:
3216 provider_id = r["entities"][0]["id"]
3217 provider_dict = {
3218 "nsr-id": nsr_id,
3219 "vnf-profile-id": vnf_profile_id,
3220 "endpoint": r["entities"][0]["endpoint"],
3221 }
3222 if provider_id != vnfd_id:
3223 provider_dict["vdu-profile-id"] = provider_id
3224 requirer_id = r["entities"][1]["id"]
3225 requirer_dict = {
3226 "nsr-id": nsr_id,
3227 "vnf-profile-id": vnf_profile_id,
3228 "endpoint": r["entities"][1]["endpoint"],
3229 }
3230 if requirer_id != vnfd_id:
3231 requirer_dict["vdu-profile-id"] = requirer_id
3232 else:
3233 raise Exception(
3234 "provider/requirer or entities must be included in the relation."
3235 )
3236 relation_provider = self._update_ee_relation_data_with_implicit_data(
3237 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3238 )
3239 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3240 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3241 )
3242 provider = EERelation(relation_provider)
3243 requirer = EERelation(relation_requirer)
3244 relation = Relation(r["name"], provider, requirer)
3245 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3246 if vca_in_relation:
3247 relations.append(relation)
3248 return relations
3249
3250 def _get_kdu_resource_data(
3251 self,
3252 ee_relation: EERelation,
3253 db_nsr: Dict[str, Any],
3254 cached_vnfds: Dict[str, Any],
3255 ) -> DeployedK8sResource:
3256 nsd = get_nsd(db_nsr)
3257 vnf_profiles = get_vnf_profiles(nsd)
3258 vnfd_id = find_in_list(
3259 vnf_profiles,
3260 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3261 )["vnfd-id"]
3262 project = nsd["_admin"]["projects_read"][0]
3263 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3264 kdu_resource_profile = get_kdu_resource_profile(
3265 db_vnfd, ee_relation.kdu_resource_profile_id
3266 )
3267 kdu_name = kdu_resource_profile["kdu-name"]
3268 deployed_kdu, _ = get_deployed_kdu(
3269 db_nsr.get("_admin", ()).get("deployed", ()),
3270 kdu_name,
3271 ee_relation.vnf_profile_id,
3272 )
3273 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3274 return deployed_kdu
3275
3276 def _get_deployed_component(
3277 self,
3278 ee_relation: EERelation,
3279 db_nsr: Dict[str, Any],
3280 cached_vnfds: Dict[str, Any],
3281 ) -> DeployedComponent:
3282 nsr_id = db_nsr["_id"]
3283 deployed_component = None
3284 ee_level = EELevel.get_level(ee_relation)
3285 if ee_level == EELevel.NS:
3286 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3287 if vca:
3288 deployed_component = DeployedVCA(nsr_id, vca)
3289 elif ee_level == EELevel.VNF:
3290 vca = get_deployed_vca(
3291 db_nsr,
3292 {
3293 "vdu_id": None,
3294 "member-vnf-index": ee_relation.vnf_profile_id,
3295 "ee_descriptor_id": ee_relation.execution_environment_ref,
3296 },
3297 )
3298 if vca:
3299 deployed_component = DeployedVCA(nsr_id, vca)
3300 elif ee_level == EELevel.VDU:
3301 vca = get_deployed_vca(
3302 db_nsr,
3303 {
3304 "vdu_id": ee_relation.vdu_profile_id,
3305 "member-vnf-index": ee_relation.vnf_profile_id,
3306 "ee_descriptor_id": ee_relation.execution_environment_ref,
3307 },
3308 )
3309 if vca:
3310 deployed_component = DeployedVCA(nsr_id, vca)
3311 elif ee_level == EELevel.KDU:
3312 kdu_resource_data = self._get_kdu_resource_data(
3313 ee_relation, db_nsr, cached_vnfds
3314 )
3315 if kdu_resource_data:
3316 deployed_component = DeployedK8sResource(kdu_resource_data)
3317 return deployed_component
3318
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Create the juju relation between the two endpoints of *relation*.

        :param relation: provider/requirer endpoint pair to connect
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param db_nsr: current nsrs record (used to locate deployed components)
        :param cached_vnfds: vnfd cache shared across calls (see _get_vnfd)
        :param cached_vnfrs: vnfr cache shared across calls (see _get_vnfr)
        :return: True when the relation was added; False when either peer is
            not yet deployed/configured (caller is expected to retry later).
        :raises LcmException: when the VCA connector fails to add the relation.
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # Both peers must exist and have their configuration software installed
        # before the relation can be established.
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints carry no vnf-profile-id, hence no vnfr lookup.
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            try:
                await self.vca_map[vca_type].add_relation(
                    provider=provider_relation_endpoint,
                    requirer=requirer_relation_endpoint,
                )
            except N2VCException as exception:
                self.logger.error(exception)
                raise LcmException(exception)
            return True
        return False
3379
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Find and establish all relations involving the VCA at *vca_index*.

        Retries every 5 seconds until every relation is added or *timeout*
        seconds elapse. Returns True on success (or when there is nothing to
        do), False on timeout or on any unexpected error.
        """
        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            # caches shared by all lookups within this call
            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # (iterate over a copy because successful additions are removed)
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3451
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and record the outcome in the DB.

        Installs the helm chart / juju bundle described by *k8s_instance_info*,
        stores the kdu-instance (and, for juju, the namespace) under
        *nsr_db_path* in the nsrs record, publishes the discovered services and
        management IP in the vnfrs record, and finally runs any
        initial-config-primitives declared for the KDU (only when no juju EE is
        attached). On failure the error is written to both records and the
        original exception is re-raised.

        :return: the kdu_instance name used for the installation.
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Honour a user-provided deployment name; otherwise generate one.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            # Initial config primitives are executed directly on the cluster only
            # when the KDU has no juju execution environment attached.
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception as error:
                # ignore to keep original exception
                self.logger.warning(
                    f"An exception occurred while updating DB: {str(error)}"
                )
            # reraise original error
            raise

        return kdu_instance
3650
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one _install_kdu task per KDU declared in the VNF records.

        For each kdur entry it resolves the cluster uuid (initializing helm-v3
        backends for legacy clusters if needed), synchronizes helm repos once
        per cluster, writes the `_admin.deployed.K8s.<index>` entry in the nsrs
        record and registers the install task in *task_instantiation_info*.

        :raises LcmException: on any unrecoverable error while preparing the
            KDU deployments.
        """
        # Launch kdus if present in the descriptor

        # per-cluster-type cache: cluster_id -> backend cluster uuid
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Return the backend uuid of *cluster_id*, initializing it on demand."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception as e:  # it is not a file
                        self.logger.warning(f"An exception occurred: {str(e)}")

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3916
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create (or reuse) the `_admin.deployed.VCA` entry for each execution
        environment in *descriptor_config* and launch an instantiate_N2VC task
        per entry, registering it in *task_instantiation_info*.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the EE descriptor: juju charm (proxy,
            # native or k8s-proxy) or helm chart; anything else is skipped.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing VCA slot matching this EE; for/else creates one.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4077
4078 def _format_additional_params(self, params):
4079 params = params or {}
4080 for key, value in params.items():
4081 if str(value).startswith("!!yaml "):
4082 params[key] = yaml.safe_load(value[7:])
4083 return params
4084
4085 def _get_terminate_primitive_params(self, seq, vnf_index):
4086 primitive = seq.get("name")
4087 primitive_params = {}
4088 params = {
4089 "member_vnf_index": vnf_index,
4090 "primitive": primitive,
4091 "primitive_params": primitive_params,
4092 }
4093 desc_params = {}
4094 return self._map_primitive_params(seq, params, desc_params)
4095
4096 # sub-operations
4097
4098 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4099 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4100 if op.get("operationState") == "COMPLETED":
4101 # b. Skip sub-operation
4102 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4103 return self.SUBOPERATION_STATUS_SKIP
4104 else:
4105 # c. retry executing sub-operation
4106 # The sub-operation exists, and operationState != 'COMPLETED'
4107 # Update operationState = 'PROCESSING' to indicate a retry.
4108 operationState = "PROCESSING"
4109 detailed_status = "In progress"
4110 self._update_suboperation_status(
4111 db_nslcmop, op_index, operationState, detailed_status
4112 )
4113 # Return the sub-operation index
4114 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4115 # with arguments extracted from the sub-operation
4116 return op_index
4117
4118 # Find a sub-operation where all keys in a matching dictionary must match
4119 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4120 def _find_suboperation(self, db_nslcmop, match):
4121 if db_nslcmop and match:
4122 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4123 for i, op in enumerate(op_list):
4124 if all(op.get(k) == match[k] for k in match):
4125 return i
4126 return self.SUBOPERATION_STATUS_NOT_FOUND
4127
4128 # Update status for a sub-operation given its index
4129 def _update_suboperation_status(
4130 self, db_nslcmop, op_index, operationState, detailed_status
4131 ):
4132 # Update DB for HA tasks
4133 q_filter = {"_id": db_nslcmop["_id"]}
4134 update_dict = {
4135 "_admin.operations.{}.operationState".format(op_index): operationState,
4136 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4137 }
4138 self.db.set_one(
4139 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4140 )
4141
4142 # Add sub-operation, return the index of the added sub-operation
4143 # Optionally, set operationState, detailed-status, and operationType
4144 # Status and type are currently set for 'scale' sub-operations:
4145 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4146 # 'detailed-status' : status message
4147 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4148 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4149 def _add_suboperation(
4150 self,
4151 db_nslcmop,
4152 vnf_index,
4153 vdu_id,
4154 vdu_count_index,
4155 vdu_name,
4156 primitive,
4157 mapped_primitive_params,
4158 operationState=None,
4159 detailed_status=None,
4160 operationType=None,
4161 RO_nsr_id=None,
4162 RO_scaling_info=None,
4163 ):
4164 if not db_nslcmop:
4165 return self.SUBOPERATION_STATUS_NOT_FOUND
4166 # Get the "_admin.operations" list, if it exists
4167 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4168 op_list = db_nslcmop_admin.get("operations")
4169 # Create or append to the "_admin.operations" list
4170 new_op = {
4171 "member_vnf_index": vnf_index,
4172 "vdu_id": vdu_id,
4173 "vdu_count_index": vdu_count_index,
4174 "primitive": primitive,
4175 "primitive_params": mapped_primitive_params,
4176 }
4177 if operationState:
4178 new_op["operationState"] = operationState
4179 if detailed_status:
4180 new_op["detailed-status"] = detailed_status
4181 if operationType:
4182 new_op["lcmOperationType"] = operationType
4183 if RO_nsr_id:
4184 new_op["RO_nsr_id"] = RO_nsr_id
4185 if RO_scaling_info:
4186 new_op["RO_scaling_info"] = RO_scaling_info
4187 if not op_list:
4188 # No existing operations, create key 'operations' with current operation as first list element
4189 db_nslcmop_admin.update({"operations": [new_op]})
4190 op_list = db_nslcmop_admin.get("operations")
4191 else:
4192 # Existing operations, append operation to list
4193 op_list.append(new_op)
4194
4195 db_nslcmop_update = {"_admin.operations": op_list}
4196 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4197 op_index = len(op_list) - 1
4198 return op_index
4199
4200 # Helper methods for scale() sub-operations
4201
4202 # pre-scale/post-scale:
4203 # Check for 3 different cases:
4204 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4205 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4206 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4207 def _check_or_add_scale_suboperation(
4208 self,
4209 db_nslcmop,
4210 vnf_index,
4211 vnf_config_primitive,
4212 primitive_params,
4213 operationType,
4214 RO_nsr_id=None,
4215 RO_scaling_info=None,
4216 ):
4217 # Find this sub-operation
4218 if RO_nsr_id and RO_scaling_info:
4219 operationType = "SCALE-RO"
4220 match = {
4221 "member_vnf_index": vnf_index,
4222 "RO_nsr_id": RO_nsr_id,
4223 "RO_scaling_info": RO_scaling_info,
4224 }
4225 else:
4226 match = {
4227 "member_vnf_index": vnf_index,
4228 "primitive": vnf_config_primitive,
4229 "primitive_params": primitive_params,
4230 "lcmOperationType": operationType,
4231 }
4232 op_index = self._find_suboperation(db_nslcmop, match)
4233 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4234 # a. New sub-operation
4235 # The sub-operation does not exist, add it.
4236 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4237 # The following parameters are set to None for all kind of scaling:
4238 vdu_id = None
4239 vdu_count_index = None
4240 vdu_name = None
4241 if RO_nsr_id and RO_scaling_info:
4242 vnf_config_primitive = None
4243 primitive_params = None
4244 else:
4245 RO_nsr_id = None
4246 RO_scaling_info = None
4247 # Initial status for sub-operation
4248 operationState = "PROCESSING"
4249 detailed_status = "In progress"
4250 # Add sub-operation for pre/post-scaling (zero or more operations)
4251 self._add_suboperation(
4252 db_nslcmop,
4253 vnf_index,
4254 vdu_id,
4255 vdu_count_index,
4256 vdu_name,
4257 vnf_config_primitive,
4258 primitive_params,
4259 operationState,
4260 detailed_status,
4261 operationType,
4262 RO_nsr_id,
4263 RO_scaling_info,
4264 )
4265 return self.SUBOPERATION_STATUS_NEW
4266 else:
4267 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4268 # or op_index (operationState != 'COMPLETED')
4269 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4270
    # Terminate a single deployed VCA: run its terminate primitives and
    # optionally delete its execution environment

    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database record of the current operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment here, because all of
            them will be destroyed at once later on
        :param exec_primitives: False to not execute terminate primitives, because the config is
            not completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier used to select the VCA connection data
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value kept for backward compatibility: records without "type" are proxy charms
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # only run primitives for VCAs flagged as needing termination
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so a HA retry can find and resume it
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {}  for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4378
4379 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4380 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4381 namespace = "." + db_nsr["_id"]
4382 try:
4383 await self.n2vc.delete_namespace(
4384 namespace=namespace,
4385 total_timeout=self.timeout.charm_delete,
4386 vca_id=vca_id,
4387 )
4388 except N2VCNotFound: # already deleted. Skip
4389 pass
4390 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4391
    async def terminate(self, nsr_id, nslcmop_id):
        """Full termination of a NS instance.

        Stage 1/3 prepares the task, stage 2/3 runs per-VCA terminate
        primitives, stage 3/3 deletes all execution environments, KDUs and the
        RO/VIM deployment. The ``finally`` section waits for pending tasks,
        writes the final nsr/nslcmop status and publishes a "terminated"
        message on the message bus.

        :param nsr_id: id of the nsr database record
        :param nslcmop_id: id of the nslcmop database record driving this task
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so later mutations do not touch the db record
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, fetching each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching the VCA target level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    namespace=db_nslcmop["nsInstanceId"],
                    certificate_name=self.EE_TLS_NAME,
                )
                await self.vca_map["helm-v3"].delete_namespace(
                    namespace=db_nslcmop["nsInstanceId"],
                )

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
                await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    timeout_ns_terminate,
                    stage,
                    nslcmop_id,
                )
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
            self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4730
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of tasks to finish, collecting errors and reporting
        progress at the nslcmop (and optionally nsr) database record.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping each task to its description
        :param timeout: global timeout in seconds for the whole set of tasks
        :param stage: 3-item list [stage, step, VIM-status]; stage[1] is
            updated in place with "done/total" progress and error summaries
        :param nslcmop_id: id of the nslcmop to report progress on
        :param nsr_id: when provided, errorDescription/errorDetail are also
            written to this nsr record
        :return: list of error messages, one per failed or timed-out task
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining share of the global timeout budget for this round
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types get a one-line log;
                    # anything else logs the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4807
4808 async def _cancel_pending_tasks(self, logging_text, created_tasks_info):
4809 for task, name in created_tasks_info.items():
4810 self.logger.debug(logging_text + "Cancelling task: " + name)
4811 task.cancel()
4812
4813 @staticmethod
4814 def _map_primitive_params(primitive_desc, params, instantiation_params):
4815 """
4816 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4817 The default-value is used. If it is between < > it look for a value at instantiation_params
4818 :param primitive_desc: portion of VNFD/NSD that describes primitive
4819 :param params: Params provided by user
4820 :param instantiation_params: Instantiation params provided by user
4821 :return: a dictionary with the calculated params
4822 """
4823 calculated_params = {}
4824 for parameter in primitive_desc.get("parameter", ()):
4825 param_name = parameter["name"]
4826 if param_name in params:
4827 calculated_params[param_name] = params[param_name]
4828 elif "default-value" in parameter or "value" in parameter:
4829 if "value" in parameter:
4830 calculated_params[param_name] = parameter["value"]
4831 else:
4832 calculated_params[param_name] = parameter["default-value"]
4833 if (
4834 isinstance(calculated_params[param_name], str)
4835 and calculated_params[param_name].startswith("<")
4836 and calculated_params[param_name].endswith(">")
4837 ):
4838 if calculated_params[param_name][1:-1] in instantiation_params:
4839 calculated_params[param_name] = instantiation_params[
4840 calculated_params[param_name][1:-1]
4841 ]
4842 else:
4843 raise LcmException(
4844 "Parameter {} needed to execute primitive {} not provided".format(
4845 calculated_params[param_name], primitive_desc["name"]
4846 )
4847 )
4848 else:
4849 raise LcmException(
4850 "Parameter {} needed to execute primitive {} not provided".format(
4851 param_name, primitive_desc["name"]
4852 )
4853 )
4854
4855 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4856 calculated_params[param_name] = yaml.safe_dump(
4857 calculated_params[param_name], default_flow_style=True, width=256
4858 )
4859 elif isinstance(calculated_params[param_name], str) and calculated_params[
4860 param_name
4861 ].startswith("!!yaml "):
4862 calculated_params[param_name] = calculated_params[param_name][7:]
4863 if parameter.get("data-type") == "INTEGER":
4864 try:
4865 calculated_params[param_name] = int(calculated_params[param_name])
4866 except ValueError: # error converting string to int
4867 raise LcmException(
4868 "Parameter {} of primitive {} must be integer".format(
4869 param_name, primitive_desc["name"]
4870 )
4871 )
4872 elif parameter.get("data-type") == "BOOLEAN":
4873 calculated_params[param_name] = not (
4874 (str(calculated_params[param_name])).lower() == "false"
4875 )
4876
4877 # add always ns_config_info if primitive name is config
4878 if primitive_desc["name"] == "config":
4879 if "ns_config_info" in instantiation_params:
4880 calculated_params["ns_config_info"] = instantiation_params[
4881 "ns_config_info"
4882 ]
4883 return calculated_params
4884
4885 def _look_for_deployed_vca(
4886 self,
4887 deployed_vca,
4888 member_vnf_index,
4889 vdu_id,
4890 vdu_count_index,
4891 kdu_name=None,
4892 ee_descriptor_id=None,
4893 ):
4894 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4895 for vca in deployed_vca:
4896 if not vca:
4897 continue
4898 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4899 continue
4900 if (
4901 vdu_count_index is not None
4902 and vdu_count_index != vca["vdu_count_index"]
4903 ):
4904 continue
4905 if kdu_name and kdu_name != vca["kdu_name"]:
4906 continue
4907 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4908 continue
4909 break
4910 else:
4911 # vca_deployed not found
4912 raise LcmException(
4913 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4914 " is not deployed".format(
4915 member_vnf_index,
4916 vdu_id,
4917 vdu_count_index,
4918 kdu_name,
4919 ee_descriptor_id,
4920 )
4921 )
4922 # get ee_id
4923 ee_id = vca.get("ee_id")
4924 vca_type = vca.get(
4925 "type", "lxc_proxy_charm"
4926 ) # default value for backward compatibility - proxy charm
4927 if not ee_id:
4928 raise LcmException(
4929 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4930 "execution environment".format(
4931 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4932 )
4933 )
4934 return ee_id, vca_type
4935
4936 async def _ns_execute_primitive(
4937 self,
4938 ee_id,
4939 primitive,
4940 primitive_params,
4941 retries=0,
4942 retries_interval=30,
4943 timeout=None,
4944 vca_type=None,
4945 db_dict=None,
4946 vca_id: str = None,
4947 ) -> (str, str):
4948 try:
4949 if primitive == "config":
4950 primitive_params = {"params": primitive_params}
4951
4952 vca_type = vca_type or "lxc_proxy_charm"
4953
4954 while retries >= 0:
4955 try:
4956 output = await asyncio.wait_for(
4957 self.vca_map[vca_type].exec_primitive(
4958 ee_id=ee_id,
4959 primitive_name=primitive,
4960 params_dict=primitive_params,
4961 progress_timeout=self.timeout.progress_primitive,
4962 total_timeout=self.timeout.primitive,
4963 db_dict=db_dict,
4964 vca_id=vca_id,
4965 vca_type=vca_type,
4966 ),
4967 timeout=timeout or self.timeout.primitive,
4968 )
4969 # execution was OK
4970 break
4971 except asyncio.CancelledError:
4972 raise
4973 except Exception as e:
4974 retries -= 1
4975 if retries >= 0:
4976 self.logger.debug(
4977 "Error executing action {} on {} -> {}".format(
4978 primitive, ee_id, e
4979 )
4980 )
4981 # wait and retry
4982 await asyncio.sleep(retries_interval)
4983 else:
4984 if isinstance(e, asyncio.TimeoutError):
4985 e = N2VCException(
4986 message="Timed out waiting for action to complete"
4987 )
4988 return "FAILED", getattr(e, "message", repr(e))
4989
4990 return "COMPLETED", output
4991
4992 except (LcmException, asyncio.CancelledError):
4993 raise
4994 except Exception as e:
4995 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4996
4997 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4998 """
4999 Updating the vca_status with latest juju information in nsrs record
5000 :param: nsr_id: Id of the nsr
5001 :param: nslcmop_id: Id of the nslcmop
5002 :return: None
5003 """
5004
5005 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5006 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5007 vca_id = self.get_vca_id({}, db_nsr)
5008 if db_nsr["_admin"]["deployed"]["K8s"]:
5009 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5010 cluster_uuid, kdu_instance, cluster_type = (
5011 k8s["k8scluster-uuid"],
5012 k8s["kdu-instance"],
5013 k8s["k8scluster-type"],
5014 )
5015 await self._on_update_k8s_db(
5016 cluster_uuid=cluster_uuid,
5017 kdu_instance=kdu_instance,
5018 filter={"_id": nsr_id},
5019 vca_id=vca_id,
5020 cluster_type=cluster_type,
5021 )
5022 else:
5023 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5024 table, filter = "nsrs", {"_id": nsr_id}
5025 path = "_admin.deployed.VCA.{}.".format(vca_index)
5026 await self._on_update_n2vc_db(table, filter, path, {})
5027
5028 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5029 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5030
    async def action(self, nsr_id, nslcmop_id):
        """Execute an action (primitive) over an NS, VNF, VDU or KDU.

        The primitive name and parameters are read from the nslcmop record.
        KDU operations ("upgrade", "rollback", "status" or helm-native actions)
        are dispatched to the matching K8s cluster client; any other primitive
        runs on the deployed VCA execution environment. The result is persisted
        to the nslcmop/nsr records and a kafka "actioned" message is emitted in
        the finally block.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id that holds the action request
        :return: (nslcmop operation state, detailed status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params may arrive JSON-encoded; decode in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode them
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned when vnf_index is set; for an
            # NS-scoped action (no member_vnf_index) the next line would raise
            # NameError — confirm intended usage.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive in the descriptor configuration at the right scope
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # built-in KDU verbs do not need a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve the additionalParams at the scope the action targets
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): the loop variable below shadows the requested
                # "primitive" name; later code relies on primitive_name instead.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] != "helm-chart-v3"
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound when the KDU has a
            # configuration block — confirm it cannot be read unbound here.
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model from params takes precedence over the deployed one
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # helm-native action declared in the KDU configuration
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # run the primitive on the deployed VCA execution environment
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                # NOTE(review): db_dict stays unbound if no VCA entry matches
                # vnf_index — confirm a match is always guaranteed here.
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the operation outcome and notify regardless of the path taken
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5402
5403 async def terminate_vdus(
5404 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5405 ):
5406 """This method terminates VDUs
5407
5408 Args:
5409 db_vnfr: VNF instance record
5410 member_vnf_index: VNF index to identify the VDUs to be removed
5411 db_nsr: NS instance record
5412 update_db_nslcmops: Nslcmop update record
5413 """
5414 vca_scaling_info = []
5415 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5416 scaling_info["scaling_direction"] = "IN"
5417 scaling_info["vdu-delete"] = {}
5418 scaling_info["kdu-delete"] = {}
5419 db_vdur = db_vnfr.get("vdur")
5420 vdur_list = copy(db_vdur)
5421 count_index = 0
5422 for index, vdu in enumerate(vdur_list):
5423 vca_scaling_info.append(
5424 {
5425 "osm_vdu_id": vdu["vdu-id-ref"],
5426 "member-vnf-index": member_vnf_index,
5427 "type": "delete",
5428 "vdu_index": count_index,
5429 }
5430 )
5431 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5432 scaling_info["vdu"].append(
5433 {
5434 "name": vdu.get("name") or vdu.get("vdu-name"),
5435 "vdu_id": vdu["vdu-id-ref"],
5436 "interface": [],
5437 }
5438 )
5439 for interface in vdu["interfaces"]:
5440 scaling_info["vdu"][index]["interface"].append(
5441 {
5442 "name": interface["name"],
5443 "ip_address": interface["ip-address"],
5444 "mac_address": interface.get("mac-address"),
5445 }
5446 )
5447 self.logger.info("NS update scaling info{}".format(scaling_info))
5448 stage[2] = "Terminating VDUs"
5449 if scaling_info.get("vdu-delete"):
5450 # scale_process = "RO"
5451 if self.ro_config.ng:
5452 await self._scale_ng_ro(
5453 logging_text,
5454 db_nsr,
5455 update_db_nslcmops,
5456 db_vnfr,
5457 scaling_info,
5458 stage,
5459 )
5460
5461 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5462 """This method is to Remove VNF instances from NS.
5463
5464 Args:
5465 nsr_id: NS instance id
5466 nslcmop_id: nslcmop id of update
5467 vnf_instance_id: id of the VNF instance to be removed
5468
5469 Returns:
5470 result: (str, str) COMPLETED/FAILED, details
5471 """
5472 try:
5473 db_nsr_update = {}
5474 logging_text = "Task ns={} update ".format(nsr_id)
5475 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5476 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5477 if check_vnfr_count > 1:
5478 stage = ["", "", ""]
5479 step = "Getting nslcmop from database"
5480 self.logger.debug(
5481 step + " after having waited for previous tasks to be completed"
5482 )
5483 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5484 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5485 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5486 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5487 """ db_vnfr = self.db.get_one(
5488 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5489
5490 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5491 await self.terminate_vdus(
5492 db_vnfr,
5493 member_vnf_index,
5494 db_nsr,
5495 update_db_nslcmops,
5496 stage,
5497 logging_text,
5498 )
5499
5500 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5501 constituent_vnfr.remove(db_vnfr.get("_id"))
5502 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5503 "constituent-vnfr-ref"
5504 )
5505 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5506 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5507 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5508 return "COMPLETED", "Done"
5509 else:
5510 step = "Terminate VNF Failed with"
5511 raise LcmException(
5512 "{} Cannot terminate the last VNF in this NS.".format(
5513 vnf_instance_id
5514 )
5515 )
5516 except (LcmException, asyncio.CancelledError):
5517 raise
5518 except Exception as e:
5519 self.logger.debug("Error removing VNF {}".format(e))
5520 return "FAILED", "Error removing VNF {}".format(e)
5521
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the current VDUs of the VNF, rebuilds the vnfr record
        (connection points, vdur list, revision) from the latest descriptor and
        re-instantiates the resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the vnfr connection points from the descriptor ext-cpd list
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is supplied by the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                # NOTE(review): this inner check repeats the one above, and
                # cloud_init_list is filled but never consumed afterwards.
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index is never incremented, so every VDU
                # is registered with index 0 — confirm this is intended.
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5647
5648 async def _ns_charm_upgrade(
5649 self,
5650 ee_id,
5651 charm_id,
5652 charm_type,
5653 path,
5654 timeout: float = None,
5655 ) -> (str, str):
5656 """This method upgrade charms in VNF instances
5657
5658 Args:
5659 ee_id: Execution environment id
5660 path: Local path to the charm
5661 charm_id: charm-id
5662 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5663 timeout: (Float) Timeout for the ns update operation
5664
5665 Returns:
5666 result: (str, str) COMPLETED/FAILED, details
5667 """
5668 try:
5669 charm_type = charm_type or "lxc_proxy_charm"
5670 output = await self.vca_map[charm_type].upgrade_charm(
5671 ee_id=ee_id,
5672 path=path,
5673 charm_id=charm_id,
5674 charm_type=charm_type,
5675 timeout=timeout or self.timeout.ns_update,
5676 )
5677
5678 if output:
5679 return "COMPLETED", output
5680
5681 except (LcmException, asyncio.CancelledError):
5682 raise
5683
5684 except Exception as e:
5685 self.logger.debug("Error upgrading charm {}".format(path))
5686
5687 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5688
5689 async def update(self, nsr_id, nslcmop_id):
5690 """Update NS according to different update types
5691
5692 This method performs upgrade of VNF instances then updates the revision
5693 number in VNF record
5694
5695 Args:
5696 nsr_id: Network service will be updated
5697 nslcmop_id: ns lcm operation id
5698
5699 Returns:
5700 It may raise DbException, LcmException, N2VCException, K8sException
5701
5702 """
5703 # Try to lock HA task here
5704 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5705 if not task_is_locked_by_me:
5706 return
5707
5708 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5709 self.logger.debug(logging_text + "Enter")
5710
5711 # Set the required variables to be filled up later
5712 db_nsr = None
5713 db_nslcmop_update = {}
5714 vnfr_update = {}
5715 nslcmop_operation_state = None
5716 db_nsr_update = {}
5717 error_description_nslcmop = ""
5718 exc = None
5719 change_type = "updated"
5720 detailed_status = ""
5721 member_vnf_index = None
5722
5723 try:
5724 # wait for any previous tasks in process
5725 step = "Waiting for previous operations to terminate"
5726 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5727 self._write_ns_status(
5728 nsr_id=nsr_id,
5729 ns_state=None,
5730 current_operation="UPDATING",
5731 current_operation_id=nslcmop_id,
5732 )
5733
5734 step = "Getting nslcmop from database"
5735 db_nslcmop = self.db.get_one(
5736 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5737 )
5738 update_type = db_nslcmop["operationParams"]["updateType"]
5739
5740 step = "Getting nsr from database"
5741 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5742 old_operational_status = db_nsr["operational-status"]
5743 db_nsr_update["operational-status"] = "updating"
5744 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5745 nsr_deployed = db_nsr["_admin"].get("deployed")
5746
5747 if update_type == "CHANGE_VNFPKG":
5748 # Get the input parameters given through update request
5749 vnf_instance_id = db_nslcmop["operationParams"][
5750 "changeVnfPackageData"
5751 ].get("vnfInstanceId")
5752
5753 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5754 "vnfdId"
5755 )
5756 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5757
5758 step = "Getting vnfr from database"
5759 db_vnfr = self.db.get_one(
5760 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5761 )
5762
5763 step = "Getting vnfds from database"
5764 # Latest VNFD
5765 latest_vnfd = self.db.get_one(
5766 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5767 )
5768 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5769
5770 # Current VNFD
5771 current_vnf_revision = db_vnfr.get("revision", 1)
5772 current_vnfd = self.db.get_one(
5773 "vnfds_revisions",
5774 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5775 fail_on_empty=False,
5776 )
5777 # Charm artifact paths will be filled up later
5778 (
5779 current_charm_artifact_path,
5780 target_charm_artifact_path,
5781 charm_artifact_paths,
5782 helm_artifacts,
5783 ) = ([], [], [], [])
5784
5785 step = "Checking if revision has changed in VNFD"
5786 if current_vnf_revision != latest_vnfd_revision:
5787 change_type = "policy_updated"
5788
5789 # There is new revision of VNFD, update operation is required
5790 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5791 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5792
5793 step = "Removing the VNFD packages if they exist in the local path"
5794 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5795 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5796
5797 step = "Get the VNFD packages from FSMongo"
5798 self.fs.sync(from_path=latest_vnfd_path)
5799 self.fs.sync(from_path=current_vnfd_path)
5800
5801 step = (
5802 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5803 )
5804 current_base_folder = current_vnfd["_admin"]["storage"]
5805 latest_base_folder = latest_vnfd["_admin"]["storage"]
5806
5807 for vca_index, vca_deployed in enumerate(
5808 get_iterable(nsr_deployed, "VCA")
5809 ):
5810 vnf_index = db_vnfr.get("member-vnf-index-ref")
5811
5812 # Getting charm-id and charm-type
5813 if vca_deployed.get("member-vnf-index") == vnf_index:
5814 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5815 vca_type = vca_deployed.get("type")
5816 vdu_count_index = vca_deployed.get("vdu_count_index")
5817
5818 # Getting ee-id
5819 ee_id = vca_deployed.get("ee_id")
5820
5821 step = "Getting descriptor config"
5822 if current_vnfd.get("kdu"):
5823 search_key = "kdu_name"
5824 else:
5825 search_key = "vnfd_id"
5826
5827 entity_id = vca_deployed.get(search_key)
5828
5829 descriptor_config = get_configuration(
5830 current_vnfd, entity_id
5831 )
5832
5833 if "execution-environment-list" in descriptor_config:
5834 ee_list = descriptor_config.get(
5835 "execution-environment-list", []
5836 )
5837 else:
5838 ee_list = []
5839
5840 # There could be several charm used in the same VNF
5841 for ee_item in ee_list:
5842 if ee_item.get("juju"):
5843 step = "Getting charm name"
5844 charm_name = ee_item["juju"].get("charm")
5845
5846 step = "Setting Charm artifact paths"
5847 current_charm_artifact_path.append(
5848 get_charm_artifact_path(
5849 current_base_folder,
5850 charm_name,
5851 vca_type,
5852 current_vnf_revision,
5853 )
5854 )
5855 target_charm_artifact_path.append(
5856 get_charm_artifact_path(
5857 latest_base_folder,
5858 charm_name,
5859 vca_type,
5860 latest_vnfd_revision,
5861 )
5862 )
5863 elif ee_item.get("helm-chart"):
5864 # add chart to list and all parameters
5865 step = "Getting helm chart name"
5866 chart_name = ee_item.get("helm-chart")
5867 vca_type = "helm-v3"
5868 step = "Setting Helm chart artifact paths"
5869
5870 helm_artifacts.append(
5871 {
5872 "current_artifact_path": get_charm_artifact_path(
5873 current_base_folder,
5874 chart_name,
5875 vca_type,
5876 current_vnf_revision,
5877 ),
5878 "target_artifact_path": get_charm_artifact_path(
5879 latest_base_folder,
5880 chart_name,
5881 vca_type,
5882 latest_vnfd_revision,
5883 ),
5884 "ee_id": ee_id,
5885 "vca_index": vca_index,
5886 "vdu_index": vdu_count_index,
5887 }
5888 )
5889
5890 charm_artifact_paths = zip(
5891 current_charm_artifact_path, target_charm_artifact_path
5892 )
5893
5894 step = "Checking if software version has changed in VNFD"
5895 if find_software_version(current_vnfd) != find_software_version(
5896 latest_vnfd
5897 ):
5898 step = "Checking if existing VNF has charm"
5899 for current_charm_path, target_charm_path in list(
5900 charm_artifact_paths
5901 ):
5902 if current_charm_path:
5903 raise LcmException(
5904 "Software version change is not supported as VNF instance {} has charm.".format(
5905 vnf_instance_id
5906 )
5907 )
5908
5909 # There is no change in the charm package, then redeploy the VNF
5910 # based on new descriptor
5911 step = "Redeploying VNF"
5912 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5913 (result, detailed_status) = await self._ns_redeploy_vnf(
5914 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5915 )
5916 if result == "FAILED":
5917 nslcmop_operation_state = result
5918 error_description_nslcmop = detailed_status
5919 old_operational_status = "failed"
5920 db_nslcmop_update["detailed-status"] = detailed_status
5921 db_nsr_update["detailed-status"] = detailed_status
5922 scaling_aspect = get_scaling_aspect(latest_vnfd)
5923 scaling_group_desc = db_nsr.get("_admin").get(
5924 "scaling-group", None
5925 )
5926 if scaling_group_desc:
5927 for aspect in scaling_aspect:
5928 scaling_group_id = aspect.get("id")
5929 for scale_index, scaling_group in enumerate(
5930 scaling_group_desc
5931 ):
5932 if scaling_group.get("name") == scaling_group_id:
5933 db_nsr_update[
5934 "_admin.scaling-group.{}.nb-scale-op".format(
5935 scale_index
5936 )
5937 ] = 0
5938 self.logger.debug(
5939 logging_text
5940 + " step {} Done with result {} {}".format(
5941 step, nslcmop_operation_state, detailed_status
5942 )
5943 )
5944
5945 else:
5946 step = "Checking if any charm package has changed or not"
5947 for current_charm_path, target_charm_path in list(
5948 charm_artifact_paths
5949 ):
5950 if (
5951 current_charm_path
5952 and target_charm_path
5953 and self.check_charm_hash_changed(
5954 current_charm_path, target_charm_path
5955 )
5956 ):
5957 step = "Checking whether VNF uses juju bundle"
5958 if check_juju_bundle_existence(current_vnfd):
5959 raise LcmException(
5960 "Charm upgrade is not supported for the instance which"
5961 " uses juju-bundle: {}".format(
5962 check_juju_bundle_existence(current_vnfd)
5963 )
5964 )
5965
5966 step = "Upgrading Charm"
5967 (
5968 result,
5969 detailed_status,
5970 ) = await self._ns_charm_upgrade(
5971 ee_id=ee_id,
5972 charm_id=vca_id,
5973 charm_type=vca_type,
5974 path=self.fs.path + target_charm_path,
5975 timeout=timeout_seconds,
5976 )
5977
5978 if result == "FAILED":
5979 nslcmop_operation_state = result
5980 error_description_nslcmop = detailed_status
5981
5982 db_nslcmop_update["detailed-status"] = detailed_status
5983 self.logger.debug(
5984 logging_text
5985 + " step {} Done with result {} {}".format(
5986 step, nslcmop_operation_state, detailed_status
5987 )
5988 )
5989
5990 step = "Updating policies"
5991 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5992 result = "COMPLETED"
5993 detailed_status = "Done"
5994 db_nslcmop_update["detailed-status"] = "Done"
5995
5996 # helm base EE
5997 for item in helm_artifacts:
5998 if not (
5999 item["current_artifact_path"]
6000 and item["target_artifact_path"]
6001 and self.check_charm_hash_changed(
6002 item["current_artifact_path"],
6003 item["target_artifact_path"],
6004 )
6005 ):
6006 continue
6007 db_update_entry = "_admin.deployed.VCA.{}.".format(
6008 item["vca_index"]
6009 )
6010 vnfr_id = db_vnfr["_id"]
6011 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6012 db_dict = {
6013 "collection": "nsrs",
6014 "filter": {"_id": nsr_id},
6015 "path": db_update_entry,
6016 }
6017 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6018 await self.vca_map[vca_type].upgrade_execution_environment(
6019 namespace=namespace,
6020 helm_id=helm_id,
6021 db_dict=db_dict,
6022 config=osm_config,
6023 artifact_path=item["target_artifact_path"],
6024 vca_type=vca_type,
6025 )
6026 vnf_id = db_vnfr.get("vnfd-ref")
6027 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6028 self.logger.debug("get ssh key block")
6029 rw_mgmt_ip = None
6030 if deep_get(
6031 config_descriptor,
6032 ("config-access", "ssh-access", "required"),
6033 ):
6034 # Needed to inject a ssh key
6035 user = deep_get(
6036 config_descriptor,
6037 ("config-access", "ssh-access", "default-user"),
6038 )
6039 step = (
6040 "Install configuration Software, getting public ssh key"
6041 )
6042 pub_key = await self.vca_map[
6043 vca_type
6044 ].get_ee_ssh_public__key(
6045 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6046 )
6047
6048 step = (
6049 "Insert public key into VM user={} ssh_key={}".format(
6050 user, pub_key
6051 )
6052 )
6053 self.logger.debug(logging_text + step)
6054
6055 # wait for RO (ip-address) Insert pub_key into VM
6056 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6057 logging_text,
6058 nsr_id,
6059 vnfr_id,
6060 None,
6061 item["vdu_index"],
6062 user=user,
6063 pub_key=pub_key,
6064 )
6065
6066 initial_config_primitive_list = config_descriptor.get(
6067 "initial-config-primitive"
6068 )
6069 config_primitive = next(
6070 (
6071 p
6072 for p in initial_config_primitive_list
6073 if p["name"] == "config"
6074 ),
6075 None,
6076 )
6077 if not config_primitive:
6078 continue
6079
6080 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6081 if rw_mgmt_ip:
6082 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6083 if db_vnfr.get("additionalParamsForVnf"):
6084 deploy_params.update(
6085 parse_yaml_strings(
6086 db_vnfr["additionalParamsForVnf"].copy()
6087 )
6088 )
6089 primitive_params_ = self._map_primitive_params(
6090 config_primitive, {}, deploy_params
6091 )
6092
6093 step = "execute primitive '{}' params '{}'".format(
6094 config_primitive["name"], primitive_params_
6095 )
6096 self.logger.debug(logging_text + step)
6097 await self.vca_map[vca_type].exec_primitive(
6098 ee_id=ee_id,
6099 primitive_name=config_primitive["name"],
6100 params_dict=primitive_params_,
6101 db_dict=db_dict,
6102 vca_id=vca_id,
6103 vca_type=vca_type,
6104 )
6105
6106 step = "Updating policies"
6107 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6108 detailed_status = "Done"
6109 db_nslcmop_update["detailed-status"] = "Done"
6110
6111 # If nslcmop_operation_state is None, so any operation is not failed.
6112 if not nslcmop_operation_state:
6113 nslcmop_operation_state = "COMPLETED"
6114
6115 # If update CHANGE_VNFPKG nslcmop_operation is successful
6116 # vnf revision need to be updated
6117 vnfr_update["revision"] = latest_vnfd_revision
6118 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6119
6120 self.logger.debug(
6121 logging_text
6122 + " task Done with result {} {}".format(
6123 nslcmop_operation_state, detailed_status
6124 )
6125 )
6126 elif update_type == "REMOVE_VNF":
6127 # This part is included in https://osm.etsi.org/gerrit/11876
6128 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6129 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6130 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6131 step = "Removing VNF"
6132 (result, detailed_status) = await self.remove_vnf(
6133 nsr_id, nslcmop_id, vnf_instance_id
6134 )
6135 if result == "FAILED":
6136 nslcmop_operation_state = result
6137 error_description_nslcmop = detailed_status
6138 db_nslcmop_update["detailed-status"] = detailed_status
6139 change_type = "vnf_terminated"
6140 if not nslcmop_operation_state:
6141 nslcmop_operation_state = "COMPLETED"
6142 self.logger.debug(
6143 logging_text
6144 + " task Done with result {} {}".format(
6145 nslcmop_operation_state, detailed_status
6146 )
6147 )
6148
6149 elif update_type == "OPERATE_VNF":
6150 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6151 "vnfInstanceId"
6152 ]
6153 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6154 "changeStateTo"
6155 ]
6156 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6157 "additionalParam"
6158 ]
6159 (result, detailed_status) = await self.rebuild_start_stop(
6160 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6161 )
6162 if result == "FAILED":
6163 nslcmop_operation_state = result
6164 error_description_nslcmop = detailed_status
6165 db_nslcmop_update["detailed-status"] = detailed_status
6166 if not nslcmop_operation_state:
6167 nslcmop_operation_state = "COMPLETED"
6168 self.logger.debug(
6169 logging_text
6170 + " task Done with result {} {}".format(
6171 nslcmop_operation_state, detailed_status
6172 )
6173 )
6174
6175 # If nslcmop_operation_state is None, so any operation is not failed.
6176 # All operations are executed in overall.
6177 if not nslcmop_operation_state:
6178 nslcmop_operation_state = "COMPLETED"
6179 db_nsr_update["operational-status"] = old_operational_status
6180
6181 except (DbException, LcmException, N2VCException, K8sException) as e:
6182 self.logger.error(logging_text + "Exit Exception {}".format(e))
6183 exc = e
6184 except asyncio.CancelledError:
6185 self.logger.error(
6186 logging_text + "Cancelled Exception while '{}'".format(step)
6187 )
6188 exc = "Operation was cancelled"
6189 except asyncio.TimeoutError:
6190 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6191 exc = "Timeout"
6192 except Exception as e:
6193 exc = traceback.format_exc()
6194 self.logger.critical(
6195 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6196 exc_info=True,
6197 )
6198 finally:
6199 if exc:
6200 db_nslcmop_update[
6201 "detailed-status"
6202 ] = (
6203 detailed_status
6204 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6205 nslcmop_operation_state = "FAILED"
6206 db_nsr_update["operational-status"] = old_operational_status
6207 if db_nsr:
6208 self._write_ns_status(
6209 nsr_id=nsr_id,
6210 ns_state=db_nsr["nsState"],
6211 current_operation="IDLE",
6212 current_operation_id=None,
6213 other_update=db_nsr_update,
6214 )
6215
6216 self._write_op_status(
6217 op_id=nslcmop_id,
6218 stage="",
6219 error_message=error_description_nslcmop,
6220 operation_state=nslcmop_operation_state,
6221 other_update=db_nslcmop_update,
6222 )
6223
6224 if nslcmop_operation_state:
6225 try:
6226 msg = {
6227 "nsr_id": nsr_id,
6228 "nslcmop_id": nslcmop_id,
6229 "operationState": nslcmop_operation_state,
6230 }
6231 if (
6232 change_type in ("vnf_terminated", "policy_updated")
6233 and member_vnf_index
6234 ):
6235 msg.update({"vnf_member_index": member_vnf_index})
6236 await self.msg.aiowrite("ns", change_type, msg)
6237 except Exception as e:
6238 self.logger.error(
6239 logging_text + "kafka_write notification Exception {}".format(e)
6240 )
6241 self.logger.debug(logging_text + "Exit")
6242 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6243 return nslcmop_operation_state, detailed_status
6244
6245 async def scale(self, nsr_id, nslcmop_id):
6246 # Try to lock HA task here
6247 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6248 if not task_is_locked_by_me:
6249 return
6250
6251 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6252 stage = ["", "", ""]
6253 tasks_dict_info = {}
6254 # ^ stage, step, VIM progress
6255 self.logger.debug(logging_text + "Enter")
6256 # get all needed from database
6257 db_nsr = None
6258 db_nslcmop_update = {}
6259 db_nsr_update = {}
6260 exc = None
6261 # in case of error, indicates what part of scale was failed to put nsr at error status
6262 scale_process = None
6263 old_operational_status = ""
6264 old_config_status = ""
6265 nsi_id = None
6266 prom_job_name = ""
6267 try:
6268 # wait for any previous tasks in process
6269 step = "Waiting for previous operations to terminate"
6270 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6271 self._write_ns_status(
6272 nsr_id=nsr_id,
6273 ns_state=None,
6274 current_operation="SCALING",
6275 current_operation_id=nslcmop_id,
6276 )
6277
6278 step = "Getting nslcmop from database"
6279 self.logger.debug(
6280 step + " after having waited for previous tasks to be completed"
6281 )
6282 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6283
6284 step = "Getting nsr from database"
6285 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6286 old_operational_status = db_nsr["operational-status"]
6287 old_config_status = db_nsr["config-status"]
6288
6289 step = "Parsing scaling parameters"
6290 db_nsr_update["operational-status"] = "scaling"
6291 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6292 nsr_deployed = db_nsr["_admin"].get("deployed")
6293
6294 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6295 "scaleByStepData"
6296 ]["member-vnf-index"]
6297 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6298 "scaleByStepData"
6299 ]["scaling-group-descriptor"]
6300 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6301 # for backward compatibility
6302 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6303 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6304 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6305 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6306
6307 step = "Getting vnfr from database"
6308 db_vnfr = self.db.get_one(
6309 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6310 )
6311
6312 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6313
6314 step = "Getting vnfd from database"
6315 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6316
6317 base_folder = db_vnfd["_admin"]["storage"]
6318
6319 step = "Getting scaling-group-descriptor"
6320 scaling_descriptor = find_in_list(
6321 get_scaling_aspect(db_vnfd),
6322 lambda scale_desc: scale_desc["name"] == scaling_group,
6323 )
6324 if not scaling_descriptor:
6325 raise LcmException(
6326 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6327 "at vnfd:scaling-group-descriptor".format(scaling_group)
6328 )
6329
6330 step = "Sending scale order to VIM"
6331 # TODO check if ns is in a proper status
6332 nb_scale_op = 0
6333 if not db_nsr["_admin"].get("scaling-group"):
6334 self.update_db_2(
6335 "nsrs",
6336 nsr_id,
6337 {
6338 "_admin.scaling-group": [
6339 {"name": scaling_group, "nb-scale-op": 0}
6340 ]
6341 },
6342 )
6343 admin_scale_index = 0
6344 else:
6345 for admin_scale_index, admin_scale_info in enumerate(
6346 db_nsr["_admin"]["scaling-group"]
6347 ):
6348 if admin_scale_info["name"] == scaling_group:
6349 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6350 break
6351 else: # not found, set index one plus last element and add new entry with the name
6352 admin_scale_index += 1
6353 db_nsr_update[
6354 "_admin.scaling-group.{}.name".format(admin_scale_index)
6355 ] = scaling_group
6356
6357 vca_scaling_info = []
6358 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6359 if scaling_type == "SCALE_OUT":
6360 if "aspect-delta-details" not in scaling_descriptor:
6361 raise LcmException(
6362 "Aspect delta details not fount in scaling descriptor {}".format(
6363 scaling_descriptor["name"]
6364 )
6365 )
6366 # count if max-instance-count is reached
6367 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6368
6369 scaling_info["scaling_direction"] = "OUT"
6370 scaling_info["vdu-create"] = {}
6371 scaling_info["kdu-create"] = {}
6372 for delta in deltas:
6373 for vdu_delta in delta.get("vdu-delta", {}):
6374 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6375 # vdu_index also provides the number of instance of the targeted vdu
6376 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6377 if vdu_index <= len(db_vnfr["vdur"]):
6378 vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
6379 prom_job_name = (
6380 db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
6381 )
6382 prom_job_name = prom_job_name.replace("_", "")
6383 prom_job_name = prom_job_name.replace("-", "")
6384 else:
6385 prom_job_name = None
6386 cloud_init_text = self._get_vdu_cloud_init_content(
6387 vdud, db_vnfd
6388 )
6389 if cloud_init_text:
6390 additional_params = (
6391 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6392 or {}
6393 )
6394 cloud_init_list = []
6395
6396 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6397 max_instance_count = 10
6398 if vdu_profile and "max-number-of-instances" in vdu_profile:
6399 max_instance_count = vdu_profile.get(
6400 "max-number-of-instances", 10
6401 )
6402
6403 default_instance_num = get_number_of_instances(
6404 db_vnfd, vdud["id"]
6405 )
6406 instances_number = vdu_delta.get("number-of-instances", 1)
6407 nb_scale_op += instances_number
6408
6409 new_instance_count = nb_scale_op + default_instance_num
6410 # Control if new count is over max and vdu count is less than max.
6411 # Then assign new instance count
6412 if new_instance_count > max_instance_count > vdu_count:
6413 instances_number = new_instance_count - max_instance_count
6414 else:
6415 instances_number = instances_number
6416
6417 if new_instance_count > max_instance_count:
6418 raise LcmException(
6419 "reached the limit of {} (max-instance-count) "
6420 "scaling-out operations for the "
6421 "scaling-group-descriptor '{}'".format(
6422 nb_scale_op, scaling_group
6423 )
6424 )
6425 for x in range(vdu_delta.get("number-of-instances", 1)):
6426 if cloud_init_text:
6427 # TODO Information of its own ip is not available because db_vnfr is not updated.
6428 additional_params["OSM"] = get_osm_params(
6429 db_vnfr, vdu_delta["id"], vdu_index + x
6430 )
6431 cloud_init_list.append(
6432 self._parse_cloud_init(
6433 cloud_init_text,
6434 additional_params,
6435 db_vnfd["id"],
6436 vdud["id"],
6437 )
6438 )
6439 vca_scaling_info.append(
6440 {
6441 "osm_vdu_id": vdu_delta["id"],
6442 "member-vnf-index": vnf_index,
6443 "type": "create",
6444 "vdu_index": vdu_index + x,
6445 }
6446 )
6447 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6448 for kdu_delta in delta.get("kdu-resource-delta", {}):
6449 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6450 kdu_name = kdu_profile["kdu-name"]
6451 resource_name = kdu_profile.get("resource-name", "")
6452
6453 # Might have different kdus in the same delta
6454 # Should have list for each kdu
6455 if not scaling_info["kdu-create"].get(kdu_name, None):
6456 scaling_info["kdu-create"][kdu_name] = []
6457
6458 kdur = get_kdur(db_vnfr, kdu_name)
6459 if kdur.get("helm-chart"):
6460 k8s_cluster_type = "helm-chart-v3"
6461 self.logger.debug("kdur: {}".format(kdur))
6462 elif kdur.get("juju-bundle"):
6463 k8s_cluster_type = "juju-bundle"
6464 else:
6465 raise LcmException(
6466 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6467 "juju-bundle. Maybe an old NBI version is running".format(
6468 db_vnfr["member-vnf-index-ref"], kdu_name
6469 )
6470 )
6471
6472 max_instance_count = 10
6473 if kdu_profile and "max-number-of-instances" in kdu_profile:
6474 max_instance_count = kdu_profile.get(
6475 "max-number-of-instances", 10
6476 )
6477
6478 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6479 deployed_kdu, _ = get_deployed_kdu(
6480 nsr_deployed, kdu_name, vnf_index
6481 )
6482 if deployed_kdu is None:
6483 raise LcmException(
6484 "KDU '{}' for vnf '{}' not deployed".format(
6485 kdu_name, vnf_index
6486 )
6487 )
6488 kdu_instance = deployed_kdu.get("kdu-instance")
6489 instance_num = await self.k8scluster_map[
6490 k8s_cluster_type
6491 ].get_scale_count(
6492 resource_name,
6493 kdu_instance,
6494 vca_id=vca_id,
6495 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6496 kdu_model=deployed_kdu.get("kdu-model"),
6497 )
6498 kdu_replica_count = instance_num + kdu_delta.get(
6499 "number-of-instances", 1
6500 )
6501
6502 # Control if new count is over max and instance_num is less than max.
6503 # Then assign max instance number to kdu replica count
6504 if kdu_replica_count > max_instance_count > instance_num:
6505 kdu_replica_count = max_instance_count
6506 if kdu_replica_count > max_instance_count:
6507 raise LcmException(
6508 "reached the limit of {} (max-instance-count) "
6509 "scaling-out operations for the "
6510 "scaling-group-descriptor '{}'".format(
6511 instance_num, scaling_group
6512 )
6513 )
6514
6515 for x in range(kdu_delta.get("number-of-instances", 1)):
6516 vca_scaling_info.append(
6517 {
6518 "osm_kdu_id": kdu_name,
6519 "member-vnf-index": vnf_index,
6520 "type": "create",
6521 "kdu_index": instance_num + x - 1,
6522 }
6523 )
6524 scaling_info["kdu-create"][kdu_name].append(
6525 {
6526 "member-vnf-index": vnf_index,
6527 "type": "create",
6528 "k8s-cluster-type": k8s_cluster_type,
6529 "resource-name": resource_name,
6530 "scale": kdu_replica_count,
6531 }
6532 )
6533 elif scaling_type == "SCALE_IN":
6534 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6535
6536 scaling_info["scaling_direction"] = "IN"
6537 scaling_info["vdu-delete"] = {}
6538 scaling_info["kdu-delete"] = {}
6539
6540 for delta in deltas:
6541 for vdu_delta in delta.get("vdu-delta", {}):
6542 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6543 min_instance_count = 0
6544 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6545 if vdu_profile and "min-number-of-instances" in vdu_profile:
6546 min_instance_count = vdu_profile["min-number-of-instances"]
6547
6548 default_instance_num = get_number_of_instances(
6549 db_vnfd, vdu_delta["id"]
6550 )
6551 instance_num = vdu_delta.get("number-of-instances", 1)
6552 nb_scale_op -= instance_num
6553
6554 new_instance_count = nb_scale_op + default_instance_num
6555
6556 if new_instance_count < min_instance_count < vdu_count:
6557 instances_number = min_instance_count - new_instance_count
6558 else:
6559 instances_number = instance_num
6560
6561 if new_instance_count < min_instance_count:
6562 raise LcmException(
6563 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6564 "scaling-group-descriptor '{}'".format(
6565 nb_scale_op, scaling_group
6566 )
6567 )
6568 for x in range(vdu_delta.get("number-of-instances", 1)):
6569 vca_scaling_info.append(
6570 {
6571 "osm_vdu_id": vdu_delta["id"],
6572 "member-vnf-index": vnf_index,
6573 "type": "delete",
6574 "vdu_index": vdu_index - 1 - x,
6575 }
6576 )
6577 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6578 for kdu_delta in delta.get("kdu-resource-delta", {}):
6579 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6580 kdu_name = kdu_profile["kdu-name"]
6581 resource_name = kdu_profile.get("resource-name", "")
6582
6583 if not scaling_info["kdu-delete"].get(kdu_name, None):
6584 scaling_info["kdu-delete"][kdu_name] = []
6585
6586 kdur = get_kdur(db_vnfr, kdu_name)
6587 if kdur.get("helm-chart"):
6588 k8s_cluster_type = "helm-chart-v3"
6589 self.logger.debug("kdur: {}".format(kdur))
6590 elif kdur.get("juju-bundle"):
6591 k8s_cluster_type = "juju-bundle"
6592 else:
6593 raise LcmException(
6594 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6595 "juju-bundle. Maybe an old NBI version is running".format(
6596 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6597 )
6598 )
6599
6600 min_instance_count = 0
6601 if kdu_profile and "min-number-of-instances" in kdu_profile:
6602 min_instance_count = kdu_profile["min-number-of-instances"]
6603
6604 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6605 deployed_kdu, _ = get_deployed_kdu(
6606 nsr_deployed, kdu_name, vnf_index
6607 )
6608 if deployed_kdu is None:
6609 raise LcmException(
6610 "KDU '{}' for vnf '{}' not deployed".format(
6611 kdu_name, vnf_index
6612 )
6613 )
6614 kdu_instance = deployed_kdu.get("kdu-instance")
6615 instance_num = await self.k8scluster_map[
6616 k8s_cluster_type
6617 ].get_scale_count(
6618 resource_name,
6619 kdu_instance,
6620 vca_id=vca_id,
6621 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6622 kdu_model=deployed_kdu.get("kdu-model"),
6623 )
6624 kdu_replica_count = instance_num - kdu_delta.get(
6625 "number-of-instances", 1
6626 )
6627
6628 if kdu_replica_count < min_instance_count < instance_num:
6629 kdu_replica_count = min_instance_count
6630 if kdu_replica_count < min_instance_count:
6631 raise LcmException(
6632 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6633 "scaling-group-descriptor '{}'".format(
6634 instance_num, scaling_group
6635 )
6636 )
6637
6638 for x in range(kdu_delta.get("number-of-instances", 1)):
6639 vca_scaling_info.append(
6640 {
6641 "osm_kdu_id": kdu_name,
6642 "member-vnf-index": vnf_index,
6643 "type": "delete",
6644 "kdu_index": instance_num - x - 1,
6645 }
6646 )
6647 scaling_info["kdu-delete"][kdu_name].append(
6648 {
6649 "member-vnf-index": vnf_index,
6650 "type": "delete",
6651 "k8s-cluster-type": k8s_cluster_type,
6652 "resource-name": resource_name,
6653 "scale": kdu_replica_count,
6654 }
6655 )
6656
6657 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6658 vdu_delete = copy(scaling_info.get("vdu-delete"))
6659 if scaling_info["scaling_direction"] == "IN":
6660 for vdur in reversed(db_vnfr["vdur"]):
6661 if vdu_delete.get(vdur["vdu-id-ref"]):
6662 vdu_delete[vdur["vdu-id-ref"]] -= 1
6663 scaling_info["vdu"].append(
6664 {
6665 "name": vdur.get("name") or vdur.get("vdu-name"),
6666 "vdu_id": vdur["vdu-id-ref"],
6667 "interface": [],
6668 }
6669 )
6670 for interface in vdur["interfaces"]:
6671 scaling_info["vdu"][-1]["interface"].append(
6672 {
6673 "name": interface["name"],
6674 "ip_address": interface["ip-address"],
6675 "mac_address": interface.get("mac-address"),
6676 }
6677 )
6678 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6679
6680 # PRE-SCALE BEGIN
6681 step = "Executing pre-scale vnf-config-primitive"
6682 if scaling_descriptor.get("scaling-config-action"):
6683 for scaling_config_action in scaling_descriptor[
6684 "scaling-config-action"
6685 ]:
6686 if (
6687 scaling_config_action.get("trigger") == "pre-scale-in"
6688 and scaling_type == "SCALE_IN"
6689 ) or (
6690 scaling_config_action.get("trigger") == "pre-scale-out"
6691 and scaling_type == "SCALE_OUT"
6692 ):
6693 vnf_config_primitive = scaling_config_action[
6694 "vnf-config-primitive-name-ref"
6695 ]
6696 step = db_nslcmop_update[
6697 "detailed-status"
6698 ] = "executing pre-scale scaling-config-action '{}'".format(
6699 vnf_config_primitive
6700 )
6701
6702 # look for primitive
6703 for config_primitive in (
6704 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6705 ).get("config-primitive", ()):
6706 if config_primitive["name"] == vnf_config_primitive:
6707 break
6708 else:
6709 raise LcmException(
6710 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6711 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6712 "primitive".format(scaling_group, vnf_config_primitive)
6713 )
6714
6715 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6716 if db_vnfr.get("additionalParamsForVnf"):
6717 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6718
6719 scale_process = "VCA"
6720 db_nsr_update["config-status"] = "configuring pre-scaling"
6721 primitive_params = self._map_primitive_params(
6722 config_primitive, {}, vnfr_params
6723 )
6724
6725 # Pre-scale retry check: Check if this sub-operation has been executed before
6726 op_index = self._check_or_add_scale_suboperation(
6727 db_nslcmop,
6728 vnf_index,
6729 vnf_config_primitive,
6730 primitive_params,
6731 "PRE-SCALE",
6732 )
6733 if op_index == self.SUBOPERATION_STATUS_SKIP:
6734 # Skip sub-operation
6735 result = "COMPLETED"
6736 result_detail = "Done"
6737 self.logger.debug(
6738 logging_text
6739 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6740 vnf_config_primitive, result, result_detail
6741 )
6742 )
6743 else:
6744 if op_index == self.SUBOPERATION_STATUS_NEW:
6745 # New sub-operation: Get index of this sub-operation
6746 op_index = (
6747 len(db_nslcmop.get("_admin", {}).get("operations"))
6748 - 1
6749 )
6750 self.logger.debug(
6751 logging_text
6752 + "vnf_config_primitive={} New sub-operation".format(
6753 vnf_config_primitive
6754 )
6755 )
6756 else:
6757 # retry: Get registered params for this existing sub-operation
6758 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6759 op_index
6760 ]
6761 vnf_index = op.get("member_vnf_index")
6762 vnf_config_primitive = op.get("primitive")
6763 primitive_params = op.get("primitive_params")
6764 self.logger.debug(
6765 logging_text
6766 + "vnf_config_primitive={} Sub-operation retry".format(
6767 vnf_config_primitive
6768 )
6769 )
6770 # Execute the primitive, either with new (first-time) or registered (reintent) args
6771 ee_descriptor_id = config_primitive.get(
6772 "execution-environment-ref"
6773 )
6774 primitive_name = config_primitive.get(
6775 "execution-environment-primitive", vnf_config_primitive
6776 )
6777 ee_id, vca_type = self._look_for_deployed_vca(
6778 nsr_deployed["VCA"],
6779 member_vnf_index=vnf_index,
6780 vdu_id=None,
6781 vdu_count_index=None,
6782 ee_descriptor_id=ee_descriptor_id,
6783 )
6784 result, result_detail = await self._ns_execute_primitive(
6785 ee_id,
6786 primitive_name,
6787 primitive_params,
6788 vca_type=vca_type,
6789 vca_id=vca_id,
6790 )
6791 self.logger.debug(
6792 logging_text
6793 + "vnf_config_primitive={} Done with result {} {}".format(
6794 vnf_config_primitive, result, result_detail
6795 )
6796 )
6797 # Update operationState = COMPLETED | FAILED
6798 self._update_suboperation_status(
6799 db_nslcmop, op_index, result, result_detail
6800 )
6801
6802 if result == "FAILED":
6803 raise LcmException(result_detail)
6804 db_nsr_update["config-status"] = old_config_status
6805 scale_process = None
6806 # PRE-SCALE END
6807
6808 db_nsr_update[
6809 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6810 ] = nb_scale_op
6811 db_nsr_update[
6812 "_admin.scaling-group.{}.time".format(admin_scale_index)
6813 ] = time()
6814
6815 # SCALE-IN VCA - BEGIN
6816 if vca_scaling_info:
6817 step = db_nslcmop_update[
6818 "detailed-status"
6819 ] = "Deleting the execution environments"
6820 scale_process = "VCA"
6821 for vca_info in vca_scaling_info:
6822 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6823 member_vnf_index = str(vca_info["member-vnf-index"])
6824 self.logger.debug(
6825 logging_text + "vdu info: {}".format(vca_info)
6826 )
6827 if vca_info.get("osm_vdu_id"):
6828 vdu_id = vca_info["osm_vdu_id"]
6829 vdu_index = int(vca_info["vdu_index"])
6830 stage[
6831 1
6832 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6833 member_vnf_index, vdu_id, vdu_index
6834 )
6835 stage[2] = step = "Scaling in VCA"
6836 self._write_op_status(op_id=nslcmop_id, stage=stage)
6837 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6838 config_update = db_nsr["configurationStatus"]
6839 for vca_index, vca in enumerate(vca_update):
6840 if (
6841 (vca or vca.get("ee_id"))
6842 and vca["member-vnf-index"] == member_vnf_index
6843 and vca["vdu_count_index"] == vdu_index
6844 ):
6845 if vca.get("vdu_id"):
6846 config_descriptor = get_configuration(
6847 db_vnfd, vca.get("vdu_id")
6848 )
6849 elif vca.get("kdu_name"):
6850 config_descriptor = get_configuration(
6851 db_vnfd, vca.get("kdu_name")
6852 )
6853 else:
6854 config_descriptor = get_configuration(
6855 db_vnfd, db_vnfd["id"]
6856 )
6857 operation_params = (
6858 db_nslcmop.get("operationParams") or {}
6859 )
6860 exec_terminate_primitives = not operation_params.get(
6861 "skip_terminate_primitives"
6862 ) and vca.get("needed_terminate")
6863 task = asyncio.ensure_future(
6864 asyncio.wait_for(
6865 self.destroy_N2VC(
6866 logging_text,
6867 db_nslcmop,
6868 vca,
6869 config_descriptor,
6870 vca_index,
6871 destroy_ee=True,
6872 exec_primitives=exec_terminate_primitives,
6873 scaling_in=True,
6874 vca_id=vca_id,
6875 ),
6876 timeout=self.timeout.charm_delete,
6877 )
6878 )
6879 tasks_dict_info[task] = "Terminating VCA {}".format(
6880 vca.get("ee_id")
6881 )
6882 del vca_update[vca_index]
6883 del config_update[vca_index]
6884 # wait for pending tasks of terminate primitives
6885 if tasks_dict_info:
6886 self.logger.debug(
6887 logging_text
6888 + "Waiting for tasks {}".format(
6889 list(tasks_dict_info.keys())
6890 )
6891 )
6892 error_list = await self._wait_for_tasks(
6893 logging_text,
6894 tasks_dict_info,
6895 min(
6896 self.timeout.charm_delete, self.timeout.ns_terminate
6897 ),
6898 stage,
6899 nslcmop_id,
6900 )
6901 tasks_dict_info.clear()
6902 if error_list:
6903 raise LcmException("; ".join(error_list))
6904
6905 db_vca_and_config_update = {
6906 "_admin.deployed.VCA": vca_update,
6907 "configurationStatus": config_update,
6908 }
6909 self.update_db_2(
6910 "nsrs", db_nsr["_id"], db_vca_and_config_update
6911 )
6912 scale_process = None
6913 # SCALE-IN VCA - END
6914
6915 # SCALE RO - BEGIN
6916 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6917 scale_process = "RO"
6918 if self.ro_config.ng:
6919 await self._scale_ng_ro(
6920 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6921 )
6922 scaling_info.pop("vdu-create", None)
6923 scaling_info.pop("vdu-delete", None)
6924
6925 scale_process = None
6926 # SCALE RO - END
6927
6928 # SCALE KDU - BEGIN
6929 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6930 scale_process = "KDU"
6931 await self._scale_kdu(
6932 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6933 )
6934 scaling_info.pop("kdu-create", None)
6935 scaling_info.pop("kdu-delete", None)
6936
6937 scale_process = None
6938 # SCALE KDU - END
6939
6940 if db_nsr_update:
6941 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6942
6943 # SCALE-UP VCA - BEGIN
6944 if vca_scaling_info:
6945 step = db_nslcmop_update[
6946 "detailed-status"
6947 ] = "Creating new execution environments"
6948 scale_process = "VCA"
6949 for vca_info in vca_scaling_info:
6950 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6951 member_vnf_index = str(vca_info["member-vnf-index"])
6952 self.logger.debug(
6953 logging_text + "vdu info: {}".format(vca_info)
6954 )
6955 vnfd_id = db_vnfr["vnfd-ref"]
6956 if vca_info.get("osm_vdu_id"):
6957 vdu_index = int(vca_info["vdu_index"])
6958 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6959 if db_vnfr.get("additionalParamsForVnf"):
6960 deploy_params.update(
6961 parse_yaml_strings(
6962 db_vnfr["additionalParamsForVnf"].copy()
6963 )
6964 )
6965 descriptor_config = get_configuration(
6966 db_vnfd, db_vnfd["id"]
6967 )
6968 if descriptor_config:
6969 vdu_id = None
6970 vdu_name = None
6971 kdu_name = None
6972 kdu_index = None
6973 self._deploy_n2vc(
6974 logging_text=logging_text
6975 + "member_vnf_index={} ".format(member_vnf_index),
6976 db_nsr=db_nsr,
6977 db_vnfr=db_vnfr,
6978 nslcmop_id=nslcmop_id,
6979 nsr_id=nsr_id,
6980 nsi_id=nsi_id,
6981 vnfd_id=vnfd_id,
6982 vdu_id=vdu_id,
6983 kdu_name=kdu_name,
6984 kdu_index=kdu_index,
6985 member_vnf_index=member_vnf_index,
6986 vdu_index=vdu_index,
6987 vdu_name=vdu_name,
6988 deploy_params=deploy_params,
6989 descriptor_config=descriptor_config,
6990 base_folder=base_folder,
6991 task_instantiation_info=tasks_dict_info,
6992 stage=stage,
6993 )
6994 vdu_id = vca_info["osm_vdu_id"]
6995 vdur = find_in_list(
6996 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6997 )
6998 descriptor_config = get_configuration(db_vnfd, vdu_id)
6999 if vdur.get("additionalParams"):
7000 deploy_params_vdu = parse_yaml_strings(
7001 vdur["additionalParams"]
7002 )
7003 else:
7004 deploy_params_vdu = deploy_params
7005 deploy_params_vdu["OSM"] = get_osm_params(
7006 db_vnfr, vdu_id, vdu_count_index=vdu_index
7007 )
7008 if descriptor_config:
7009 vdu_name = None
7010 kdu_name = None
7011 kdu_index = None
7012 stage[
7013 1
7014 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7015 member_vnf_index, vdu_id, vdu_index
7016 )
7017 stage[2] = step = "Scaling out VCA"
7018 self._write_op_status(op_id=nslcmop_id, stage=stage)
7019 self._deploy_n2vc(
7020 logging_text=logging_text
7021 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7022 member_vnf_index, vdu_id, vdu_index
7023 ),
7024 db_nsr=db_nsr,
7025 db_vnfr=db_vnfr,
7026 nslcmop_id=nslcmop_id,
7027 nsr_id=nsr_id,
7028 nsi_id=nsi_id,
7029 vnfd_id=vnfd_id,
7030 vdu_id=vdu_id,
7031 kdu_name=kdu_name,
7032 member_vnf_index=member_vnf_index,
7033 vdu_index=vdu_index,
7034 kdu_index=kdu_index,
7035 vdu_name=vdu_name,
7036 deploy_params=deploy_params_vdu,
7037 descriptor_config=descriptor_config,
7038 base_folder=base_folder,
7039 task_instantiation_info=tasks_dict_info,
7040 stage=stage,
7041 )
7042 # SCALE-UP VCA - END
7043 scale_process = None
7044
7045 # POST-SCALE BEGIN
7046 # execute primitive service POST-SCALING
7047 step = "Executing post-scale vnf-config-primitive"
7048 if scaling_descriptor.get("scaling-config-action"):
7049 for scaling_config_action in scaling_descriptor[
7050 "scaling-config-action"
7051 ]:
7052 if (
7053 scaling_config_action.get("trigger") == "post-scale-in"
7054 and scaling_type == "SCALE_IN"
7055 ) or (
7056 scaling_config_action.get("trigger") == "post-scale-out"
7057 and scaling_type == "SCALE_OUT"
7058 ):
7059 vnf_config_primitive = scaling_config_action[
7060 "vnf-config-primitive-name-ref"
7061 ]
7062 step = db_nslcmop_update[
7063 "detailed-status"
7064 ] = "executing post-scale scaling-config-action '{}'".format(
7065 vnf_config_primitive
7066 )
7067
7068 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7069 if db_vnfr.get("additionalParamsForVnf"):
7070 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7071
7072 # look for primitive
7073 for config_primitive in (
7074 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7075 ).get("config-primitive", ()):
7076 if config_primitive["name"] == vnf_config_primitive:
7077 break
7078 else:
7079 raise LcmException(
7080 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7081 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7082 "config-primitive".format(
7083 scaling_group, vnf_config_primitive
7084 )
7085 )
7086 scale_process = "VCA"
7087 db_nsr_update["config-status"] = "configuring post-scaling"
7088 primitive_params = self._map_primitive_params(
7089 config_primitive, {}, vnfr_params
7090 )
7091
7092 # Post-scale retry check: Check if this sub-operation has been executed before
7093 op_index = self._check_or_add_scale_suboperation(
7094 db_nslcmop,
7095 vnf_index,
7096 vnf_config_primitive,
7097 primitive_params,
7098 "POST-SCALE",
7099 )
7100 if op_index == self.SUBOPERATION_STATUS_SKIP:
7101 # Skip sub-operation
7102 result = "COMPLETED"
7103 result_detail = "Done"
7104 self.logger.debug(
7105 logging_text
7106 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7107 vnf_config_primitive, result, result_detail
7108 )
7109 )
7110 else:
7111 if op_index == self.SUBOPERATION_STATUS_NEW:
7112 # New sub-operation: Get index of this sub-operation
7113 op_index = (
7114 len(db_nslcmop.get("_admin", {}).get("operations"))
7115 - 1
7116 )
7117 self.logger.debug(
7118 logging_text
7119 + "vnf_config_primitive={} New sub-operation".format(
7120 vnf_config_primitive
7121 )
7122 )
7123 else:
7124 # retry: Get registered params for this existing sub-operation
7125 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7126 op_index
7127 ]
7128 vnf_index = op.get("member_vnf_index")
7129 vnf_config_primitive = op.get("primitive")
7130 primitive_params = op.get("primitive_params")
7131 self.logger.debug(
7132 logging_text
7133 + "vnf_config_primitive={} Sub-operation retry".format(
7134 vnf_config_primitive
7135 )
7136 )
7137 # Execute the primitive, either with new (first-time) or registered (reintent) args
7138 ee_descriptor_id = config_primitive.get(
7139 "execution-environment-ref"
7140 )
7141 primitive_name = config_primitive.get(
7142 "execution-environment-primitive", vnf_config_primitive
7143 )
7144 ee_id, vca_type = self._look_for_deployed_vca(
7145 nsr_deployed["VCA"],
7146 member_vnf_index=vnf_index,
7147 vdu_id=None,
7148 vdu_count_index=None,
7149 ee_descriptor_id=ee_descriptor_id,
7150 )
7151 result, result_detail = await self._ns_execute_primitive(
7152 ee_id,
7153 primitive_name,
7154 primitive_params,
7155 vca_type=vca_type,
7156 vca_id=vca_id,
7157 )
7158 self.logger.debug(
7159 logging_text
7160 + "vnf_config_primitive={} Done with result {} {}".format(
7161 vnf_config_primitive, result, result_detail
7162 )
7163 )
7164 # Update operationState = COMPLETED | FAILED
7165 self._update_suboperation_status(
7166 db_nslcmop, op_index, result, result_detail
7167 )
7168
7169 if result == "FAILED":
7170 raise LcmException(result_detail)
7171 db_nsr_update["config-status"] = old_config_status
7172 scale_process = None
7173 # POST-SCALE END
7174 # Check if each vnf has exporter for metric collection if so update prometheus job records
7175 if scaling_type == "SCALE_OUT":
7176 if "exporters-endpoints" in db_vnfd.get("df")[0]:
7177 vnfr_id = db_vnfr["id"]
7178 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7179 exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
7180 self.logger.debug("exporter config :{}".format(exporter_config))
7181 artifact_path = "{}/{}/{}".format(
7182 base_folder["folder"],
7183 base_folder["pkg-dir"],
7184 "exporter-endpoint",
7185 )
7186 ee_id = None
7187 ee_config_descriptor = exporter_config
7188 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
7189 logging_text,
7190 nsr_id,
7191 vnfr_id,
7192 vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
7193 vdu_index=db_vnfr["vdur"][-1]["count-index"],
7194 user=None,
7195 pub_key=None,
7196 )
7197 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
7198 self.logger.debug("Artifact_path:{}".format(artifact_path))
7199 vdu_id_for_prom = None
7200 vdu_index_for_prom = None
7201 for x in get_iterable(db_vnfr, "vdur"):
7202 vdu_id_for_prom = x.get("vdu-id-ref")
7203 vdu_index_for_prom = x.get("count-index")
7204 vnfr_id = vnfr_id + vdu_id + str(vdu_index)
7205 vnfr_id = vnfr_id.replace("_", "")
7206 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
7207 ee_id=ee_id,
7208 artifact_path=artifact_path,
7209 ee_config_descriptor=ee_config_descriptor,
7210 vnfr_id=vnfr_id,
7211 nsr_id=nsr_id,
7212 target_ip=rw_mgmt_ip,
7213 element_type="VDU",
7214 vdu_id=vdu_id_for_prom,
7215 vdu_index=vdu_index_for_prom,
7216 )
7217
7218 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
7219 if prometheus_jobs:
7220 db_nsr_update[
7221 "_admin.deployed.prometheus_jobs"
7222 ] = prometheus_jobs
7223 self.update_db_2(
7224 "nsrs",
7225 nsr_id,
7226 db_nsr_update,
7227 )
7228
7229 for job in prometheus_jobs:
7230 self.db.set_one(
7231 "prometheus_jobs",
7232 {"job_name": ""},
7233 job,
7234 upsert=True,
7235 fail_on_empty=False,
7236 )
7237 db_nsr_update[
7238 "detailed-status"
7239 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7240 db_nsr_update["operational-status"] = (
7241 "running"
7242 if old_operational_status == "failed"
7243 else old_operational_status
7244 )
7245 db_nsr_update["config-status"] = old_config_status
7246 return
7247 except (
7248 ROclient.ROClientException,
7249 DbException,
7250 LcmException,
7251 NgRoException,
7252 ) as e:
7253 self.logger.error(logging_text + "Exit Exception {}".format(e))
7254 exc = e
7255 except asyncio.CancelledError:
7256 self.logger.error(
7257 logging_text + "Cancelled Exception while '{}'".format(step)
7258 )
7259 exc = "Operation was cancelled"
7260 except Exception as e:
7261 exc = traceback.format_exc()
7262 self.logger.critical(
7263 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7264 exc_info=True,
7265 )
7266 finally:
7267 error_list = list()
7268 if exc:
7269 error_list.append(str(exc))
7270 self._write_ns_status(
7271 nsr_id=nsr_id,
7272 ns_state=None,
7273 current_operation="IDLE",
7274 current_operation_id=None,
7275 )
7276 try:
7277 if tasks_dict_info:
7278 stage[1] = "Waiting for instantiate pending tasks."
7279 self.logger.debug(logging_text + stage[1])
7280 exc = await self._wait_for_tasks(
7281 logging_text,
7282 tasks_dict_info,
7283 self.timeout.ns_deploy,
7284 stage,
7285 nslcmop_id,
7286 nsr_id=nsr_id,
7287 )
7288 except asyncio.CancelledError:
7289 error_list.append("Cancelled")
7290 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
7291 await self._wait_for_tasks(
7292 logging_text,
7293 tasks_dict_info,
7294 self.timeout.ns_deploy,
7295 stage,
7296 nslcmop_id,
7297 nsr_id=nsr_id,
7298 )
7299 if error_list:
7300 error_detail = "; ".join(error_list)
7301 db_nslcmop_update[
7302 "detailed-status"
7303 ] = error_description_nslcmop = "FAILED {}: {}".format(
7304 step, error_detail
7305 )
7306 nslcmop_operation_state = "FAILED"
7307 if db_nsr:
7308 db_nsr_update["operational-status"] = old_operational_status
7309 db_nsr_update["config-status"] = old_config_status
7310 db_nsr_update["detailed-status"] = ""
7311 if scale_process:
7312 if "VCA" in scale_process:
7313 db_nsr_update["config-status"] = "failed"
7314 if "RO" in scale_process:
7315 db_nsr_update["operational-status"] = "failed"
7316 db_nsr_update[
7317 "detailed-status"
7318 ] = "FAILED scaling nslcmop={} {}: {}".format(
7319 nslcmop_id, step, error_detail
7320 )
7321 else:
7322 error_description_nslcmop = None
7323 nslcmop_operation_state = "COMPLETED"
7324 db_nslcmop_update["detailed-status"] = "Done"
7325 if scaling_type == "SCALE_IN" and prom_job_name is not None:
7326 self.db.del_one(
7327 "prometheus_jobs",
7328 {"job_name": prom_job_name},
7329 fail_on_empty=False,
7330 )
7331
7332 self._write_op_status(
7333 op_id=nslcmop_id,
7334 stage="",
7335 error_message=error_description_nslcmop,
7336 operation_state=nslcmop_operation_state,
7337 other_update=db_nslcmop_update,
7338 )
7339 if db_nsr:
7340 self._write_ns_status(
7341 nsr_id=nsr_id,
7342 ns_state=None,
7343 current_operation="IDLE",
7344 current_operation_id=None,
7345 other_update=db_nsr_update,
7346 )
7347
7348 if nslcmop_operation_state:
7349 try:
7350 msg = {
7351 "nsr_id": nsr_id,
7352 "nslcmop_id": nslcmop_id,
7353 "operationState": nslcmop_operation_state,
7354 }
7355 await self.msg.aiowrite("ns", "scaled", msg)
7356 except Exception as e:
7357 self.logger.error(
7358 logging_text + "kafka_write notification Exception {}".format(e)
7359 )
7360 self.logger.debug(logging_text + "Exit")
7361 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7362
7363 async def _scale_kdu(
7364 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7365 ):
7366 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7367 for kdu_name in _scaling_info:
7368 for kdu_scaling_info in _scaling_info[kdu_name]:
7369 deployed_kdu, index = get_deployed_kdu(
7370 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7371 )
7372 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7373 kdu_instance = deployed_kdu["kdu-instance"]
7374 kdu_model = deployed_kdu.get("kdu-model")
7375 scale = int(kdu_scaling_info["scale"])
7376 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7377
7378 db_dict = {
7379 "collection": "nsrs",
7380 "filter": {"_id": nsr_id},
7381 "path": "_admin.deployed.K8s.{}".format(index),
7382 }
7383
7384 step = "scaling application {}".format(
7385 kdu_scaling_info["resource-name"]
7386 )
7387 self.logger.debug(logging_text + step)
7388
7389 if kdu_scaling_info["type"] == "delete":
7390 kdu_config = get_configuration(db_vnfd, kdu_name)
7391 if (
7392 kdu_config
7393 and kdu_config.get("terminate-config-primitive")
7394 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7395 ):
7396 terminate_config_primitive_list = kdu_config.get(
7397 "terminate-config-primitive"
7398 )
7399 terminate_config_primitive_list.sort(
7400 key=lambda val: int(val["seq"])
7401 )
7402
7403 for (
7404 terminate_config_primitive
7405 ) in terminate_config_primitive_list:
7406 primitive_params_ = self._map_primitive_params(
7407 terminate_config_primitive, {}, {}
7408 )
7409 step = "execute terminate config primitive"
7410 self.logger.debug(logging_text + step)
7411 await asyncio.wait_for(
7412 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7413 cluster_uuid=cluster_uuid,
7414 kdu_instance=kdu_instance,
7415 primitive_name=terminate_config_primitive["name"],
7416 params=primitive_params_,
7417 db_dict=db_dict,
7418 total_timeout=self.timeout.primitive,
7419 vca_id=vca_id,
7420 ),
7421 timeout=self.timeout.primitive
7422 * self.timeout.primitive_outer_factor,
7423 )
7424
7425 await asyncio.wait_for(
7426 self.k8scluster_map[k8s_cluster_type].scale(
7427 kdu_instance=kdu_instance,
7428 scale=scale,
7429 resource_name=kdu_scaling_info["resource-name"],
7430 total_timeout=self.timeout.scale_on_error,
7431 vca_id=vca_id,
7432 cluster_uuid=cluster_uuid,
7433 kdu_model=kdu_model,
7434 atomic=True,
7435 db_dict=db_dict,
7436 ),
7437 timeout=self.timeout.scale_on_error
7438 * self.timeout.scale_on_error_outer_factor,
7439 )
7440
7441 if kdu_scaling_info["type"] == "create":
7442 kdu_config = get_configuration(db_vnfd, kdu_name)
7443 if (
7444 kdu_config
7445 and kdu_config.get("initial-config-primitive")
7446 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7447 ):
7448 initial_config_primitive_list = kdu_config.get(
7449 "initial-config-primitive"
7450 )
7451 initial_config_primitive_list.sort(
7452 key=lambda val: int(val["seq"])
7453 )
7454
7455 for initial_config_primitive in initial_config_primitive_list:
7456 primitive_params_ = self._map_primitive_params(
7457 initial_config_primitive, {}, {}
7458 )
7459 step = "execute initial config primitive"
7460 self.logger.debug(logging_text + step)
7461 await asyncio.wait_for(
7462 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7463 cluster_uuid=cluster_uuid,
7464 kdu_instance=kdu_instance,
7465 primitive_name=initial_config_primitive["name"],
7466 params=primitive_params_,
7467 db_dict=db_dict,
7468 vca_id=vca_id,
7469 ),
7470 timeout=600,
7471 )
7472
7473 async def _scale_ng_ro(
7474 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7475 ):
7476 nsr_id = db_nslcmop["nsInstanceId"]
7477 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7478 db_vnfrs = {}
7479
7480 # read from db: vnfd's for every vnf
7481 db_vnfds = []
7482
7483 # for each vnf in ns, read vnfd
7484 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7485 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7486 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7487 # if we haven't this vnfd, read it from db
7488 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7489 # read from db
7490 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7491 db_vnfds.append(vnfd)
7492 n2vc_key = self.n2vc.get_public_key()
7493 n2vc_key_list = [n2vc_key]
7494 self.scale_vnfr(
7495 db_vnfr,
7496 vdu_scaling_info.get("vdu-create"),
7497 vdu_scaling_info.get("vdu-delete"),
7498 mark_delete=True,
7499 )
7500 # db_vnfr has been updated, update db_vnfrs to use it
7501 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7502 await self._instantiate_ng_ro(
7503 logging_text,
7504 nsr_id,
7505 db_nsd,
7506 db_nsr,
7507 db_nslcmop,
7508 db_vnfrs,
7509 db_vnfds,
7510 n2vc_key_list,
7511 stage=stage,
7512 start_deploy=time(),
7513 timeout_ns_deploy=self.timeout.ns_deploy,
7514 )
7515 if vdu_scaling_info.get("vdu-delete"):
7516 self.scale_vnfr(
7517 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7518 )
7519
7520 async def extract_prometheus_scrape_jobs(
7521 self,
7522 ee_id: str,
7523 artifact_path: str,
7524 ee_config_descriptor: dict,
7525 vnfr_id: str,
7526 nsr_id: str,
7527 target_ip: str,
7528 element_type: str,
7529 vnf_member_index: str = "",
7530 vdu_id: str = "",
7531 vdu_index: int = None,
7532 kdu_name: str = "",
7533 kdu_index: int = None,
7534 ) -> dict:
7535 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7536 This method will wait until the corresponding VDU or KDU is fully instantiated
7537
7538 Args:
7539 ee_id (str): Execution Environment ID
7540 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7541 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7542 vnfr_id (str): VNFR ID where this EE applies
7543 nsr_id (str): NSR ID where this EE applies
7544 target_ip (str): VDU/KDU instance IP address
7545 element_type (str): NS or VNF or VDU or KDU
7546 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7547 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7548 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7549 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7550 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7551
7552 Raises:
7553 LcmException: When the VDU or KDU instance was not found in an hour
7554
7555 Returns:
7556 _type_: Prometheus jobs
7557 """
7558 # default the vdur and kdur names to an empty string, to avoid any later
7559 # problem with Prometheus when the element type is not VDU or KDU
7560 vdur_name = ""
7561 kdur_name = ""
7562
7563 # look if exist a file called 'prometheus*.j2' and
7564 artifact_content = self.fs.dir_ls(artifact_path)
7565 job_file = next(
7566 (
7567 f
7568 for f in artifact_content
7569 if f.startswith("prometheus") and f.endswith(".j2")
7570 ),
7571 None,
7572 )
7573 if not job_file:
7574 return
7575 self.logger.debug("Artifact path{}".format(artifact_path))
7576 self.logger.debug("job file{}".format(job_file))
7577 with self.fs.file_open((artifact_path, job_file), "r") as f:
7578 job_data = f.read()
7579
7580 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7581 if element_type in ("VDU", "KDU"):
7582 for _ in range(360):
7583 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7584 if vdu_id and vdu_index is not None:
7585 vdur = next(
7586 (
7587 x
7588 for x in get_iterable(db_vnfr, "vdur")
7589 if (
7590 x.get("vdu-id-ref") == vdu_id
7591 and x.get("count-index") == vdu_index
7592 )
7593 ),
7594 {},
7595 )
7596 if vdur.get("name"):
7597 vdur_name = vdur.get("name")
7598 break
7599 if kdu_name and kdu_index is not None:
7600 kdur = next(
7601 (
7602 x
7603 for x in get_iterable(db_vnfr, "kdur")
7604 if (
7605 x.get("kdu-name") == kdu_name
7606 and x.get("count-index") == kdu_index
7607 )
7608 ),
7609 {},
7610 )
7611 if kdur.get("name"):
7612 kdur_name = kdur.get("name")
7613 break
7614
7615 await asyncio.sleep(10)
7616 else:
7617 if vdu_id and vdu_index is not None:
7618 raise LcmException(
7619 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7620 )
7621 if kdu_name and kdu_index is not None:
7622 raise LcmException(
7623 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7624 )
7625
7626 if ee_id is not None:
7627 _, namespace, helm_id = get_ee_id_parts(
7628 ee_id
7629 ) # get namespace and EE gRPC service name
7630 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7631 host_port = "80"
7632 vnfr_id = vnfr_id.replace("-", "")
7633 variables = {
7634 "JOB_NAME": vnfr_id,
7635 "TARGET_IP": target_ip,
7636 "EXPORTER_POD_IP": host_name,
7637 "EXPORTER_POD_PORT": host_port,
7638 "NSR_ID": nsr_id,
7639 "VNF_MEMBER_INDEX": vnf_member_index,
7640 "VDUR_NAME": vdur_name,
7641 "KDUR_NAME": kdur_name,
7642 "ELEMENT_TYPE": element_type,
7643 }
7644 else:
7645 metric_path = ee_config_descriptor["metric-path"]
7646 target_port = ee_config_descriptor["metric-port"]
7647 vnfr_id = vnfr_id.replace("-", "")
7648 variables = {
7649 "JOB_NAME": vnfr_id,
7650 "TARGET_IP": target_ip,
7651 "TARGET_PORT": target_port,
7652 "METRIC_PATH": metric_path,
7653 }
7654
7655 job_list = parse_job(job_data, variables)
7656 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7657 for job in job_list:
7658 if (
7659 not isinstance(job.get("job_name"), str)
7660 or vnfr_id not in job["job_name"]
7661 ):
7662 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7663 job["nsr_id"] = nsr_id
7664 job["vnfr_id"] = vnfr_id
7665 return job_list
7666
7667 async def rebuild_start_stop(
7668 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7669 ):
7670 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7671 self.logger.info(logging_text + "Enter")
7672 stage = ["Preparing the environment", ""]
7673 # database nsrs record
7674 db_nsr_update = {}
7675 vdu_vim_name = None
7676 vim_vm_id = None
7677 # in case of error, indicates what part of scale was failed to put nsr at error status
7678 start_deploy = time()
7679 try:
7680 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7681 vim_account_id = db_vnfr.get("vim-account-id")
7682 vim_info_key = "vim:" + vim_account_id
7683 vdu_id = additional_param["vdu_id"]
7684 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7685 vdur = find_in_list(
7686 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7687 )
7688 if vdur:
7689 vdu_vim_name = vdur["name"]
7690 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7691 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7692 else:
7693 raise LcmException("Target vdu is not found")
7694 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7695 # wait for any previous tasks in process
7696 stage[1] = "Waiting for previous operations to terminate"
7697 self.logger.info(stage[1])
7698 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7699
7700 stage[1] = "Reading from database."
7701 self.logger.info(stage[1])
7702 self._write_ns_status(
7703 nsr_id=nsr_id,
7704 ns_state=None,
7705 current_operation=operation_type.upper(),
7706 current_operation_id=nslcmop_id,
7707 )
7708 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7709
7710 # read from db: ns
7711 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7712 db_nsr_update["operational-status"] = operation_type
7713 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7714 # Payload for RO
7715 desc = {
7716 operation_type: {
7717 "vim_vm_id": vim_vm_id,
7718 "vnf_id": vnf_id,
7719 "vdu_index": additional_param["count-index"],
7720 "vdu_id": vdur["id"],
7721 "target_vim": target_vim,
7722 "vim_account_id": vim_account_id,
7723 }
7724 }
7725 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7726 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7727 self.logger.info("ro nsr id: {}".format(nsr_id))
7728 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7729 self.logger.info("response from RO: {}".format(result_dict))
7730 action_id = result_dict["action_id"]
7731 await self._wait_ng_ro(
7732 nsr_id,
7733 action_id,
7734 nslcmop_id,
7735 start_deploy,
7736 self.timeout.operate,
7737 None,
7738 "start_stop_rebuild",
7739 )
7740 return "COMPLETED", "Done"
7741 except (ROclient.ROClientException, DbException, LcmException) as e:
7742 self.logger.error("Exit Exception {}".format(e))
7743 exc = e
7744 except asyncio.CancelledError:
7745 self.logger.error("Cancelled Exception while '{}'".format(stage))
7746 exc = "Operation was cancelled"
7747 except Exception as e:
7748 exc = traceback.format_exc()
7749 self.logger.critical(
7750 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7751 )
7752 return "FAILED", "Error in operate VNF {}".format(exc)
7753
7754 async def migrate(self, nsr_id, nslcmop_id):
7755 """
7756 Migrate VNFs and VDUs instances in a NS
7757
7758 :param: nsr_id: NS Instance ID
7759 :param: nslcmop_id: nslcmop ID of migrate
7760
7761 """
7762 # Try to lock HA task here
7763 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7764 if not task_is_locked_by_me:
7765 return
7766 logging_text = "Task ns={} migrate ".format(nsr_id)
7767 self.logger.debug(logging_text + "Enter")
7768 # get all needed from database
7769 db_nslcmop = None
7770 db_nslcmop_update = {}
7771 nslcmop_operation_state = None
7772 db_nsr_update = {}
7773 target = {}
7774 exc = None
7775 # in case of error, indicates what part of scale was failed to put nsr at error status
7776 start_deploy = time()
7777
7778 try:
7779 # wait for any previous tasks in process
7780 step = "Waiting for previous operations to terminate"
7781 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7782
7783 self._write_ns_status(
7784 nsr_id=nsr_id,
7785 ns_state=None,
7786 current_operation="MIGRATING",
7787 current_operation_id=nslcmop_id,
7788 )
7789 step = "Getting nslcmop from database"
7790 self.logger.debug(
7791 step + " after having waited for previous tasks to be completed"
7792 )
7793 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7794 migrate_params = db_nslcmop.get("operationParams")
7795
7796 target = {}
7797 target.update(migrate_params)
7798 desc = await self.RO.migrate(nsr_id, target)
7799 self.logger.debug("RO return > {}".format(desc))
7800 action_id = desc["action_id"]
7801 await self._wait_ng_ro(
7802 nsr_id,
7803 action_id,
7804 nslcmop_id,
7805 start_deploy,
7806 self.timeout.migrate,
7807 operation="migrate",
7808 )
7809 except (ROclient.ROClientException, DbException, LcmException) as e:
7810 self.logger.error("Exit Exception {}".format(e))
7811 exc = e
7812 except asyncio.CancelledError:
7813 self.logger.error("Cancelled Exception while '{}'".format(step))
7814 exc = "Operation was cancelled"
7815 except Exception as e:
7816 exc = traceback.format_exc()
7817 self.logger.critical(
7818 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7819 )
7820 finally:
7821 self._write_ns_status(
7822 nsr_id=nsr_id,
7823 ns_state=None,
7824 current_operation="IDLE",
7825 current_operation_id=None,
7826 )
7827 if exc:
7828 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7829 nslcmop_operation_state = "FAILED"
7830 else:
7831 nslcmop_operation_state = "COMPLETED"
7832 db_nslcmop_update["detailed-status"] = "Done"
7833 db_nsr_update["detailed-status"] = "Done"
7834
7835 self._write_op_status(
7836 op_id=nslcmop_id,
7837 stage="",
7838 error_message="",
7839 operation_state=nslcmop_operation_state,
7840 other_update=db_nslcmop_update,
7841 )
7842 if nslcmop_operation_state:
7843 try:
7844 msg = {
7845 "nsr_id": nsr_id,
7846 "nslcmop_id": nslcmop_id,
7847 "operationState": nslcmop_operation_state,
7848 }
7849 await self.msg.aiowrite("ns", "migrated", msg)
7850 except Exception as e:
7851 self.logger.error(
7852 logging_text + "kafka_write notification Exception {}".format(e)
7853 )
7854 self.logger.debug(logging_text + "Exit")
7855 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7856
7857 async def heal(self, nsr_id, nslcmop_id):
7858 """
7859 Heal NS
7860
7861 :param nsr_id: ns instance to heal
7862 :param nslcmop_id: operation to run
7863 :return:
7864 """
7865
7866 # Try to lock HA task here
7867 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7868 if not task_is_locked_by_me:
7869 return
7870
7871 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7872 stage = ["", "", ""]
7873 tasks_dict_info = {}
7874 # ^ stage, step, VIM progress
7875 self.logger.debug(logging_text + "Enter")
7876 # get all needed from database
7877 db_nsr = None
7878 db_nslcmop_update = {}
7879 db_nsr_update = {}
7880 db_vnfrs = {} # vnf's info indexed by _id
7881 exc = None
7882 old_operational_status = ""
7883 old_config_status = ""
7884 nsi_id = None
7885 try:
7886 # wait for any previous tasks in process
7887 step = "Waiting for previous operations to terminate"
7888 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7889 self._write_ns_status(
7890 nsr_id=nsr_id,
7891 ns_state=None,
7892 current_operation="HEALING",
7893 current_operation_id=nslcmop_id,
7894 )
7895
7896 step = "Getting nslcmop from database"
7897 self.logger.debug(
7898 step + " after having waited for previous tasks to be completed"
7899 )
7900 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7901
7902 step = "Getting nsr from database"
7903 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7904 old_operational_status = db_nsr["operational-status"]
7905 old_config_status = db_nsr["config-status"]
7906
7907 db_nsr_update = {
7908 "_admin.deployed.RO.operational-status": "healing",
7909 }
7910 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7911
7912 step = "Sending heal order to VIM"
7913 await self.heal_RO(
7914 logging_text=logging_text,
7915 nsr_id=nsr_id,
7916 db_nslcmop=db_nslcmop,
7917 stage=stage,
7918 )
7919 # VCA tasks
7920 # read from db: nsd
7921 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7922 self.logger.debug(logging_text + stage[1])
7923 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7924 self.fs.sync(db_nsr["nsd-id"])
7925 db_nsr["nsd"] = nsd
7926 # read from db: vnfr's of this ns
7927 step = "Getting vnfrs from db"
7928 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7929 for vnfr in db_vnfrs_list:
7930 db_vnfrs[vnfr["_id"]] = vnfr
7931 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7932
7933 # Check for each target VNF
7934 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7935 for target_vnf in target_list:
7936 # Find this VNF in the list from DB
7937 vnfr_id = target_vnf.get("vnfInstanceId", None)
7938 if vnfr_id:
7939 db_vnfr = db_vnfrs[vnfr_id]
7940 vnfd_id = db_vnfr.get("vnfd-id")
7941 vnfd_ref = db_vnfr.get("vnfd-ref")
7942 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7943 base_folder = vnfd["_admin"]["storage"]
7944 vdu_id = None
7945 vdu_index = 0
7946 vdu_name = None
7947 kdu_name = None
7948 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7949 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7950
7951 # Check each target VDU and deploy N2VC
7952 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7953 "vdu", []
7954 )
7955 if not target_vdu_list:
7956 # Codigo nuevo para crear diccionario
7957 target_vdu_list = []
7958 for existing_vdu in db_vnfr.get("vdur"):
7959 vdu_name = existing_vdu.get("vdu-name", None)
7960 vdu_index = existing_vdu.get("count-index", 0)
7961 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7962 "run-day1", False
7963 )
7964 vdu_to_be_healed = {
7965 "vdu-id": vdu_name,
7966 "count-index": vdu_index,
7967 "run-day1": vdu_run_day1,
7968 }
7969 target_vdu_list.append(vdu_to_be_healed)
7970 for target_vdu in target_vdu_list:
7971 deploy_params_vdu = target_vdu
7972 # Set run-day1 vnf level value if not vdu level value exists
7973 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
7974 "additionalParams", {}
7975 ).get("run-day1"):
7976 deploy_params_vdu["run-day1"] = target_vnf[
7977 "additionalParams"
7978 ].get("run-day1")
7979 vdu_name = target_vdu.get("vdu-id", None)
7980 # TODO: Get vdu_id from vdud.
7981 vdu_id = vdu_name
7982 # For multi instance VDU count-index is mandatory
7983 # For single session VDU count-indes is 0
7984 vdu_index = target_vdu.get("count-index", 0)
7985
7986 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7987 stage[1] = "Deploying Execution Environments."
7988 self.logger.debug(logging_text + stage[1])
7989
7990 # VNF Level charm. Normal case when proxy charms.
7991 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7992 descriptor_config = get_configuration(vnfd, vnfd_ref)
7993 if descriptor_config:
7994 # Continue if healed machine is management machine
7995 vnf_ip_address = db_vnfr.get("ip-address")
7996 target_instance = None
7997 for instance in db_vnfr.get("vdur", None):
7998 if (
7999 instance["vdu-name"] == vdu_name
8000 and instance["count-index"] == vdu_index
8001 ):
8002 target_instance = instance
8003 break
8004 if vnf_ip_address == target_instance.get("ip-address"):
8005 self._heal_n2vc(
8006 logging_text=logging_text
8007 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8008 member_vnf_index, vdu_name, vdu_index
8009 ),
8010 db_nsr=db_nsr,
8011 db_vnfr=db_vnfr,
8012 nslcmop_id=nslcmop_id,
8013 nsr_id=nsr_id,
8014 nsi_id=nsi_id,
8015 vnfd_id=vnfd_ref,
8016 vdu_id=None,
8017 kdu_name=None,
8018 member_vnf_index=member_vnf_index,
8019 vdu_index=0,
8020 vdu_name=None,
8021 deploy_params=deploy_params_vdu,
8022 descriptor_config=descriptor_config,
8023 base_folder=base_folder,
8024 task_instantiation_info=tasks_dict_info,
8025 stage=stage,
8026 )
8027
8028 # VDU Level charm. Normal case with native charms.
8029 descriptor_config = get_configuration(vnfd, vdu_name)
8030 if descriptor_config:
8031 self._heal_n2vc(
8032 logging_text=logging_text
8033 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8034 member_vnf_index, vdu_name, vdu_index
8035 ),
8036 db_nsr=db_nsr,
8037 db_vnfr=db_vnfr,
8038 nslcmop_id=nslcmop_id,
8039 nsr_id=nsr_id,
8040 nsi_id=nsi_id,
8041 vnfd_id=vnfd_ref,
8042 vdu_id=vdu_id,
8043 kdu_name=kdu_name,
8044 member_vnf_index=member_vnf_index,
8045 vdu_index=vdu_index,
8046 vdu_name=vdu_name,
8047 deploy_params=deploy_params_vdu,
8048 descriptor_config=descriptor_config,
8049 base_folder=base_folder,
8050 task_instantiation_info=tasks_dict_info,
8051 stage=stage,
8052 )
8053
8054 except (
8055 ROclient.ROClientException,
8056 DbException,
8057 LcmException,
8058 NgRoException,
8059 ) as e:
8060 self.logger.error(logging_text + "Exit Exception {}".format(e))
8061 exc = e
8062 except asyncio.CancelledError:
8063 self.logger.error(
8064 logging_text + "Cancelled Exception while '{}'".format(step)
8065 )
8066 exc = "Operation was cancelled"
8067 except Exception as e:
8068 exc = traceback.format_exc()
8069 self.logger.critical(
8070 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8071 exc_info=True,
8072 )
8073 finally:
8074 error_list = list()
8075 if exc:
8076 error_list.append(str(exc))
8077 try:
8078 if tasks_dict_info:
8079 stage[1] = "Waiting for healing pending tasks."
8080 self.logger.debug(logging_text + stage[1])
8081 exc = await self._wait_for_tasks(
8082 logging_text,
8083 tasks_dict_info,
8084 self.timeout.ns_deploy,
8085 stage,
8086 nslcmop_id,
8087 nsr_id=nsr_id,
8088 )
8089 except asyncio.CancelledError:
8090 error_list.append("Cancelled")
8091 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
8092 await self._wait_for_tasks(
8093 logging_text,
8094 tasks_dict_info,
8095 self.timeout.ns_deploy,
8096 stage,
8097 nslcmop_id,
8098 nsr_id=nsr_id,
8099 )
8100 if error_list:
8101 error_detail = "; ".join(error_list)
8102 db_nslcmop_update[
8103 "detailed-status"
8104 ] = error_description_nslcmop = "FAILED {}: {}".format(
8105 step, error_detail
8106 )
8107 nslcmop_operation_state = "FAILED"
8108 if db_nsr:
8109 db_nsr_update["operational-status"] = old_operational_status
8110 db_nsr_update["config-status"] = old_config_status
8111 db_nsr_update[
8112 "detailed-status"
8113 ] = "FAILED healing nslcmop={} {}: {}".format(
8114 nslcmop_id, step, error_detail
8115 )
8116 for task, task_name in tasks_dict_info.items():
8117 if not task.done() or task.cancelled() or task.exception():
8118 if task_name.startswith(self.task_name_deploy_vca):
8119 # A N2VC task is pending
8120 db_nsr_update["config-status"] = "failed"
8121 else:
8122 # RO task is pending
8123 db_nsr_update["operational-status"] = "failed"
8124 else:
8125 error_description_nslcmop = None
8126 nslcmop_operation_state = "COMPLETED"
8127 db_nslcmop_update["detailed-status"] = "Done"
8128 db_nsr_update["detailed-status"] = "Done"
8129 db_nsr_update["operational-status"] = "running"
8130 db_nsr_update["config-status"] = "configured"
8131
8132 self._write_op_status(
8133 op_id=nslcmop_id,
8134 stage="",
8135 error_message=error_description_nslcmop,
8136 operation_state=nslcmop_operation_state,
8137 other_update=db_nslcmop_update,
8138 )
8139 if db_nsr:
8140 self._write_ns_status(
8141 nsr_id=nsr_id,
8142 ns_state=None,
8143 current_operation="IDLE",
8144 current_operation_id=None,
8145 other_update=db_nsr_update,
8146 )
8147
8148 if nslcmop_operation_state:
8149 try:
8150 msg = {
8151 "nsr_id": nsr_id,
8152 "nslcmop_id": nslcmop_id,
8153 "operationState": nslcmop_operation_state,
8154 }
8155 await self.msg.aiowrite("ns", "healed", msg)
8156 except Exception as e:
8157 self.logger.error(
8158 logging_text + "kafka_write notification Exception {}".format(e)
8159 )
8160 self.logger.debug(logging_text + "Exit")
8161 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8162
async def heal_RO(
    self,
    logging_text,
    nsr_id,
    db_nslcmop,
    stage,
):
    """Ask the RO module to recreate (heal) the NS resources at the VIM.

    Builds a "recreate" target from the operation parameters, sends it to
    RO, waits for the RO action to finish and records the result in the
    NS record.

    :param logging_text: prefix used on every log line
    :param nsr_id: NS record identifier
    :param db_nslcmop: database document of the heal operation
    :param stage: 3-item list [general stage, tasks, vim_specific]; the
        vim-specific slot is overwritten on error
    :return: None; any exception is logged and re-raised
    """

    def get_vim_account(vim_account_id):
        # Memoized VIM-account lookup (kept for parity with the other RO helpers).
        nonlocal vim_cache
        if vim_account_id in vim_cache:
            return vim_cache[vim_account_id]
        vim_record = self.db.get_one("vim_accounts", {"_id": vim_account_id})
        vim_cache[vim_account_id] = vim_record
        return vim_record

    try:
        heal_start_ts = time()
        op_params = db_nslcmop.get("operationParams")
        # An operation-level timeout, when present and non-zero, wins over
        # the configured default.
        heal_timeout = (
            op_params["timeout_ns_heal"]
            if op_params and op_params.get("timeout_ns_heal")
            else self.timeout.ns_heal
        )

        vim_cache = {}

        nslcmop_id = db_nslcmop["_id"]
        target = {"action_id": nslcmop_id}
        self.logger.warning(
            "db_nslcmop={} and timeout_ns_heal={}".format(
                db_nslcmop, heal_timeout
            )
        )
        target.update(db_nslcmop.get("operationParams", {}))

        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
        ro_response = await self.RO.recreate(nsr_id, target)
        self.logger.debug("RO return > {}".format(ro_response))
        action_id = ro_response["action_id"]
        # Block until RO completes: reinjecting the juju key too early can
        # find the VM still in state Deleted.
        await self._wait_ng_ro(
            nsr_id,
            action_id,
            nslcmop_id,
            heal_start_ts,
            heal_timeout,
            stage,
            operation="healing",
        )

        # Reflect the new RO state in the NS record.
        self.update_db_2(
            "nsrs",
            nsr_id,
            {
                "_admin.deployed.RO.operational-status": "running",
                "detailed-status": " ".join(stage),
            },
        )
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(
            logging_text + "ns healed at RO. RO_id={}".format(action_id)
        )

    except Exception as e:
        stage[2] = "ERROR healing at VIM"
        # self.set_vnfr_at_error(db_vnfrs, str(e))
        # Only attach a traceback for unexpected exception types.
        expected_exceptions = (
            ROclient.ROClientException,
            LcmException,
            DbException,
            NgRoException,
        )
        self.logger.error(
            "Error healing at VIM {}".format(e),
            exc_info=not isinstance(e, expected_exceptions),
        )
        raise
8250
def _heal_n2vc(
    self,
    logging_text,
    db_nsr,
    db_vnfr,
    nslcmop_id,
    nsr_id,
    nsi_id,
    vnfd_id,
    vdu_id,
    kdu_name,
    member_vnf_index,
    vdu_index,
    vdu_name,
    deploy_params,
    descriptor_config,
    base_folder,
    task_instantiation_info,
    stage,
):
    """Launch a heal_N2VC task for every execution environment of an element.

    For each execution environment declared in ``descriptor_config`` this
    method locates (or creates) the matching entry in
    ``<nsrs>._admin.deployed.VCA``, then schedules :meth:`heal_N2VC` as an
    asyncio task and registers it so the caller can wait on it.

    :param logging_text: prefix used on every log line
    :param db_nsr: NS record document (mutated: new VCA entries are appended)
    :param db_vnfr: VNF record document of the element being healed
    :param nslcmop_id: id of the heal operation
    :param nsr_id: NS record identifier
    :param nsi_id: NSI identifier, or None if the NS is not part of an NSI
    :param vnfd_id: VNFD reference of the element
    :param vdu_id: VDU id, or None when healing at VNF/NS level
    :param kdu_name: KDU name, or None
    :param member_vnf_index: member-vnf-index-ref of the VNF, or None for NS level
    :param vdu_index: count-index of the VDU instance
    :param vdu_name: VDU name, or None
    :param deploy_params: parameters passed to the day-1 primitives
    :param descriptor_config: configuration section of the descriptor
    :param base_folder: descriptor storage info used to locate charm artifacts
    :param task_instantiation_info: dict task -> task name, filled by this method
    :param stage: 3-item stage list shared with the caller
    """
    # launch instantiate_N2VC in a asyncio task and register task object
    # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
    # if not found, create one entry and update database
    # fill db_nsr._admin.deployed.VCA.<index>

    self.logger.debug(
        logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
    )

    charm_name = ""
    get_charm_name = False
    # Normalize the descriptor into a list of execution environments.
    if "execution-environment-list" in descriptor_config:
        ee_list = descriptor_config.get("execution-environment-list", [])
    elif "juju" in descriptor_config:
        ee_list = [descriptor_config]  # ns charms
        if "execution-environment-list" not in descriptor_config:
            # charm name is only required for ns charms
            get_charm_name = True
    else:  # other types as script are not supported
        ee_list = []

    for ee_item in ee_list:
        self.logger.debug(
            logging_text
            + "_deploy_n2vc ee_item juju={}, helm={}".format(
                ee_item.get("juju"), ee_item.get("helm-chart")
            )
        )
        ee_descriptor_id = ee_item.get("id")
        # Derive the VCA type from the execution-environment descriptor.
        if ee_item.get("juju"):
            vca_name = ee_item["juju"].get("charm")
            if get_charm_name:
                charm_name = self.find_charm_name(db_nsr, str(vca_name))
            vca_type = (
                "lxc_proxy_charm"
                if ee_item["juju"].get("charm") is not None
                else "native_charm"
            )
            if ee_item["juju"].get("cloud") == "k8s":
                vca_type = "k8s_proxy_charm"
            elif ee_item["juju"].get("proxy") is False:
                vca_type = "native_charm"
        elif ee_item.get("helm-chart"):
            vca_name = ee_item["helm-chart"]
            vca_type = "helm-v3"
        else:
            self.logger.debug(
                logging_text + "skipping non juju neither charm configuration"
            )
            continue

        # Search the already-deployed VCA list for an entry matching this
        # element; the for/else creates a new entry only when none matched.
        vca_index = -1
        for vca_index, vca_deployed in enumerate(
            db_nsr["_admin"]["deployed"]["VCA"]
        ):
            if not vca_deployed:
                continue
            if (
                vca_deployed.get("member-vnf-index") == member_vnf_index
                and vca_deployed.get("vdu_id") == vdu_id
                and vca_deployed.get("kdu_name") == kdu_name
                and vca_deployed.get("vdu_count_index", 0) == vdu_index
                and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
            ):
                break
        else:
            # not found, create one.
            target = (
                "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
            )
            if vdu_id:
                target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
            elif kdu_name:
                target += "/kdu/{}".format(kdu_name)
            vca_deployed = {
                "target_element": target,
                # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                "member-vnf-index": member_vnf_index,
                "vdu_id": vdu_id,
                "kdu_name": kdu_name,
                "vdu_count_index": vdu_index,
                "operational-status": "init",  # TODO revise
                "detailed-status": "",  # TODO revise
                "step": "initial-deploy",  # TODO revise
                "vnfd_id": vnfd_id,
                "vdu_name": vdu_name,
                "type": vca_type,
                "ee_descriptor_id": ee_descriptor_id,
                "charm_name": charm_name,
            }
            # vca_index currently points at the last existing entry (or -1
            # when the list is empty); advance it for the new entry.
            vca_index += 1

            # create VCA and configurationStatus in db
            db_dict = {
                "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                "configurationStatus.{}".format(vca_index): dict(),
            }
            self.update_db_2("nsrs", nsr_id, db_dict)

            # Keep the in-memory copy consistent with the database.
            db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

        self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
        self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
        self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

        # Launch task
        task_n2vc = asyncio.ensure_future(
            self.heal_N2VC(
                logging_text=logging_text,
                vca_index=vca_index,
                nsi_id=nsi_id,
                db_nsr=db_nsr,
                db_vnfr=db_vnfr,
                vdu_id=vdu_id,
                kdu_name=kdu_name,
                vdu_index=vdu_index,
                deploy_params=deploy_params,
                config_descriptor=descriptor_config,
                base_folder=base_folder,
                nslcmop_id=nslcmop_id,
                stage=stage,
                vca_type=vca_type,
                vca_name=vca_name,
                ee_config_descriptor=ee_item,
            )
        )
        # Register the task so the caller's _wait_for_tasks can track it.
        self.lcm_tasks.register(
            "ns",
            nsr_id,
            nslcmop_id,
            "instantiate_N2VC-{}".format(vca_index),
            task_n2vc,
        )
        task_instantiation_info[
            task_n2vc
        ] = self.task_name_deploy_vca + " {}.{}".format(
            member_vnf_index or "", vdu_id or ""
        )
8409
async def heal_N2VC(
    self,
    logging_text,
    vca_index,
    nsi_id,
    db_nsr,
    db_vnfr,
    vdu_id,
    kdu_name,
    vdu_index,
    config_descriptor,
    deploy_params,
    base_folder,
    nslcmop_id,
    stage,
    vca_type,
    vca_name,
    ee_config_descriptor,
):
    """Re-establish the execution environment of one VCA after a heal.

    For native charms a new execution environment is registered against
    the healed VM and the configuration software is reinstalled; for proxy
    charms / helm the existing EE is reused and, when required, its ssh key
    is reinjected into the new VM. Optionally (``run-day1``) the initial
    config primitives are executed again.

    :param logging_text: prefix used on every log line
    :param vca_index: index of this VCA inside ``_admin.deployed.VCA``
    :param nsi_id: NSI id, or None when the NS does not belong to an NSI
    :param db_nsr: NS record document
    :param db_vnfr: VNF record document, or None for NS-level configuration
    :param vdu_id: VDU id, or None
    :param kdu_name: KDU name, or None
    :param vdu_index: count-index of the VDU instance
    :param config_descriptor: configuration section of the descriptor
    :param deploy_params: day-1 primitive parameters (mutated: rw_mgmt_ip
        and ns_config_info are added here)
    :param base_folder: descriptor storage info used to locate charm artifacts
    :param nslcmop_id: id of the heal operation
    :param stage: 3-item stage list shared with the caller
    :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/helm-v3
    :param vca_name: charm or helm-chart name
    :param ee_config_descriptor: execution-environment item of the descriptor
    :raises LcmException: wrapping any failure, after marking the
        configurationStatus entry as BROKEN
    """
    nsr_id = db_nsr["_id"]
    # Dotted db path prefix for this VCA entry, e.g. "_admin.deployed.VCA.3."
    db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
    vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
    vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
    osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
    # Location descriptor handed to N2VC so it can write status updates.
    db_dict = {
        "collection": "nsrs",
        "filter": {"_id": nsr_id},
        "path": db_update_entry,
    }
    # "step" tracks progress for the error message in the except clause.
    step = ""
    try:
        element_type = "NS"
        element_under_configuration = nsr_id

        vnfr_id = None
        if db_vnfr:
            vnfr_id = db_vnfr["_id"]
            osm_config["osm"]["vnf_id"] = vnfr_id

        namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

        if vca_type == "native_charm":
            index_number = 0
        else:
            index_number = vdu_index or 0

        # Refine namespace / element info depending on the target level
        # (VNF, VDU or KDU).
        if vnfr_id:
            element_type = "VNF"
            element_under_configuration = vnfr_id
            namespace += ".{}-{}".format(vnfr_id, index_number)
            if vdu_id:
                namespace += ".{}-{}".format(vdu_id, index_number)
                element_type = "VDU"
                element_under_configuration = "{}-{}".format(vdu_id, index_number)
                osm_config["osm"]["vdu_id"] = vdu_id
            elif kdu_name:
                namespace += ".{}".format(kdu_name)
                element_type = "KDU"
                element_under_configuration = kdu_name
                osm_config["osm"]["kdu_name"] = kdu_name

        # Get artifact path
        if base_folder["pkg-dir"]:
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms"
                if vca_type
                in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                else "helm-charts",
                vca_name,
            )
        else:
            artifact_path = "{}/Scripts/{}/{}/".format(
                base_folder["folder"],
                "charms"
                if vca_type
                in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                else "helm-charts",
                vca_name,
            )

        self.logger.debug("Artifact path > {}".format(artifact_path))

        # get initial_config_primitive_list that applies to this element
        initial_config_primitive_list = config_descriptor.get(
            "initial-config-primitive"
        )

        self.logger.debug(
            "Initial config primitive list > {}".format(
                initial_config_primitive_list
            )
        )

        # add config if not present for NS charm
        ee_descriptor_id = ee_config_descriptor.get("id")
        self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
        initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
            initial_config_primitive_list, vca_deployed, ee_descriptor_id
        )

        self.logger.debug(
            "Initial config primitive list #2 > {}".format(
                initial_config_primitive_list
            )
        )
        # n2vc_redesign STEP 3.1
        # find old ee_id if exists
        ee_id = vca_deployed.get("ee_id")

        vca_id = self.get_vca_id(db_vnfr, db_nsr)
        # create or register execution environment in VCA. Only for native charms when healing
        if vca_type == "native_charm":
            step = "Waiting to VM being up and getting IP address"
            self.logger.debug(logging_text + step)
            rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                logging_text,
                nsr_id,
                vnfr_id,
                vdu_id,
                vdu_index,
                user=None,
                pub_key=None,
            )
            credentials = {"hostname": rw_mgmt_ip}
            # get username
            username = deep_get(
                config_descriptor, ("config-access", "ssh-access", "default-user")
            )
            # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
            # merged. Meanwhile let's get username from initial-config-primitive
            if not username and initial_config_primitive_list:
                for config_primitive in initial_config_primitive_list:
                    for param in config_primitive.get("parameter", ()):
                        if param["name"] == "ssh-username":
                            username = param["value"]
                            break
            if not username:
                raise LcmException(
                    "Cannot determine the username neither with 'initial-config-primitive' nor with "
                    "'config-access.ssh-access.default-user'"
                )
            credentials["username"] = username

            # n2vc_redesign STEP 3.2
            # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="REGISTERING",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
            )

            step = "register execution environment {}".format(credentials)
            self.logger.debug(logging_text + step)
            ee_id = await self.vca_map[vca_type].register_execution_environment(
                credentials=credentials,
                namespace=namespace,
                db_dict=db_dict,
                vca_id=vca_id,
            )

            # update ee_id in db
            db_dict_ee_id = {
                "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
            }
            self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # Native charms may carry an implicit "config" primitive
                # whose parameters are applied at install time.
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

        # Not sure if this need to be done when healing
        """
        # add relations for this VCA (wait for other peers related with this VCA)
        await self._add_vca_relations(
            logging_text=logging_text,
            nsr_id=nsr_id,
            vca_type=vca_type,
            vca_index=vca_index,
        )
        """

        # if SSH access is required, then get execution environment SSH public
        # if native charm we have waited already to VM be UP
        if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
            pub_key = None
            user = None
            # self.logger.debug("get ssh key block")
            if deep_get(
                config_descriptor, ("config-access", "ssh-access", "required")
            ):
                # self.logger.debug("ssh key needed")
                # Needed to inject a ssh key
                user = deep_get(
                    config_descriptor,
                    ("config-access", "ssh-access", "default-user"),
                )
                step = "Install configuration Software, getting public ssh key"
                pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                    ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                )

                step = "Insert public key into VM user={} ssh_key={}".format(
                    user, pub_key
                )
            else:
                # self.logger.debug("no need to get ssh key")
                step = "Waiting to VM being up and getting IP address"
            self.logger.debug(logging_text + step)

            # n2vc_redesign STEP 5.1
            # wait for RO (ip-address) Insert pub_key into VM
            # IMPORTANT: We need to wait for RO to complete the healing operation.
            await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
            if vnfr_id:
                if kdu_name:
                    rw_mgmt_ip = await self.wait_kdu_up(
                        logging_text, nsr_id, vnfr_id, kdu_name
                    )
                else:
                    rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                        logging_text,
                        nsr_id,
                        vnfr_id,
                        vdu_id,
                        vdu_index,
                        user=user,
                        pub_key=pub_key,
                    )
            else:
                rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

        # store rw_mgmt_ip in deploy params for later replacement
        deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

        # Day1 operations.
        # get run-day1 operation parameter
        runDay1 = deploy_params.get("run-day1", False)
        self.logger.debug(
            "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
        )
        if runDay1:
            # n2vc_redesign STEP 6 Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(
                    nsr_id, vca_deployed_list, vca_index
                )

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs",
                            nsr_id,
                            {db_update_entry + "needed_terminate": True},
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

        # STEP 7 Configure metrics
        # Not sure if this need to be done when healing
        """
        if vca_type == "helm" or vca_type == "helm-v3":
            prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                ee_id=ee_id,
                artifact_path=artifact_path,
                ee_config_descriptor=ee_config_descriptor,
                vnfr_id=vnfr_id,
                nsr_id=nsr_id,
                target_ip=rw_mgmt_ip,
            )
            if prometheus_jobs:
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {db_update_entry + "prometheus_jobs": prometheus_jobs},
                )

            for job in prometheus_jobs:
                self.db.set_one(
                    "prometheus_jobs",
                    {"job_name": job["job_name"]},
                    job,
                    upsert=True,
                    fail_on_empty=False,
                )

        """
        step = "instantiated at VCA"
        self.logger.debug(logging_text + step)

        self._write_configuration_status(
            nsr_id=nsr_id, vca_index=vca_index, status="READY"
        )

    except Exception as e:  # TODO not use Exception but N2VC exception
        # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
        # Known exception types already carry a meaningful message; only
        # log a traceback for unexpected ones.
        if not isinstance(
            e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
        ):
            self.logger.error(
                "Exception while {} : {}".format(step, e), exc_info=True
            )
        self._write_configuration_status(
            nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
        )
        raise LcmException("{} {}".format(step, e)) from e
8818
async def _wait_heal_ro(
    self,
    nsr_id,
    timeout=600,
):
    """Poll the NS record until RO finishes the healing operation.

    Checks ``_admin.deployed.RO.operational-status`` every 15 seconds and
    returns as soon as it leaves the "healing" state.

    :param nsr_id: NS record identifier whose RO status is polled
    :param timeout: maximum number of seconds to wait
    :raises NgRoException: if RO is still "healing" when the timeout expires
    """
    start_time = time()
    while time() <= start_time + timeout:
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
            "operational-status"
        ]
        self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
        if operational_status_ro != "healing":
            break
        await asyncio.sleep(15)
    else:  # loop exhausted: RO never left the "healing" state
        # Fixed message: this waiter tracks a heal, not a deploy.
        raise NgRoException("Timeout waiting ns to heal at RO")
8836
async def vertical_scale(self, nsr_id, nslcmop_id):
    """
    Vertical Scale the VDUs in a NS

    Registers the requested flavor in the NS record, points the target
    VDU record at it, asks RO to apply the new flavor and waits for the
    RO action to finish. On failure the VDU's previous flavor reference
    is restored.

    :param: nsr_id: NS Instance ID
    :param: nslcmop_id: nslcmop ID of the vertical scale operation

    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        return
    logging_text = "Task ns={} vertical scale ".format(nsr_id)
    self.logger.debug(logging_text + "Enter")
    # get all needed from database
    db_nslcmop = None
    db_nslcmop_update = {}
    nslcmop_operation_state = None
    # Revert data (filled only on failure) to restore the old flavor ref.
    old_db_update = {}
    q_filter = {}
    # None means "no VDU matched"; 0 is a valid vdur index.
    old_vdu_index = None
    old_flavor_id = None
    db_nsr_update = {}
    target = {}
    exc = None
    # in case of error, indicates what part of scale was failed to put nsr at error status
    start_deploy = time()

    try:
        # wait for any previous tasks in process
        step = "Waiting for previous operations to terminate"
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="VerticalScale",
            current_operation_id=nslcmop_id,
        )
        step = "Getting nslcmop from database"
        self.logger.debug(
            step + " after having waited for previous tasks to be completed"
        )
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        operationParams = db_nslcmop.get("operationParams")
        # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        db_flavor = db_nsr.get("flavor")
        # New flavor is appended, so its id is the current list length.
        db_flavor_index = str(len(db_flavor))
        change_vnf_flavor_data = operationParams["changeVnfFlavorData"]
        flavor_dict = change_vnf_flavor_data["additionalParams"]
        count_index = flavor_dict["vduCountIndex"]
        vdu_id_ref = flavor_dict["vduid"]
        flavor_dict_update = {
            "id": db_flavor_index,
            "memory-mb": flavor_dict["virtualMemory"],
            "name": f"{vdu_id_ref}-{count_index}-flv",
            "storage-gb": flavor_dict["sizeOfStorage"],
            "vcpu-count": flavor_dict["numVirtualCpu"],
        }
        db_flavor.append(flavor_dict_update)
        db_update = {}
        db_update["flavor"] = db_flavor
        ns_q_filter = {
            "_id": nsr_id,
        }
        self.db.set_one(
            "nsrs",
            q_filter=ns_q_filter,
            update_dict=db_update,
            fail_on_empty=True,
        )
        db_vnfr = self.db.get_one(
            "vnfrs", {"_id": change_vnf_flavor_data["vnfInstanceId"]}
        )
        # Locate the target VDU record and point it at the new flavor,
        # remembering the old reference so it can be reverted on failure.
        for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
            if (
                vdur.get("count-index") == count_index
                and vdur.get("vdu-id-ref") == vdu_id_ref
            ):
                old_flavor_id = vdur.get("ns-flavor-id", 0)
                old_vdu_index = vdu_index
                filter_text = {
                    "_id": change_vnf_flavor_data["vnfInstanceId"],
                    "vdur.count-index": count_index,
                    "vdur.vdu-id-ref": vdu_id_ref,
                }
                q_filter.update(filter_text)
                db_update = {}
                db_update[
                    "vdur.{}.ns-flavor-id".format(vdu_index)
                ] = db_flavor_index
                self.db.set_one(
                    "vnfrs",
                    q_filter=q_filter,
                    update_dict=db_update,
                    fail_on_empty=True,
                )
        target = {}
        target.update(operationParams)
        desc = await self.RO.vertical_scale(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id,
            action_id,
            nslcmop_id,
            start_deploy,
            self.timeout.verticalscale,
            operation="verticalscale",
        )
    except (
        NgRoException,
        ROclient.ROClientException,
        DbException,
        LcmException,
    ) as e:
        self.logger.error("Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error("Cancelled Exception while '{}'".format(step))
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
        )
    finally:
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="IDLE",
            current_operation_id=None,
        )
        if exc:
            db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
            nslcmop_operation_state = "FAILED"
            # Prepare the revert of the vnfr flavor reference.
            old_db_update[
                "vdur.{}.ns-flavor-id".format(old_vdu_index)
            ] = old_flavor_id
        else:
            nslcmop_operation_state = "COMPLETED"
            db_nslcmop_update["detailed-status"] = "Done"
            db_nsr_update["detailed-status"] = "Done"

        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message="",
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )
        # Bug fix: old_vdu_index may legitimately be 0 (first VDU of the
        # VNF); compare against None instead of relying on truthiness so
        # the first VDU's flavor reference is also reverted on failure.
        if old_vdu_index is not None and old_db_update:
            self.logger.critical(
                "Reverting Old Flavor -- : {}".format(old_db_update)
            )
            self.db.set_one(
                "vnfrs",
                q_filter=q_filter,
                update_dict=old_db_update,
                fail_on_empty=True,
            )
        if nslcmop_operation_state:
            try:
                msg = {
                    "nsr_id": nsr_id,
                    "nslcmop_id": nslcmop_id,
                    "operationState": nslcmop_operation_state,
                }
                await self.msg.aiowrite("ns", "verticalscaled", msg)
            except Exception as e:
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )
        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")