f282b88c79febae259a6eaf7e27f026a60a9228b
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 get_ee_id_parts,
63 vld_to_ro_ip_profile,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm3_conn import K8sHelm3Connector
101 from n2vc.k8s_juju_conn import K8sJujuConnector
102
103 from osm_common.dbbase import DbException
104 from osm_common.fsbase import FsException
105
106 from osm_lcm.data_utils.database.database import Database
107 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
108 from osm_lcm.data_utils.wim import (
109 get_sdn_ports,
110 get_target_wim_attrs,
111 select_feasible_wim_account,
112 )
113
114 from n2vc.n2vc_juju_conn import N2VCJujuConnector
115 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
116
117 from osm_lcm.lcm_helm_conn import LCMHelmConn
118 from osm_lcm.osm_config import OsmConfigBuilder
119 from osm_lcm.prometheus import parse_job
120
121 from copy import copy, deepcopy
122 from time import time
123 from uuid import uuid4
124
125 from random import SystemRandom
126
127 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
128
129
class NsLcm(LcmBase):
    """Network Service lifecycle manager.

    Handles NS LCM operations (instantiation, termination, scaling, ...)
    coordinating the RO client, the N2VC/juju connector and the k8s
    connectors, and keeping the nsr/vnfr database records updated.
    """

    # sentinel codes returned by the sub-operation lookup helpers
    # (defined further down in this class)
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # name used for the execution-environment TLS artifact
    EE_TLS_NAME = "ee-tls"
    task_name_deploy_vca = "Deploying VCA"
    # descriptor relation operators -> python comparison operators
    rel_operation_types = {
        "GE": ">=",
        "LE": "<=",
        "GT": ">",
        "LT": "<",
        "EQ": "==",
        "NE": "!=",
    }
144
    def __init__(self, msg, lcm_tasks, config: LcmCfg):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler, forwarded to LcmBase
        :param lcm_tasks: registry used to track the asyncio tasks created by this module
        :param config: LcmCfg object with the LCM configuration (timeouts, RO and VCA sections)
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # db and fs come from the already-initialized singleton accessors
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector towards helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # k8s connector for helm v3 charts
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # k8s connector for juju bundles
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # kdu deployment type -> k8s connector
        self.k8scluster_map = {
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # VCA (execution environment) type -> connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        # lcm operation type -> RO coroutine used to poll the operation status
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
218
219 @staticmethod
220 def increment_ip_mac(ip_mac, vm_index=1):
221 if not isinstance(ip_mac, str):
222 return ip_mac
223 try:
224 # try with ipv4 look for last dot
225 i = ip_mac.rfind(".")
226 if i > 0:
227 i += 1
228 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
229 # try with ipv6 or mac look for last colon. Operate in hex
230 i = ip_mac.rfind(":")
231 if i > 0:
232 i += 1
233 # format in hex, len can be 2 for mac or 4 for ipv6
234 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
235 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
236 )
237 except Exception:
238 pass
239 return None
240
241 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
242 # remove last dot from path (if exists)
243 if path.endswith("."):
244 path = path[:-1]
245
246 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
247 # .format(table, filter, path, updated_data))
248 try:
249 nsr_id = filter.get("_id")
250
251 # read ns record from database
252 nsr = self.db.get_one(table="nsrs", q_filter=filter)
253 current_ns_status = nsr.get("nsState")
254
255 # get vca status for NS
256 status_dict = await self.n2vc.get_status(
257 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
258 )
259
260 # vcaStatus
261 db_dict = dict()
262 db_dict["vcaStatus"] = status_dict
263
264 # update configurationStatus for this VCA
265 try:
266 vca_index = int(path[path.rfind(".") + 1 :])
267
268 vca_list = deep_get(
269 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
270 )
271 vca_status = vca_list[vca_index].get("status")
272
273 configuration_status_list = nsr.get("configurationStatus")
274 config_status = configuration_status_list[vca_index].get("status")
275
276 if config_status == "BROKEN" and vca_status != "failed":
277 db_dict["configurationStatus"][vca_index] = "READY"
278 elif config_status != "BROKEN" and vca_status == "failed":
279 db_dict["configurationStatus"][vca_index] = "BROKEN"
280 except Exception as e:
281 # not update configurationStatus
282 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
283
284 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
285 # if nsState = 'DEGRADED' check if all is OK
286 is_degraded = False
287 if current_ns_status in ("READY", "DEGRADED"):
288 error_description = ""
289 # check machines
290 if status_dict.get("machines"):
291 for machine_id in status_dict.get("machines"):
292 machine = status_dict.get("machines").get(machine_id)
293 # check machine agent-status
294 if machine.get("agent-status"):
295 s = machine.get("agent-status").get("status")
296 if s != "started":
297 is_degraded = True
298 error_description += (
299 "machine {} agent-status={} ; ".format(
300 machine_id, s
301 )
302 )
303 # check machine instance status
304 if machine.get("instance-status"):
305 s = machine.get("instance-status").get("status")
306 if s != "running":
307 is_degraded = True
308 error_description += (
309 "machine {} instance-status={} ; ".format(
310 machine_id, s
311 )
312 )
313 # check applications
314 if status_dict.get("applications"):
315 for app_id in status_dict.get("applications"):
316 app = status_dict.get("applications").get(app_id)
317 # check application status
318 if app.get("status"):
319 s = app.get("status").get("status")
320 if s != "active":
321 is_degraded = True
322 error_description += (
323 "application {} status={} ; ".format(app_id, s)
324 )
325
326 if error_description:
327 db_dict["errorDescription"] = error_description
328 if current_ns_status == "READY" and is_degraded:
329 db_dict["nsState"] = "DEGRADED"
330 if current_ns_status == "DEGRADED" and not is_degraded:
331 db_dict["nsState"] = "READY"
332
333 # write to database
334 self.update_db_2("nsrs", nsr_id, db_dict)
335
336 except (asyncio.CancelledError, asyncio.TimeoutError):
337 raise
338 except Exception as e:
339 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
340
341 async def _on_update_k8s_db(
342 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
343 ):
344 """
345 Updating vca status in NSR record
346 :param cluster_uuid: UUID of a k8s cluster
347 :param kdu_instance: The unique name of the KDU instance
348 :param filter: To get nsr_id
349 :cluster_type: The cluster type (juju, k8s)
350 :return: none
351 """
352
353 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
354 # .format(cluster_uuid, kdu_instance, filter))
355
356 nsr_id = filter.get("_id")
357 try:
358 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
359 cluster_uuid=cluster_uuid,
360 kdu_instance=kdu_instance,
361 yaml_format=False,
362 complete_status=True,
363 vca_id=vca_id,
364 )
365
366 # vcaStatus
367 db_dict = dict()
368 db_dict["vcaStatus"] = {nsr_id: vca_status}
369
370 self.logger.debug(
371 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
372 )
373
374 # write to database
375 self.update_db_2("nsrs", nsr_id, db_dict)
376 except (asyncio.CancelledError, asyncio.TimeoutError):
377 raise
378 except Exception as e:
379 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
380
381 @staticmethod
382 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
383 try:
384 env = Environment(
385 undefined=StrictUndefined,
386 autoescape=select_autoescape(default_for_string=True, default=True),
387 )
388 template = env.from_string(cloud_init_text)
389 return template.render(additional_params or {})
390 except UndefinedError as e:
391 raise LcmException(
392 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
393 "file, must be provided in the instantiation parameters inside the "
394 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
395 )
396 except (TemplateError, TemplateNotFound) as e:
397 raise LcmException(
398 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
399 vnfd_id, vdu_id, e
400 )
401 )
402
403 def _get_vdu_cloud_init_content(self, vdu, vnfd):
404 cloud_init_content = cloud_init_file = None
405 try:
406 if vdu.get("cloud-init-file"):
407 base_folder = vnfd["_admin"]["storage"]
408 if base_folder["pkg-dir"]:
409 cloud_init_file = "{}/{}/cloud_init/{}".format(
410 base_folder["folder"],
411 base_folder["pkg-dir"],
412 vdu["cloud-init-file"],
413 )
414 else:
415 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
416 base_folder["folder"],
417 vdu["cloud-init-file"],
418 )
419 with self.fs.file_open(cloud_init_file, "r") as ci_file:
420 cloud_init_content = ci_file.read()
421 elif vdu.get("cloud-init"):
422 cloud_init_content = vdu["cloud-init"]
423
424 return cloud_init_content
425 except FsException as e:
426 raise LcmException(
427 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
428 vnfd["id"], vdu["id"], cloud_init_file, e
429 )
430 )
431
432 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
433 vdur = next(
434 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
435 )
436 additional_params = vdur.get("additionalParams")
437 return parse_yaml_strings(additional_params)
438
439 @staticmethod
440 def ip_profile_2_RO(ip_profile):
441 RO_ip_profile = deepcopy(ip_profile)
442 if "dns-server" in RO_ip_profile:
443 if isinstance(RO_ip_profile["dns-server"], list):
444 RO_ip_profile["dns-address"] = []
445 for ds in RO_ip_profile.pop("dns-server"):
446 RO_ip_profile["dns-address"].append(ds["address"])
447 else:
448 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
449 if RO_ip_profile.get("ip-version") == "ipv4":
450 RO_ip_profile["ip-version"] = "IPv4"
451 if RO_ip_profile.get("ip-version") == "ipv6":
452 RO_ip_profile["ip-version"] = "IPv6"
453 if "dhcp-params" in RO_ip_profile:
454 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
455 return RO_ip_profile
456
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scaling operation (out and/or in) to a vnfr in the database.

        :param db_vnfr: vnf record as read from the database; it is refreshed
            in place with the resulting "vdur" list at the end
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: when True, vdurs to remove are only marked with
            status DELETING instead of being pulled from the database
        :return: None
        :raises LcmException: when scaling out a vdu that has neither an
            existing vdur nor a saved vdur-template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # take the last existing vdur of this vdu as the copy source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each new instance is a copy of the source vdur with a
                    # fresh id and the count-index continued from the source
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per instance;
                        # otherwise let the VIM assign new ones
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # push the new vdurs (and the scale-to-0 template) in one db write
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
567
568 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
569 """
570 Updates database nsr with the RO info for the created vld
571 :param ns_update_nsr: dictionary to be filled with the updated info
572 :param db_nsr: content of db_nsr. This is also modified
573 :param nsr_desc_RO: nsr descriptor from RO
574 :return: Nothing, LcmException is raised on errors
575 """
576
577 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
578 for net_RO in get_iterable(nsr_desc_RO, "nets"):
579 if vld["id"] != net_RO.get("ns_net_osm_id"):
580 continue
581 vld["vim-id"] = net_RO.get("vim_net_id")
582 vld["name"] = net_RO.get("vim_name")
583 vld["status"] = net_RO.get("status")
584 vld["status-detailed"] = net_RO.get("error_msg")
585 ns_update_nsr["vld.{}".format(vld_index)] = vld
586 break
587 else:
588 raise LcmException(
589 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
590 )
591
592 def set_vnfr_at_error(self, db_vnfrs, error_text):
593 try:
594 for db_vnfr in db_vnfrs.values():
595 vnfr_update = {"status": "ERROR"}
596 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
597 if "status" not in vdur:
598 vdur["status"] = "ERROR"
599 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
600 if error_text:
601 vdur["status-detailed"] = str(error_text)
602 vnfr_update[
603 "vdur.{}.status-detailed".format(vdu_index)
604 ] = "ERROR"
605 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
606 except DbException as e:
607 self.logger.error("Cannot update vnf. {}".format(e))
608
609 def _get_ns_config_info(self, nsr_id):
610 """
611 Generates a mapping between vnf,vdu elements and the N2VC id
612 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
613 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
614 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
615 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
616 """
617 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
618 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
619 mapping = {}
620 ns_config_info = {"osm-config-mapping": mapping}
621 for vca in vca_deployed_list:
622 if not vca["member-vnf-index"]:
623 continue
624 if not vca["vdu_id"]:
625 mapping[vca["member-vnf-index"]] = vca["application"]
626 else:
627 mapping[
628 "{}.{}.{}".format(
629 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
630 )
631 ] = vca["application"]
632 return ns_config_info
633
634 async def _instantiate_ng_ro(
635 self,
636 logging_text,
637 nsr_id,
638 nsd,
639 db_nsr,
640 db_nslcmop,
641 db_vnfrs,
642 db_vnfds,
643 n2vc_key_list,
644 stage,
645 start_deploy,
646 timeout_ns_deploy,
647 ):
648 db_vims = {}
649
650 def get_vim_account(vim_account_id):
651 nonlocal db_vims
652 if vim_account_id in db_vims:
653 return db_vims[vim_account_id]
654 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
655 db_vims[vim_account_id] = db_vim
656 return db_vim
657
658 # modify target_vld info with instantiation parameters
659 def parse_vld_instantiation_params(
660 target_vim, target_vld, vld_params, target_sdn
661 ):
662 if vld_params.get("ip-profile"):
663 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
664 vld_params["ip-profile"]
665 )
666 if vld_params.get("provider-network"):
667 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
668 "provider-network"
669 ]
670 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
671 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
672 "provider-network"
673 ]["sdn-ports"]
674
675 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
676 # if wim_account_id is specified in vld_params, validate if it is feasible.
677 wim_account_id, db_wim = select_feasible_wim_account(
678 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
679 )
680
681 if wim_account_id:
682 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
683 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
684 # update vld_params with correct WIM account Id
685 vld_params["wimAccountId"] = wim_account_id
686
687 target_wim = "wim:{}".format(wim_account_id)
688 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
689 sdn_ports = get_sdn_ports(vld_params, db_wim)
690 if len(sdn_ports) > 0:
691 target_vld["vim_info"][target_wim] = target_wim_attrs
692 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
693
694 self.logger.debug(
695 "Target VLD with WIM data: {:s}".format(str(target_vld))
696 )
697
698 for param in ("vim-network-name", "vim-network-id"):
699 if vld_params.get(param):
700 if isinstance(vld_params[param], dict):
701 for vim, vim_net in vld_params[param].items():
702 other_target_vim = "vim:" + vim
703 populate_dict(
704 target_vld["vim_info"],
705 (other_target_vim, param.replace("-", "_")),
706 vim_net,
707 )
708 else: # isinstance str
709 target_vld["vim_info"][target_vim][
710 param.replace("-", "_")
711 ] = vld_params[param]
712 if vld_params.get("common_id"):
713 target_vld["common_id"] = vld_params.get("common_id")
714
715 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
716 def update_ns_vld_target(target, ns_params):
717 for vnf_params in ns_params.get("vnf", ()):
718 if vnf_params.get("vimAccountId"):
719 target_vnf = next(
720 (
721 vnfr
722 for vnfr in db_vnfrs.values()
723 if vnf_params["member-vnf-index"]
724 == vnfr["member-vnf-index-ref"]
725 ),
726 None,
727 )
728 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
729 if not vdur:
730 continue
731 for a_index, a_vld in enumerate(target["ns"]["vld"]):
732 target_vld = find_in_list(
733 get_iterable(vdur, "interfaces"),
734 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
735 )
736
737 vld_params = find_in_list(
738 get_iterable(ns_params, "vld"),
739 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
740 )
741 if target_vld:
742 if vnf_params.get("vimAccountId") not in a_vld.get(
743 "vim_info", {}
744 ):
745 target_vim_network_list = [
746 v for _, v in a_vld.get("vim_info").items()
747 ]
748 target_vim_network_name = next(
749 (
750 item.get("vim_network_name", "")
751 for item in target_vim_network_list
752 ),
753 "",
754 )
755
756 target["ns"]["vld"][a_index].get("vim_info").update(
757 {
758 "vim:{}".format(vnf_params["vimAccountId"]): {
759 "vim_network_name": target_vim_network_name,
760 }
761 }
762 )
763
764 if vld_params:
765 for param in ("vim-network-name", "vim-network-id"):
766 if vld_params.get(param) and isinstance(
767 vld_params[param], dict
768 ):
769 for vim, vim_net in vld_params[
770 param
771 ].items():
772 other_target_vim = "vim:" + vim
773 populate_dict(
774 target["ns"]["vld"][a_index].get(
775 "vim_info"
776 ),
777 (
778 other_target_vim,
779 param.replace("-", "_"),
780 ),
781 vim_net,
782 )
783
784 nslcmop_id = db_nslcmop["_id"]
785 target = {
786 "name": db_nsr["name"],
787 "ns": {"vld": []},
788 "vnf": [],
789 "image": deepcopy(db_nsr["image"]),
790 "flavor": deepcopy(db_nsr["flavor"]),
791 "action_id": nslcmop_id,
792 "cloud_init_content": {},
793 }
794 for image in target["image"]:
795 image["vim_info"] = {}
796 for flavor in target["flavor"]:
797 flavor["vim_info"] = {}
798 if db_nsr.get("shared-volumes"):
799 target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
800 for shared_volumes in target["shared-volumes"]:
801 shared_volumes["vim_info"] = {}
802 if db_nsr.get("affinity-or-anti-affinity-group"):
803 target["affinity-or-anti-affinity-group"] = deepcopy(
804 db_nsr["affinity-or-anti-affinity-group"]
805 )
806 for affinity_or_anti_affinity_group in target[
807 "affinity-or-anti-affinity-group"
808 ]:
809 affinity_or_anti_affinity_group["vim_info"] = {}
810
811 if db_nslcmop.get("lcmOperationType") != "instantiate":
812 # get parameters of instantiation:
813 db_nslcmop_instantiate = self.db.get_list(
814 "nslcmops",
815 {
816 "nsInstanceId": db_nslcmop["nsInstanceId"],
817 "lcmOperationType": "instantiate",
818 },
819 )[-1]
820 ns_params = db_nslcmop_instantiate.get("operationParams")
821 else:
822 ns_params = db_nslcmop.get("operationParams")
823 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
824 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
825
826 cp2target = {}
827 for vld_index, vld in enumerate(db_nsr.get("vld")):
828 target_vim = "vim:{}".format(ns_params["vimAccountId"])
829 target_vld = {
830 "id": vld["id"],
831 "name": vld["name"],
832 "mgmt-network": vld.get("mgmt-network", False),
833 "type": vld.get("type"),
834 "vim_info": {
835 target_vim: {
836 "vim_network_name": vld.get("vim-network-name"),
837 "vim_account_id": ns_params["vimAccountId"],
838 }
839 },
840 }
841 # check if this network needs SDN assist
842 if vld.get("pci-interfaces"):
843 db_vim = get_vim_account(ns_params["vimAccountId"])
844 if vim_config := db_vim.get("config"):
845 if sdnc_id := vim_config.get("sdn-controller"):
846 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
847 target_sdn = "sdn:{}".format(sdnc_id)
848 target_vld["vim_info"][target_sdn] = {
849 "sdn": True,
850 "target_vim": target_vim,
851 "vlds": [sdn_vld],
852 "type": vld.get("type"),
853 }
854
855 nsd_vnf_profiles = get_vnf_profiles(nsd)
856 for nsd_vnf_profile in nsd_vnf_profiles:
857 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
858 if cp["virtual-link-profile-id"] == vld["id"]:
859 cp2target[
860 "member_vnf:{}.{}".format(
861 cp["constituent-cpd-id"][0][
862 "constituent-base-element-id"
863 ],
864 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
865 )
866 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
867
868 # check at nsd descriptor, if there is an ip-profile
869 vld_params = {}
870 nsd_vlp = find_in_list(
871 get_virtual_link_profiles(nsd),
872 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
873 == vld["id"],
874 )
875 if (
876 nsd_vlp
877 and nsd_vlp.get("virtual-link-protocol-data")
878 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
879 ):
880 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
881 "l3-protocol-data"
882 ]
883
884 # update vld_params with instantiation params
885 vld_instantiation_params = find_in_list(
886 get_iterable(ns_params, "vld"),
887 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
888 )
889 if vld_instantiation_params:
890 vld_params.update(vld_instantiation_params)
891 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
892 target["ns"]["vld"].append(target_vld)
893 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
894 update_ns_vld_target(target, ns_params)
895
896 for vnfr in db_vnfrs.values():
897 vnfd = find_in_list(
898 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
899 )
900 vnf_params = find_in_list(
901 get_iterable(ns_params, "vnf"),
902 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
903 )
904 target_vnf = deepcopy(vnfr)
905 target_vim = "vim:{}".format(vnfr["vim-account-id"])
906 for vld in target_vnf.get("vld", ()):
907 # check if connected to a ns.vld, to fill target'
908 vnf_cp = find_in_list(
909 vnfd.get("int-virtual-link-desc", ()),
910 lambda cpd: cpd.get("id") == vld["id"],
911 )
912 if vnf_cp:
913 ns_cp = "member_vnf:{}.{}".format(
914 vnfr["member-vnf-index-ref"], vnf_cp["id"]
915 )
916 if cp2target.get(ns_cp):
917 vld["target"] = cp2target[ns_cp]
918
919 vld["vim_info"] = {
920 target_vim: {"vim_network_name": vld.get("vim-network-name")}
921 }
922 # check if this network needs SDN assist
923 target_sdn = None
924 if vld.get("pci-interfaces"):
925 db_vim = get_vim_account(vnfr["vim-account-id"])
926 sdnc_id = db_vim["config"].get("sdn-controller")
927 if sdnc_id:
928 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
929 target_sdn = "sdn:{}".format(sdnc_id)
930 vld["vim_info"][target_sdn] = {
931 "sdn": True,
932 "target_vim": target_vim,
933 "vlds": [sdn_vld],
934 "type": vld.get("type"),
935 }
936
937 # check at vnfd descriptor, if there is an ip-profile
938 vld_params = {}
939 vnfd_vlp = find_in_list(
940 get_virtual_link_profiles(vnfd),
941 lambda a_link_profile: a_link_profile["id"] == vld["id"],
942 )
943 if (
944 vnfd_vlp
945 and vnfd_vlp.get("virtual-link-protocol-data")
946 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
947 ):
948 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
949 "l3-protocol-data"
950 ]
951 # update vld_params with instantiation params
952 if vnf_params:
953 vld_instantiation_params = find_in_list(
954 get_iterable(vnf_params, "internal-vld"),
955 lambda i_vld: i_vld["name"] == vld["id"],
956 )
957 if vld_instantiation_params:
958 vld_params.update(vld_instantiation_params)
959 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
960
961 vdur_list = []
962 for vdur in target_vnf.get("vdur", ()):
963 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
964 continue # This vdu must not be created
965 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
966
967 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
968
969 if ssh_keys_all:
970 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
971 vnf_configuration = get_configuration(vnfd, vnfd["id"])
972 if (
973 vdu_configuration
974 and vdu_configuration.get("config-access")
975 and vdu_configuration.get("config-access").get("ssh-access")
976 ):
977 vdur["ssh-keys"] = ssh_keys_all
978 vdur["ssh-access-required"] = vdu_configuration[
979 "config-access"
980 ]["ssh-access"]["required"]
981 elif (
982 vnf_configuration
983 and vnf_configuration.get("config-access")
984 and vnf_configuration.get("config-access").get("ssh-access")
985 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
986 ):
987 vdur["ssh-keys"] = ssh_keys_all
988 vdur["ssh-access-required"] = vnf_configuration[
989 "config-access"
990 ]["ssh-access"]["required"]
991 elif ssh_keys_instantiation and find_in_list(
992 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
993 ):
994 vdur["ssh-keys"] = ssh_keys_instantiation
995
996 self.logger.debug("NS > vdur > {}".format(vdur))
997
998 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
999 # cloud-init
1000 if vdud.get("cloud-init-file"):
1001 vdur["cloud-init"] = "{}:file:{}".format(
1002 vnfd["_id"], vdud.get("cloud-init-file")
1003 )
1004 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1005 if vdur["cloud-init"] not in target["cloud_init_content"]:
1006 base_folder = vnfd["_admin"]["storage"]
1007 if base_folder["pkg-dir"]:
1008 cloud_init_file = "{}/{}/cloud_init/{}".format(
1009 base_folder["folder"],
1010 base_folder["pkg-dir"],
1011 vdud.get("cloud-init-file"),
1012 )
1013 else:
1014 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1015 base_folder["folder"],
1016 vdud.get("cloud-init-file"),
1017 )
1018 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1019 target["cloud_init_content"][
1020 vdur["cloud-init"]
1021 ] = ci_file.read()
1022 elif vdud.get("cloud-init"):
1023 vdur["cloud-init"] = "{}:vdu:{}".format(
1024 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1025 )
1026 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1027 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1028 "cloud-init"
1029 ]
1030 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1031 deploy_params_vdu = self._format_additional_params(
1032 vdur.get("additionalParams") or {}
1033 )
1034 deploy_params_vdu["OSM"] = get_osm_params(
1035 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1036 )
1037 vdur["additionalParams"] = deploy_params_vdu
1038
1039 # flavor
1040 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1041 if target_vim not in ns_flavor["vim_info"]:
1042 ns_flavor["vim_info"][target_vim] = {}
1043
1044 # deal with images
1045 # in case alternative images are provided we must check if they should be applied
1046 # for the vim_type, modify the vim_type taking into account
1047 ns_image_id = int(vdur["ns-image-id"])
1048 if vdur.get("alt-image-ids"):
1049 db_vim = get_vim_account(vnfr["vim-account-id"])
1050 vim_type = db_vim["vim_type"]
1051 for alt_image_id in vdur.get("alt-image-ids"):
1052 ns_alt_image = target["image"][int(alt_image_id)]
1053 if vim_type == ns_alt_image.get("vim-type"):
1054 # must use alternative image
1055 self.logger.debug(
1056 "use alternative image id: {}".format(alt_image_id)
1057 )
1058 ns_image_id = alt_image_id
1059 vdur["ns-image-id"] = ns_image_id
1060 break
1061 ns_image = target["image"][int(ns_image_id)]
1062 if target_vim not in ns_image["vim_info"]:
1063 ns_image["vim_info"][target_vim] = {}
1064
1065 # Affinity groups
1066 if vdur.get("affinity-or-anti-affinity-group-id"):
1067 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1068 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1069 if target_vim not in ns_ags["vim_info"]:
1070 ns_ags["vim_info"][target_vim] = {}
1071
1072 # shared-volumes
1073 if vdur.get("shared-volumes-id"):
1074 for sv_id in vdur["shared-volumes-id"]:
1075 ns_sv = find_in_list(
1076 target["shared-volumes"], lambda sv: sv_id in sv["id"]
1077 )
1078 if ns_sv:
1079 ns_sv["vim_info"][target_vim] = {}
1080
1081 vdur["vim_info"] = {target_vim: {}}
1082 # instantiation parameters
1083 if vnf_params:
1084 vdu_instantiation_params = find_in_list(
1085 get_iterable(vnf_params, "vdu"),
1086 lambda i_vdu: i_vdu["id"] == vdud["id"],
1087 )
1088 if vdu_instantiation_params:
1089 # Parse the vdu_volumes from the instantiation params
1090 vdu_volumes = get_volumes_from_instantiation_params(
1091 vdu_instantiation_params, vdud
1092 )
1093 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1094 vdur["additionalParams"]["OSM"][
1095 "vim_flavor_id"
1096 ] = vdu_instantiation_params.get("vim-flavor-id")
1097 vdur_list.append(vdur)
1098 target_vnf["vdur"] = vdur_list
1099 target["vnf"].append(target_vnf)
1100
1101 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1102 desc = await self.RO.deploy(nsr_id, target)
1103 self.logger.debug("RO return > {}".format(desc))
1104 action_id = desc["action_id"]
1105 await self._wait_ng_ro(
1106 nsr_id,
1107 action_id,
1108 nslcmop_id,
1109 start_deploy,
1110 timeout_ns_deploy,
1111 stage,
1112 operation="instantiation",
1113 )
1114
1115 # Updating NSR
1116 db_nsr_update = {
1117 "_admin.deployed.RO.operational-status": "running",
1118 "detailed-status": " ".join(stage),
1119 }
1120 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1121 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1122 self._write_op_status(nslcmop_id, stage)
1123 self.logger.debug(
1124 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1125 )
1126 return
1127
1128 async def _wait_ng_ro(
1129 self,
1130 nsr_id,
1131 action_id,
1132 nslcmop_id=None,
1133 start_time=None,
1134 timeout=600,
1135 stage=None,
1136 operation=None,
1137 ):
1138 detailed_status_old = None
1139 db_nsr_update = {}
1140 start_time = start_time or time()
1141 while time() <= start_time + timeout:
1142 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1143 self.logger.debug("Wait NG RO > {}".format(desc_status))
1144 if desc_status["status"] == "FAILED":
1145 raise NgRoException(desc_status["details"])
1146 elif desc_status["status"] == "BUILD":
1147 if stage:
1148 stage[2] = "VIM: ({})".format(desc_status["details"])
1149 elif desc_status["status"] == "DONE":
1150 if stage:
1151 stage[2] = "Deployed at VIM"
1152 break
1153 else:
1154 assert False, "ROclient.check_ns_status returns unknown {}".format(
1155 desc_status["status"]
1156 )
1157 if stage and nslcmop_id and stage[2] != detailed_status_old:
1158 detailed_status_old = stage[2]
1159 db_nsr_update["detailed-status"] = " ".join(stage)
1160 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1161 self._write_op_status(nslcmop_id, stage)
1162 await asyncio.sleep(15)
1163 else: # timeout_ns_deploy
1164 raise NgRoException("Timeout waiting ns to deploy")
1165
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate an NS at NG-RO: deploy an empty target (which removes every
        deployed item) and then delete the nsr record at RO.

        :param logging_text: prefix to use at logging
        :param nsr_deployed: content of db_nsr "_admin.deployed" (not read here)
        :param nsr_id: NS record identifier
        :param nslcmop_id: operation identifier; also sent to RO as the action_id
        :param stage: 3-item progress list; item 2 is overwritten with the result
        :raises LcmException: if any deletion error other than 404 happened
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # An empty target makes NG-RO undeploy everything belonging to the NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                # already gone at RO: treat as success and clear the reference
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            # best-effort: record the failure but still update the NS record below
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        # write final status to the database regardless of success/failure
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1239
1240 async def instantiate_RO(
1241 self,
1242 logging_text,
1243 nsr_id,
1244 nsd,
1245 db_nsr,
1246 db_nslcmop,
1247 db_vnfrs,
1248 db_vnfds,
1249 n2vc_key_list,
1250 stage,
1251 ):
1252 """
1253 Instantiate at RO
1254 :param logging_text: preffix text to use at logging
1255 :param nsr_id: nsr identity
1256 :param nsd: database content of ns descriptor
1257 :param db_nsr: database content of ns record
1258 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1259 :param db_vnfrs:
1260 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1261 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1262 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1263 :return: None or exception
1264 """
1265 try:
1266 start_deploy = time()
1267 ns_params = db_nslcmop.get("operationParams")
1268 if ns_params and ns_params.get("timeout_ns_deploy"):
1269 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1270 else:
1271 timeout_ns_deploy = self.timeout.ns_deploy
1272
1273 # Check for and optionally request placement optimization. Database will be updated if placement activated
1274 stage[2] = "Waiting for Placement."
1275 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1276 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1277 for vnfr in db_vnfrs.values():
1278 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1279 break
1280 else:
1281 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1282
1283 return await self._instantiate_ng_ro(
1284 logging_text,
1285 nsr_id,
1286 nsd,
1287 db_nsr,
1288 db_nslcmop,
1289 db_vnfrs,
1290 db_vnfds,
1291 n2vc_key_list,
1292 stage,
1293 start_deploy,
1294 timeout_ns_deploy,
1295 )
1296 except Exception as e:
1297 stage[2] = "ERROR deploying at VIM"
1298 self.set_vnfr_at_error(db_vnfrs, str(e))
1299 self.logger.error(
1300 "Error deploying at VIM {}".format(e),
1301 exc_info=not isinstance(
1302 e,
1303 (
1304 ROclient.ROClientException,
1305 LcmException,
1306 DbException,
1307 NgRoException,
1308 ),
1309 ),
1310 )
1311 raise
1312
1313 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1314 """
1315 Wait for kdu to be up, get ip address
1316 :param logging_text: prefix use for logging
1317 :param nsr_id:
1318 :param vnfr_id:
1319 :param kdu_name:
1320 :return: IP address, K8s services
1321 """
1322
1323 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1324 nb_tries = 0
1325
1326 while nb_tries < 360:
1327 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1328 kdur = next(
1329 (
1330 x
1331 for x in get_iterable(db_vnfr, "kdur")
1332 if x.get("kdu-name") == kdu_name
1333 ),
1334 None,
1335 )
1336 if not kdur:
1337 raise LcmException(
1338 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1339 )
1340 if kdur.get("status"):
1341 if kdur["status"] in ("READY", "ENABLED"):
1342 return kdur.get("ip-address"), kdur.get("services")
1343 else:
1344 raise LcmException(
1345 "target KDU={} is in error state".format(kdu_name)
1346 )
1347
1348 await asyncio.sleep(10)
1349 nb_tries += 1
1350 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1351
1352 async def wait_vm_up_insert_key_ro(
1353 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1354 ):
1355 """
1356 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1357 :param logging_text: prefix use for logging
1358 :param nsr_id:
1359 :param vnfr_id:
1360 :param vdu_id:
1361 :param vdu_index:
1362 :param pub_key: public ssh key to inject, None to skip
1363 :param user: user to apply the public ssh key
1364 :return: IP address
1365 """
1366
1367 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1368 ip_address = None
1369 target_vdu_id = None
1370 ro_retries = 0
1371
1372 while True:
1373 ro_retries += 1
1374 if ro_retries >= 360: # 1 hour
1375 raise LcmException(
1376 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1377 )
1378
1379 await asyncio.sleep(10)
1380
1381 # get ip address
1382 if not target_vdu_id:
1383 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1384
1385 if not vdu_id: # for the VNF case
1386 if db_vnfr.get("status") == "ERROR":
1387 raise LcmException(
1388 "Cannot inject ssh-key because target VNF is in error state"
1389 )
1390 ip_address = db_vnfr.get("ip-address")
1391 if not ip_address:
1392 continue
1393 vdur = next(
1394 (
1395 x
1396 for x in get_iterable(db_vnfr, "vdur")
1397 if x.get("ip-address") == ip_address
1398 ),
1399 None,
1400 )
1401 else: # VDU case
1402 vdur = next(
1403 (
1404 x
1405 for x in get_iterable(db_vnfr, "vdur")
1406 if x.get("vdu-id-ref") == vdu_id
1407 and x.get("count-index") == vdu_index
1408 ),
1409 None,
1410 )
1411
1412 if (
1413 not vdur and len(db_vnfr.get("vdur", ())) == 1
1414 ): # If only one, this should be the target vdu
1415 vdur = db_vnfr["vdur"][0]
1416 if not vdur:
1417 raise LcmException(
1418 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1419 vnfr_id, vdu_id, vdu_index
1420 )
1421 )
1422 # New generation RO stores information at "vim_info"
1423 ng_ro_status = None
1424 target_vim = None
1425 if vdur.get("vim_info"):
1426 target_vim = next(
1427 t for t in vdur["vim_info"]
1428 ) # there should be only one key
1429 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1430 if (
1431 vdur.get("pdu-type")
1432 or vdur.get("status") == "ACTIVE"
1433 or ng_ro_status == "ACTIVE"
1434 ):
1435 ip_address = vdur.get("ip-address")
1436 if not ip_address:
1437 continue
1438 target_vdu_id = vdur["vdu-id-ref"]
1439 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1440 raise LcmException(
1441 "Cannot inject ssh-key because target VM is in error state"
1442 )
1443
1444 if not target_vdu_id:
1445 continue
1446
1447 # inject public key into machine
1448 if pub_key and user:
1449 self.logger.debug(logging_text + "Inserting RO key")
1450 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1451 if vdur.get("pdu-type"):
1452 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1453 return ip_address
1454 try:
1455 target = {
1456 "action": {
1457 "action": "inject_ssh_key",
1458 "key": pub_key,
1459 "user": user,
1460 },
1461 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1462 }
1463 desc = await self.RO.deploy(nsr_id, target)
1464 action_id = desc["action_id"]
1465 await self._wait_ng_ro(
1466 nsr_id, action_id, timeout=600, operation="instantiation"
1467 )
1468 break
1469 except NgRoException as e:
1470 raise LcmException(
1471 "Reaching max tries injecting key. Error: {}".format(e)
1472 )
1473 else:
1474 break
1475
1476 return ip_address
1477
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): "timeout" counts polling iterations, not seconds; with the
        # 10-second sleep below this allows up to ~50 minutes — confirm intended.
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # dependency applies when this VCA is NS-level (no member-vnf-index)
                # or the other VCA belongs to the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # a dependency is still in progress: stop scanning, poll again
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1515
1516 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1517 vca_id = None
1518 if db_vnfr:
1519 vca_id = deep_get(db_vnfr, ("vca-id",))
1520 elif db_nsr:
1521 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1522 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1523 return vca_id
1524
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        kdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Deploy one VCA (execution environment) and run its Day-1 configuration.

        Creates or registers the execution environment, installs the charm/helm
        configuration software, adds VCA relations, optionally injects an ssh key
        into the target VM (or waits for the KDU), executes the initial config
        primitives and, for helm, configures prometheus scrape jobs. Progress and
        status are written to "configurationStatus" in the nsrs record.

        :param logging_text: prefix to use at logging
        :param vca_index: index of this VCA inside "_admin.deployed.VCA"
        :param nsi_id: netslice instance id, or None
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnf record (None for NS-level VCA)
        :param vdu_id: vdu id when this VCA configures a VDU, else None
        :param kdu_name: kdu name when this VCA configures a KDU, else None
        :param vdu_index: count-index of the VDU
        :param kdu_index: count-index of the KDU
        :param config_descriptor: descriptor section holding the configuration
        :param deploy_params: additional params for the config primitives
        :param base_folder: package storage info ("_admin.storage" of the descriptor)
        :param nslcmop_id: operation id, used to report progress
        :param stage: 3-item progress list written to the database
        :param vca_type: native_charm / lxc_proxy_charm / k8s_proxy_charm / helm-v3
        :param vca_name: charm or chart name
        :param ee_config_descriptor: execution-environment section of the descriptor
        :raises LcmException: wrapping any failure; "step" records where it failed
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" is updated before each stage so the except clause can report
        # exactly where the instantiation failed
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # narrow element type/namespace from NS to VNF, then to VDU or KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        # last path component of artifact_path is the charm name
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=nsr_id,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        chart_model=vca_name,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            # number of charm units scales with the configured element
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            is_relation_added = await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )

            if not is_relation_added:
                raise LcmException("Relations could not be added to VCA.")

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # default rw_mgmt_ip to None, avoiding the non definition of the variable
                rw_mgmt_ip = None

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip, services = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                        vnfd = self.db.get_one(
                            "vnfds_revisions",
                            {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
                        )
                        kdu = get_kdu(vnfd, kdu_name)
                        kdu_services = [
                            service["name"] for service in get_kdu_services(kdu)
                        ]
                        # keep only the deployed services named in the descriptor
                        exposed_services = []
                        for service in services:
                            if any(s in service["name"] for s in kdu_services):
                                exposed_services.append(service)
                        await self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name="config",
                            params_dict={
                                "osm-config": json.dumps(
                                    OsmConfigBuilder(
                                        k8s={"services": exposed_services}
                                    ).build()
                                )
                            },
                            vca_id=vca_id,
                        )

                    # This verification is needed in order to avoid trying to add a public key
                    # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
                    # for a KNF and not for its KDUs, the previous verification gives False, and the code
                    # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
                    # or it is a KNF)
                    elif db_vnfr.get("vdur"):
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

                # store rw_mgmt_ip in deploy params for later replacement
                deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm-v3":
                # TODO: review for those cases where the helm chart is a reference and
                # is not part of the NF package
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                    element_type=element_type,
                    vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
                    vdu_id=vdu_id,
                    vdu_index=vdu_index,
                    kdu_name=kdu_name,
                    kdu_index=kdu_index,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log a traceback for unexpected exception types
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{}. {}".format(step, e)) from e
2001
2002 def _write_ns_status(
2003 self,
2004 nsr_id: str,
2005 ns_state: str,
2006 current_operation: str,
2007 current_operation_id: str,
2008 error_description: str = None,
2009 error_detail: str = None,
2010 other_update: dict = None,
2011 ):
2012 """
2013 Update db_nsr fields.
2014 :param nsr_id:
2015 :param ns_state:
2016 :param current_operation:
2017 :param current_operation_id:
2018 :param error_description:
2019 :param error_detail:
2020 :param other_update: Other required changes at database if provided, will be cleared
2021 :return:
2022 """
2023 try:
2024 db_dict = other_update or {}
2025 db_dict[
2026 "_admin.nslcmop"
2027 ] = current_operation_id # for backward compatibility
2028 db_dict["_admin.current-operation"] = current_operation_id
2029 db_dict["_admin.operation-type"] = (
2030 current_operation if current_operation != "IDLE" else None
2031 )
2032 db_dict["currentOperation"] = current_operation
2033 db_dict["currentOperationID"] = current_operation_id
2034 db_dict["errorDescription"] = error_description
2035 db_dict["errorDetail"] = error_detail
2036
2037 if ns_state:
2038 db_dict["nsState"] = ns_state
2039 self.update_db_2("nsrs", nsr_id, db_dict)
2040 except DbException as e:
2041 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2042
2043 def _write_op_status(
2044 self,
2045 op_id: str,
2046 stage: list = None,
2047 error_message: str = None,
2048 queuePosition: int = 0,
2049 operation_state: str = None,
2050 other_update: dict = None,
2051 ):
2052 try:
2053 db_dict = other_update or {}
2054 db_dict["queuePosition"] = queuePosition
2055 if isinstance(stage, list):
2056 db_dict["stage"] = stage[0]
2057 db_dict["detailed-status"] = " ".join(stage)
2058 elif stage is not None:
2059 db_dict["stage"] = str(stage)
2060
2061 if error_message is not None:
2062 db_dict["errorMessage"] = error_message
2063 if operation_state is not None:
2064 db_dict["operationState"] = operation_state
2065 db_dict["statusEnteredTime"] = time()
2066 self.update_db_2("nslcmops", op_id, db_dict)
2067 except DbException as e:
2068 self.logger.warn(
2069 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2070 )
2071
2072 def _write_all_config_status(self, db_nsr: dict, status: str):
2073 try:
2074 nsr_id = db_nsr["_id"]
2075 # configurationStatus
2076 config_status = db_nsr.get("configurationStatus")
2077 if config_status:
2078 db_nsr_update = {
2079 "configurationStatus.{}.status".format(index): status
2080 for index, v in enumerate(config_status)
2081 if v
2082 }
2083 # update status
2084 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2085
2086 except DbException as e:
2087 self.logger.warn(
2088 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2089 )
2090
2091 def _write_configuration_status(
2092 self,
2093 nsr_id: str,
2094 vca_index: int,
2095 status: str = None,
2096 element_under_configuration: str = None,
2097 element_type: str = None,
2098 other_update: dict = None,
2099 ):
2100 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2101 # .format(vca_index, status))
2102
2103 try:
2104 db_path = "configurationStatus.{}.".format(vca_index)
2105 db_dict = other_update or {}
2106 if status:
2107 db_dict[db_path + "status"] = status
2108 if element_under_configuration:
2109 db_dict[
2110 db_path + "elementUnderConfiguration"
2111 ] = element_under_configuration
2112 if element_type:
2113 db_dict[db_path + "elementType"] = element_type
2114 self.update_db_2("nsrs", nsr_id, db_dict)
2115 except DbException as e:
2116 self.logger.warn(
2117 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2118 status, nsr_id, vca_index, e
2119 )
2120 )
2121
2122 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2123 """
2124 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2125 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2126 Database is used because the result can be obtained from a different LCM worker in case of HA.
2127 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2128 :param db_nslcmop: database content of nslcmop
2129 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2130 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2131 computed 'vim-account-id'
2132 """
2133 modified = False
2134 nslcmop_id = db_nslcmop["_id"]
2135 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2136 if placement_engine == "PLA":
2137 self.logger.debug(
2138 logging_text + "Invoke and wait for placement optimization"
2139 )
2140 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2141 db_poll_interval = 5
2142 wait = db_poll_interval * 10
2143 pla_result = None
2144 while not pla_result and wait >= 0:
2145 await asyncio.sleep(db_poll_interval)
2146 wait -= db_poll_interval
2147 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2148 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2149
2150 if not pla_result:
2151 raise LcmException(
2152 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2153 )
2154
2155 for pla_vnf in pla_result["vnf"]:
2156 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2157 if not pla_vnf.get("vimAccountId") or not vnfr:
2158 continue
2159 modified = True
2160 self.db.set_one(
2161 "vnfrs",
2162 {"_id": vnfr["_id"]},
2163 {"vim-account-id": pla_vnf["vimAccountId"]},
2164 )
2165 # Modifies db_vnfrs
2166 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2167 return modified
2168
2169 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2170 alerts = []
2171 nsr_id = vnfr["nsr-id-ref"]
2172 df = vnfd.get("df", [{}])[0]
2173 # Checking for auto-healing configuration
2174 if "healing-aspect" in df:
2175 healing_aspects = df["healing-aspect"]
2176 for healing in healing_aspects:
2177 for healing_policy in healing.get("healing-policy", ()):
2178 vdu_id = healing_policy["vdu-id"]
2179 vdur = next(
2180 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2181 {},
2182 )
2183 if not vdur:
2184 continue
2185 metric_name = "vm_status"
2186 vdu_name = vdur.get("name")
2187 vnf_member_index = vnfr["member-vnf-index-ref"]
2188 uuid = str(uuid4())
2189 name = f"healing_{uuid}"
2190 action = healing_policy
2191 # action_on_recovery = healing.get("action-on-recovery")
2192 # cooldown_time = healing.get("cooldown-time")
2193 # day1 = healing.get("day1")
2194 alert = {
2195 "uuid": uuid,
2196 "name": name,
2197 "metric": metric_name,
2198 "tags": {
2199 "ns_id": nsr_id,
2200 "vnf_member_index": vnf_member_index,
2201 "vdu_name": vdu_name,
2202 },
2203 "alarm_status": "ok",
2204 "action_type": "healing",
2205 "action": action,
2206 }
2207 alerts.append(alert)
2208 return alerts
2209
2210 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2211 alerts = []
2212 nsr_id = vnfr["nsr-id-ref"]
2213 df = vnfd.get("df", [{}])[0]
2214 # Checking for auto-scaling configuration
2215 if "scaling-aspect" in df:
2216 scaling_aspects = df["scaling-aspect"]
2217 all_vnfd_monitoring_params = {}
2218 for ivld in vnfd.get("int-virtual-link-desc", ()):
2219 for mp in ivld.get("monitoring-parameters", ()):
2220 all_vnfd_monitoring_params[mp.get("id")] = mp
2221 for vdu in vnfd.get("vdu", ()):
2222 for mp in vdu.get("monitoring-parameter", ()):
2223 all_vnfd_monitoring_params[mp.get("id")] = mp
2224 for df in vnfd.get("df", ()):
2225 for mp in df.get("monitoring-parameter", ()):
2226 all_vnfd_monitoring_params[mp.get("id")] = mp
2227 for scaling_aspect in scaling_aspects:
2228 scaling_group_name = scaling_aspect.get("name", "")
2229 # Get monitored VDUs
2230 all_monitored_vdus = set()
2231 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2232 "deltas", ()
2233 ):
2234 for vdu_delta in delta.get("vdu-delta", ()):
2235 all_monitored_vdus.add(vdu_delta.get("id"))
2236 monitored_vdurs = list(
2237 filter(
2238 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2239 vnfr["vdur"],
2240 )
2241 )
2242 if not monitored_vdurs:
2243 self.logger.error(
2244 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2245 )
2246 continue
2247 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2248 if scaling_policy["scaling-type"] != "automatic":
2249 continue
2250 threshold_time = scaling_policy.get("threshold-time", "1")
2251 cooldown_time = scaling_policy.get("cooldown-time", "0")
2252 for scaling_criteria in scaling_policy["scaling-criteria"]:
2253 monitoring_param_ref = scaling_criteria.get(
2254 "vnf-monitoring-param-ref"
2255 )
2256 vnf_monitoring_param = all_vnfd_monitoring_params[
2257 monitoring_param_ref
2258 ]
2259 for vdur in monitored_vdurs:
2260 vdu_id = vdur["vdu-id-ref"]
2261 metric_name = vnf_monitoring_param.get("performance-metric")
2262 metric_name = f"osm_{metric_name}"
2263 vnf_member_index = vnfr["member-vnf-index-ref"]
2264 scalein_threshold = scaling_criteria.get(
2265 "scale-in-threshold"
2266 )
2267 scaleout_threshold = scaling_criteria.get(
2268 "scale-out-threshold"
2269 )
2270 # Looking for min/max-number-of-instances
2271 instances_min_number = 1
2272 instances_max_number = 1
2273 vdu_profile = df["vdu-profile"]
2274 if vdu_profile:
2275 profile = next(
2276 item for item in vdu_profile if item["id"] == vdu_id
2277 )
2278 instances_min_number = profile.get(
2279 "min-number-of-instances", 1
2280 )
2281 instances_max_number = profile.get(
2282 "max-number-of-instances", 1
2283 )
2284
2285 if scalein_threshold:
2286 uuid = str(uuid4())
2287 name = f"scalein_{uuid}"
2288 operation = scaling_criteria[
2289 "scale-in-relational-operation"
2290 ]
2291 rel_operator = self.rel_operation_types.get(
2292 operation, "<="
2293 )
2294 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2295 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2296 labels = {
2297 "ns_id": nsr_id,
2298 "vnf_member_index": vnf_member_index,
2299 "vdu_id": vdu_id,
2300 }
2301 prom_cfg = {
2302 "alert": name,
2303 "expr": expression,
2304 "for": str(threshold_time) + "m",
2305 "labels": labels,
2306 }
2307 action = scaling_policy
2308 action = {
2309 "scaling-group": scaling_group_name,
2310 "cooldown-time": cooldown_time,
2311 }
2312 alert = {
2313 "uuid": uuid,
2314 "name": name,
2315 "metric": metric_name,
2316 "tags": {
2317 "ns_id": nsr_id,
2318 "vnf_member_index": vnf_member_index,
2319 "vdu_id": vdu_id,
2320 },
2321 "alarm_status": "ok",
2322 "action_type": "scale_in",
2323 "action": action,
2324 "prometheus_config": prom_cfg,
2325 }
2326 alerts.append(alert)
2327
2328 if scaleout_threshold:
2329 uuid = str(uuid4())
2330 name = f"scaleout_{uuid}"
2331 operation = scaling_criteria[
2332 "scale-out-relational-operation"
2333 ]
2334 rel_operator = self.rel_operation_types.get(
2335 operation, "<="
2336 )
2337 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2338 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2339 labels = {
2340 "ns_id": nsr_id,
2341 "vnf_member_index": vnf_member_index,
2342 "vdu_id": vdu_id,
2343 }
2344 prom_cfg = {
2345 "alert": name,
2346 "expr": expression,
2347 "for": str(threshold_time) + "m",
2348 "labels": labels,
2349 }
2350 action = scaling_policy
2351 action = {
2352 "scaling-group": scaling_group_name,
2353 "cooldown-time": cooldown_time,
2354 }
2355 alert = {
2356 "uuid": uuid,
2357 "name": name,
2358 "metric": metric_name,
2359 "tags": {
2360 "ns_id": nsr_id,
2361 "vnf_member_index": vnf_member_index,
2362 "vdu_id": vdu_id,
2363 },
2364 "alarm_status": "ok",
2365 "action_type": "scale_out",
2366 "action": action,
2367 "prometheus_config": prom_cfg,
2368 }
2369 alerts.append(alert)
2370 return alerts
2371
2372 def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
2373 alerts = []
2374 nsr_id = vnfr["nsr-id-ref"]
2375 vnf_member_index = vnfr["member-vnf-index-ref"]
2376
2377 # Checking for VNF alarm configuration
2378 for vdur in vnfr["vdur"]:
2379 vdu_id = vdur["vdu-id-ref"]
2380 vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
2381 if "alarm" in vdu:
2382 # Get VDU monitoring params, since alerts are based on them
2383 vdu_monitoring_params = {}
2384 for mp in vdu.get("monitoring-parameter", []):
2385 vdu_monitoring_params[mp.get("id")] = mp
2386 if not vdu_monitoring_params:
2387 self.logger.error(
2388 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2389 )
2390 continue
2391 # Get alarms in the VDU
2392 alarm_descriptors = vdu["alarm"]
2393 # Create VDU alarms for each alarm in the VDU
2394 for alarm_descriptor in alarm_descriptors:
2395 # Check that the VDU alarm refers to a proper monitoring param
2396 alarm_monitoring_param = alarm_descriptor.get(
2397 "vnf-monitoring-param-ref", ""
2398 )
2399 vdu_specific_monitoring_param = vdu_monitoring_params.get(
2400 alarm_monitoring_param, {}
2401 )
2402 if not vdu_specific_monitoring_param:
2403 self.logger.error(
2404 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2405 )
2406 continue
2407 metric_name = vdu_specific_monitoring_param.get(
2408 "performance-metric"
2409 )
2410 if not metric_name:
2411 self.logger.error(
2412 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2413 )
2414 continue
2415 # Set params of the alarm to be created in Prometheus
2416 metric_name = f"osm_{metric_name}"
2417 metric_threshold = alarm_descriptor.get("value")
2418 uuid = str(uuid4())
2419 alert_name = f"vdu_alarm_{uuid}"
2420 operation = alarm_descriptor["operation"]
2421 rel_operator = self.rel_operation_types.get(operation, "<=")
2422 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2423 expression = f"{metric_selector} {rel_operator} {metric_threshold}"
2424 labels = {
2425 "ns_id": nsr_id,
2426 "vnf_member_index": vnf_member_index,
2427 "vdu_id": vdu_id,
2428 "vdu_name": "{{ $labels.vdu_name }}",
2429 }
2430 prom_cfg = {
2431 "alert": alert_name,
2432 "expr": expression,
2433 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2434 "labels": labels,
2435 }
2436 alarm_action = dict()
2437 for action_type in ["ok", "insufficient-data", "alarm"]:
2438 if (
2439 "actions" in alarm_descriptor
2440 and action_type in alarm_descriptor["actions"]
2441 ):
2442 alarm_action[action_type] = alarm_descriptor["actions"][
2443 action_type
2444 ]
2445 alert = {
2446 "uuid": uuid,
2447 "name": alert_name,
2448 "metric": metric_name,
2449 "tags": {
2450 "ns_id": nsr_id,
2451 "vnf_member_index": vnf_member_index,
2452 "vdu_id": vdu_id,
2453 },
2454 "alarm_status": "ok",
2455 "action_type": "vdu_alarm",
2456 "action": alarm_action,
2457 "prometheus_config": prom_cfg,
2458 }
2459 alerts.append(alert)
2460 return alerts
2461
2462 def update_nsrs_with_pla_result(self, params):
2463 try:
2464 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2465 self.update_db_2(
2466 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2467 )
2468 except Exception as e:
2469 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2470
2471 async def instantiate(self, nsr_id, nslcmop_id):
2472 """
2473
2474 :param nsr_id: ns instance to deploy
2475 :param nslcmop_id: operation to run
2476 :return:
2477 """
2478
2479 # Try to lock HA task here
2480 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2481 if not task_is_locked_by_me:
2482 self.logger.debug(
2483 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2484 )
2485 return
2486
2487 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2488 self.logger.debug(logging_text + "Enter")
2489
2490 # get all needed from database
2491
2492 # database nsrs record
2493 db_nsr = None
2494
2495 # database nslcmops record
2496 db_nslcmop = None
2497
2498 # update operation on nsrs
2499 db_nsr_update = {}
2500 # update operation on nslcmops
2501 db_nslcmop_update = {}
2502
2503 timeout_ns_deploy = self.timeout.ns_deploy
2504
2505 nslcmop_operation_state = None
2506 db_vnfrs = {} # vnf's info indexed by member-index
2507 # n2vc_info = {}
2508 tasks_dict_info = {} # from task to info text
2509 exc = None
2510 error_list = []
2511 stage = [
2512 "Stage 1/5: preparation of the environment.",
2513 "Waiting for previous operations to terminate.",
2514 "",
2515 ]
2516 # ^ stage, step, VIM progress
2517 try:
2518 # wait for any previous tasks in process
2519 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2520
2521 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2522 stage[1] = "Reading from database."
2523 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2524 db_nsr_update["detailed-status"] = "creating"
2525 db_nsr_update["operational-status"] = "init"
2526 self._write_ns_status(
2527 nsr_id=nsr_id,
2528 ns_state="BUILDING",
2529 current_operation="INSTANTIATING",
2530 current_operation_id=nslcmop_id,
2531 other_update=db_nsr_update,
2532 )
2533 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2534
2535 # read from db: operation
2536 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2537 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2538 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2539 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2540 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2541 )
2542 ns_params = db_nslcmop.get("operationParams")
2543 if ns_params and ns_params.get("timeout_ns_deploy"):
2544 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2545
2546 # read from db: ns
2547 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2548 self.logger.debug(logging_text + stage[1])
2549 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2550 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2551 self.logger.debug(logging_text + stage[1])
2552 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2553 self.fs.sync(db_nsr["nsd-id"])
2554 db_nsr["nsd"] = nsd
2555 # nsr_name = db_nsr["name"] # TODO short-name??
2556
2557 # read from db: vnf's of this ns
2558 stage[1] = "Getting vnfrs from db."
2559 self.logger.debug(logging_text + stage[1])
2560 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2561
2562 # read from db: vnfd's for every vnf
2563 db_vnfds = [] # every vnfd data
2564
2565 # for each vnf in ns, read vnfd
2566 for vnfr in db_vnfrs_list:
2567 if vnfr.get("kdur"):
2568 kdur_list = []
2569 for kdur in vnfr["kdur"]:
2570 if kdur.get("additionalParams"):
2571 kdur["additionalParams"] = json.loads(
2572 kdur["additionalParams"]
2573 )
2574 kdur_list.append(kdur)
2575 vnfr["kdur"] = kdur_list
2576
2577 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2578 vnfd_id = vnfr["vnfd-id"]
2579 vnfd_ref = vnfr["vnfd-ref"]
2580 self.fs.sync(vnfd_id)
2581
2582 # if we haven't this vnfd, read it from db
2583 if vnfd_id not in db_vnfds:
2584 # read from db
2585 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2586 vnfd_id, vnfd_ref
2587 )
2588 self.logger.debug(logging_text + stage[1])
2589 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2590
2591 # store vnfd
2592 db_vnfds.append(vnfd)
2593
2594 # Get or generates the _admin.deployed.VCA list
2595 vca_deployed_list = None
2596 if db_nsr["_admin"].get("deployed"):
2597 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2598 if vca_deployed_list is None:
2599 vca_deployed_list = []
2600 configuration_status_list = []
2601 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2602 db_nsr_update["configurationStatus"] = configuration_status_list
2603 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2604 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2605 elif isinstance(vca_deployed_list, dict):
2606 # maintain backward compatibility. Change a dict to list at database
2607 vca_deployed_list = list(vca_deployed_list.values())
2608 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2609 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2610
2611 if not isinstance(
2612 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2613 ):
2614 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2615 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2616
2617 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2618 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2619 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2620 self.db.set_list(
2621 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2622 )
2623
2624 # n2vc_redesign STEP 2 Deploy Network Scenario
2625 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2626 self._write_op_status(op_id=nslcmop_id, stage=stage)
2627
2628 stage[1] = "Deploying KDUs."
2629 # self.logger.debug(logging_text + "Before deploy_kdus")
2630 # Call to deploy_kdus in case exists the "vdu:kdu" param
2631 await self.deploy_kdus(
2632 logging_text=logging_text,
2633 nsr_id=nsr_id,
2634 nslcmop_id=nslcmop_id,
2635 db_vnfrs=db_vnfrs,
2636 db_vnfds=db_vnfds,
2637 task_instantiation_info=tasks_dict_info,
2638 )
2639
2640 stage[1] = "Getting VCA public key."
2641 # n2vc_redesign STEP 1 Get VCA public ssh-key
2642 # feature 1429. Add n2vc public key to needed VMs
2643 n2vc_key = self.n2vc.get_public_key()
2644 n2vc_key_list = [n2vc_key]
2645 if self.vca_config.public_key:
2646 n2vc_key_list.append(self.vca_config.public_key)
2647
2648 stage[1] = "Deploying NS at VIM."
2649 task_ro = asyncio.ensure_future(
2650 self.instantiate_RO(
2651 logging_text=logging_text,
2652 nsr_id=nsr_id,
2653 nsd=nsd,
2654 db_nsr=db_nsr,
2655 db_nslcmop=db_nslcmop,
2656 db_vnfrs=db_vnfrs,
2657 db_vnfds=db_vnfds,
2658 n2vc_key_list=n2vc_key_list,
2659 stage=stage,
2660 )
2661 )
2662 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2663 tasks_dict_info[task_ro] = "Deploying at VIM"
2664
2665 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2666 stage[1] = "Deploying Execution Environments."
2667 self.logger.debug(logging_text + stage[1])
2668
2669 # create namespace and certificate if any helm based EE is present in the NS
2670 if check_helm_ee_in_ns(db_vnfds):
2671 await self.vca_map["helm-v3"].setup_ns_namespace(
2672 name=nsr_id,
2673 )
2674 # create TLS certificates
2675 await self.vca_map["helm-v3"].create_tls_certificate(
2676 secret_name=self.EE_TLS_NAME,
2677 dns_prefix="*",
2678 nsr_id=nsr_id,
2679 usage="server auth",
2680 namespace=nsr_id,
2681 )
2682
2683 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2684 for vnf_profile in get_vnf_profiles(nsd):
2685 vnfd_id = vnf_profile["vnfd-id"]
2686 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2687 member_vnf_index = str(vnf_profile["id"])
2688 db_vnfr = db_vnfrs[member_vnf_index]
2689 base_folder = vnfd["_admin"]["storage"]
2690 vdu_id = None
2691 vdu_index = 0
2692 vdu_name = None
2693 kdu_name = None
2694 kdu_index = None
2695
2696 # Get additional parameters
2697 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2698 if db_vnfr.get("additionalParamsForVnf"):
2699 deploy_params.update(
2700 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2701 )
2702
2703 descriptor_config = get_configuration(vnfd, vnfd["id"])
2704 if descriptor_config:
2705 self._deploy_n2vc(
2706 logging_text=logging_text
2707 + "member_vnf_index={} ".format(member_vnf_index),
2708 db_nsr=db_nsr,
2709 db_vnfr=db_vnfr,
2710 nslcmop_id=nslcmop_id,
2711 nsr_id=nsr_id,
2712 nsi_id=nsi_id,
2713 vnfd_id=vnfd_id,
2714 vdu_id=vdu_id,
2715 kdu_name=kdu_name,
2716 member_vnf_index=member_vnf_index,
2717 vdu_index=vdu_index,
2718 kdu_index=kdu_index,
2719 vdu_name=vdu_name,
2720 deploy_params=deploy_params,
2721 descriptor_config=descriptor_config,
2722 base_folder=base_folder,
2723 task_instantiation_info=tasks_dict_info,
2724 stage=stage,
2725 )
2726
2727 # Deploy charms for each VDU that supports one.
2728 for vdud in get_vdu_list(vnfd):
2729 vdu_id = vdud["id"]
2730 descriptor_config = get_configuration(vnfd, vdu_id)
2731 vdur = find_in_list(
2732 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2733 )
2734
2735 if vdur.get("additionalParams"):
2736 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2737 else:
2738 deploy_params_vdu = deploy_params
2739 deploy_params_vdu["OSM"] = get_osm_params(
2740 db_vnfr, vdu_id, vdu_count_index=0
2741 )
2742 vdud_count = get_number_of_instances(vnfd, vdu_id)
2743
2744 self.logger.debug("VDUD > {}".format(vdud))
2745 self.logger.debug(
2746 "Descriptor config > {}".format(descriptor_config)
2747 )
2748 if descriptor_config:
2749 vdu_name = None
2750 kdu_name = None
2751 kdu_index = None
2752 for vdu_index in range(vdud_count):
2753 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2754 self._deploy_n2vc(
2755 logging_text=logging_text
2756 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2757 member_vnf_index, vdu_id, vdu_index
2758 ),
2759 db_nsr=db_nsr,
2760 db_vnfr=db_vnfr,
2761 nslcmop_id=nslcmop_id,
2762 nsr_id=nsr_id,
2763 nsi_id=nsi_id,
2764 vnfd_id=vnfd_id,
2765 vdu_id=vdu_id,
2766 kdu_name=kdu_name,
2767 kdu_index=kdu_index,
2768 member_vnf_index=member_vnf_index,
2769 vdu_index=vdu_index,
2770 vdu_name=vdu_name,
2771 deploy_params=deploy_params_vdu,
2772 descriptor_config=descriptor_config,
2773 base_folder=base_folder,
2774 task_instantiation_info=tasks_dict_info,
2775 stage=stage,
2776 )
2777 for kdud in get_kdu_list(vnfd):
2778 kdu_name = kdud["name"]
2779 descriptor_config = get_configuration(vnfd, kdu_name)
2780 if descriptor_config:
2781 vdu_id = None
2782 vdu_index = 0
2783 vdu_name = None
2784 kdu_index, kdur = next(
2785 x
2786 for x in enumerate(db_vnfr["kdur"])
2787 if x[1]["kdu-name"] == kdu_name
2788 )
2789 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2790 if kdur.get("additionalParams"):
2791 deploy_params_kdu.update(
2792 parse_yaml_strings(kdur["additionalParams"].copy())
2793 )
2794
2795 self._deploy_n2vc(
2796 logging_text=logging_text,
2797 db_nsr=db_nsr,
2798 db_vnfr=db_vnfr,
2799 nslcmop_id=nslcmop_id,
2800 nsr_id=nsr_id,
2801 nsi_id=nsi_id,
2802 vnfd_id=vnfd_id,
2803 vdu_id=vdu_id,
2804 kdu_name=kdu_name,
2805 member_vnf_index=member_vnf_index,
2806 vdu_index=vdu_index,
2807 kdu_index=kdu_index,
2808 vdu_name=vdu_name,
2809 deploy_params=deploy_params_kdu,
2810 descriptor_config=descriptor_config,
2811 base_folder=base_folder,
2812 task_instantiation_info=tasks_dict_info,
2813 stage=stage,
2814 )
2815
2816 # Check if each vnf has exporter for metric collection if so update prometheus job records
2817 if "exporters-endpoints" in vnfd.get("df")[0]:
2818 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2819 self.logger.debug("exporter config :{}".format(exporter_config))
2820 artifact_path = "{}/{}/{}".format(
2821 base_folder["folder"],
2822 base_folder["pkg-dir"],
2823 "exporter-endpoint",
2824 )
2825 ee_id = None
2826 ee_config_descriptor = exporter_config
2827 vnfr_id = db_vnfr["id"]
2828 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2829 logging_text,
2830 nsr_id,
2831 vnfr_id,
2832 vdu_id=None,
2833 vdu_index=None,
2834 user=None,
2835 pub_key=None,
2836 )
2837 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2838 self.logger.debug("Artifact_path:{}".format(artifact_path))
2839 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2840 vdu_id_for_prom = None
2841 vdu_index_for_prom = None
2842 for x in get_iterable(db_vnfr, "vdur"):
2843 vdu_id_for_prom = x.get("vdu-id-ref")
2844 vdu_index_for_prom = x.get("count-index")
2845 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2846 ee_id=ee_id,
2847 artifact_path=artifact_path,
2848 ee_config_descriptor=ee_config_descriptor,
2849 vnfr_id=vnfr_id,
2850 nsr_id=nsr_id,
2851 target_ip=rw_mgmt_ip,
2852 element_type="VDU",
2853 vdu_id=vdu_id_for_prom,
2854 vdu_index=vdu_index_for_prom,
2855 )
2856
2857 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2858 if prometheus_jobs:
2859 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2860 self.update_db_2(
2861 "nsrs",
2862 nsr_id,
2863 db_nsr_update,
2864 )
2865
2866 for job in prometheus_jobs:
2867 self.db.set_one(
2868 "prometheus_jobs",
2869 {"job_name": job["job_name"]},
2870 job,
2871 upsert=True,
2872 fail_on_empty=False,
2873 )
2874
2875 # Check if this NS has a charm configuration
2876 descriptor_config = nsd.get("ns-configuration")
2877 if descriptor_config and descriptor_config.get("juju"):
2878 vnfd_id = None
2879 db_vnfr = None
2880 member_vnf_index = None
2881 vdu_id = None
2882 kdu_name = None
2883 kdu_index = None
2884 vdu_index = 0
2885 vdu_name = None
2886
2887 # Get additional parameters
2888 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2889 if db_nsr.get("additionalParamsForNs"):
2890 deploy_params.update(
2891 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2892 )
2893 base_folder = nsd["_admin"]["storage"]
2894 self._deploy_n2vc(
2895 logging_text=logging_text,
2896 db_nsr=db_nsr,
2897 db_vnfr=db_vnfr,
2898 nslcmop_id=nslcmop_id,
2899 nsr_id=nsr_id,
2900 nsi_id=nsi_id,
2901 vnfd_id=vnfd_id,
2902 vdu_id=vdu_id,
2903 kdu_name=kdu_name,
2904 member_vnf_index=member_vnf_index,
2905 vdu_index=vdu_index,
2906 kdu_index=kdu_index,
2907 vdu_name=vdu_name,
2908 deploy_params=deploy_params,
2909 descriptor_config=descriptor_config,
2910 base_folder=base_folder,
2911 task_instantiation_info=tasks_dict_info,
2912 stage=stage,
2913 )
2914
2915 # rest of staff will be done at finally
2916
2917 except (
2918 ROclient.ROClientException,
2919 DbException,
2920 LcmException,
2921 N2VCException,
2922 ) as e:
2923 self.logger.error(
2924 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2925 )
2926 exc = e
2927 except asyncio.CancelledError:
2928 self.logger.error(
2929 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2930 )
2931 exc = "Operation was cancelled"
2932 except Exception as e:
2933 exc = traceback.format_exc()
2934 self.logger.critical(
2935 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2936 exc_info=True,
2937 )
2938 finally:
2939 if exc:
2940 error_list.append(str(exc))
2941 try:
2942 # wait for pending tasks
2943 if tasks_dict_info:
2944 stage[1] = "Waiting for instantiate pending tasks."
2945 self.logger.debug(logging_text + stage[1])
2946 error_list += await self._wait_for_tasks(
2947 logging_text,
2948 tasks_dict_info,
2949 timeout_ns_deploy,
2950 stage,
2951 nslcmop_id,
2952 nsr_id=nsr_id,
2953 )
2954 stage[1] = stage[2] = ""
2955 except asyncio.CancelledError:
2956 error_list.append("Cancelled")
2957 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
2958 await self._wait_for_tasks(
2959 logging_text,
2960 tasks_dict_info,
2961 timeout_ns_deploy,
2962 stage,
2963 nslcmop_id,
2964 nsr_id=nsr_id,
2965 )
2966 except Exception as exc:
2967 error_list.append(str(exc))
2968
2969 # update operation-status
2970 db_nsr_update["operational-status"] = "running"
2971 # let's begin with VCA 'configured' status (later we can change it)
2972 db_nsr_update["config-status"] = "configured"
2973 for task, task_name in tasks_dict_info.items():
2974 if not task.done() or task.cancelled() or task.exception():
2975 if task_name.startswith(self.task_name_deploy_vca):
2976 # A N2VC task is pending
2977 db_nsr_update["config-status"] = "failed"
2978 else:
2979 # RO or KDU task is pending
2980 db_nsr_update["operational-status"] = "failed"
2981
2982 # update status at database
2983 if error_list:
2984 error_detail = ". ".join(error_list)
2985 self.logger.error(logging_text + error_detail)
2986 error_description_nslcmop = "{} Detail: {}".format(
2987 stage[0], error_detail
2988 )
2989 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2990 nslcmop_id, stage[0]
2991 )
2992
2993 db_nsr_update["detailed-status"] = (
2994 error_description_nsr + " Detail: " + error_detail
2995 )
2996 db_nslcmop_update["detailed-status"] = error_detail
2997 nslcmop_operation_state = "FAILED"
2998 ns_state = "BROKEN"
2999 else:
3000 error_detail = None
3001 error_description_nsr = error_description_nslcmop = None
3002 ns_state = "READY"
3003 db_nsr_update["detailed-status"] = "Done"
3004 db_nslcmop_update["detailed-status"] = "Done"
3005 nslcmop_operation_state = "COMPLETED"
3006 # Gather auto-healing and auto-scaling alerts for each vnfr
3007 healing_alerts = []
3008 scaling_alerts = []
3009 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3010 vnfd = next(
3011 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3012 )
3013 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3014 for alert in healing_alerts:
3015 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3016 self.db.create("alerts", alert)
3017
3018 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3019 for alert in scaling_alerts:
3020 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3021 self.db.create("alerts", alert)
3022
3023 alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
3024 for alert in alarm_alerts:
3025 self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
3026 self.db.create("alerts", alert)
3027 if db_nsr:
3028 self._write_ns_status(
3029 nsr_id=nsr_id,
3030 ns_state=ns_state,
3031 current_operation="IDLE",
3032 current_operation_id=None,
3033 error_description=error_description_nsr,
3034 error_detail=error_detail,
3035 other_update=db_nsr_update,
3036 )
3037 self._write_op_status(
3038 op_id=nslcmop_id,
3039 stage="",
3040 error_message=error_description_nslcmop,
3041 operation_state=nslcmop_operation_state,
3042 other_update=db_nslcmop_update,
3043 )
3044
3045 if nslcmop_operation_state:
3046 try:
3047 await self.msg.aiowrite(
3048 "ns",
3049 "instantiated",
3050 {
3051 "nsr_id": nsr_id,
3052 "nslcmop_id": nslcmop_id,
3053 "operationState": nslcmop_operation_state,
3054 "startTime": db_nslcmop["startTime"],
3055 "links": db_nslcmop["links"],
3056 "operationParams": {
3057 "nsInstanceId": nsr_id,
3058 "nsdId": db_nsr["nsd-id"],
3059 },
3060 },
3061 )
3062 except Exception as e:
3063 self.logger.error(
3064 logging_text + "kafka_write notification Exception {}".format(e)
3065 )
3066
3067 self.logger.debug(logging_text + "Exit")
3068 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3069
3070 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3071 if vnfd_id not in cached_vnfds:
3072 cached_vnfds[vnfd_id] = self.db.get_one(
3073 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3074 )
3075 return cached_vnfds[vnfd_id]
3076
3077 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3078 if vnf_profile_id not in cached_vnfrs:
3079 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3080 "vnfrs",
3081 {
3082 "member-vnf-index-ref": vnf_profile_id,
3083 "nsr-id-ref": nsr_id,
3084 },
3085 )
3086 return cached_vnfrs[vnf_profile_id]
3087
3088 def _is_deployed_vca_in_relation(
3089 self, vca: DeployedVCA, relation: Relation
3090 ) -> bool:
3091 found = False
3092 for endpoint in (relation.provider, relation.requirer):
3093 if endpoint["kdu-resource-profile-id"]:
3094 continue
3095 found = (
3096 vca.vnf_profile_id == endpoint.vnf_profile_id
3097 and vca.vdu_profile_id == endpoint.vdu_profile_id
3098 and vca.execution_environment_ref == endpoint.execution_environment_ref
3099 )
3100 if found:
3101 break
3102 return found
3103
3104 def _update_ee_relation_data_with_implicit_data(
3105 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3106 ):
3107 ee_relation_data = safe_get_ee_relation(
3108 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3109 )
3110 ee_relation_level = EELevel.get_level(ee_relation_data)
3111 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3112 "execution-environment-ref"
3113 ]:
3114 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3115 vnfd_id = vnf_profile["vnfd-id"]
3116 project = nsd["_admin"]["projects_read"][0]
3117 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3118 entity_id = (
3119 vnfd_id
3120 if ee_relation_level == EELevel.VNF
3121 else ee_relation_data["vdu-profile-id"]
3122 )
3123 ee = get_juju_ee_ref(db_vnfd, entity_id)
3124 if not ee:
3125 raise Exception(
3126 f"not execution environments found for ee_relation {ee_relation_data}"
3127 )
3128 ee_relation_data["execution-environment-ref"] = ee["id"]
3129 return ee_relation_data
3130
3131 def _get_ns_relations(
3132 self,
3133 nsr_id: str,
3134 nsd: Dict[str, Any],
3135 vca: DeployedVCA,
3136 cached_vnfds: Dict[str, Any],
3137 ) -> List[Relation]:
3138 relations = []
3139 db_ns_relations = get_ns_configuration_relation_list(nsd)
3140 for r in db_ns_relations:
3141 provider_dict = None
3142 requirer_dict = None
3143 if all(key in r for key in ("provider", "requirer")):
3144 provider_dict = r["provider"]
3145 requirer_dict = r["requirer"]
3146 elif "entities" in r:
3147 provider_id = r["entities"][0]["id"]
3148 provider_dict = {
3149 "nsr-id": nsr_id,
3150 "endpoint": r["entities"][0]["endpoint"],
3151 }
3152 if provider_id != nsd["id"]:
3153 provider_dict["vnf-profile-id"] = provider_id
3154 requirer_id = r["entities"][1]["id"]
3155 requirer_dict = {
3156 "nsr-id": nsr_id,
3157 "endpoint": r["entities"][1]["endpoint"],
3158 }
3159 if requirer_id != nsd["id"]:
3160 requirer_dict["vnf-profile-id"] = requirer_id
3161 else:
3162 raise Exception(
3163 "provider/requirer or entities must be included in the relation."
3164 )
3165 relation_provider = self._update_ee_relation_data_with_implicit_data(
3166 nsr_id, nsd, provider_dict, cached_vnfds
3167 )
3168 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3169 nsr_id, nsd, requirer_dict, cached_vnfds
3170 )
3171 provider = EERelation(relation_provider)
3172 requirer = EERelation(relation_requirer)
3173 relation = Relation(r["name"], provider, requirer)
3174 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3175 if vca_in_relation:
3176 relations.append(relation)
3177 return relations
3178
3179 def _get_vnf_relations(
3180 self,
3181 nsr_id: str,
3182 nsd: Dict[str, Any],
3183 vca: DeployedVCA,
3184 cached_vnfds: Dict[str, Any],
3185 ) -> List[Relation]:
3186 relations = []
3187 if vca.target_element == "ns":
3188 self.logger.debug("VCA is a NS charm, not a VNF.")
3189 return relations
3190 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3191 vnf_profile_id = vnf_profile["id"]
3192 vnfd_id = vnf_profile["vnfd-id"]
3193 project = nsd["_admin"]["projects_read"][0]
3194 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3195 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3196 for r in db_vnf_relations:
3197 provider_dict = None
3198 requirer_dict = None
3199 if all(key in r for key in ("provider", "requirer")):
3200 provider_dict = r["provider"]
3201 requirer_dict = r["requirer"]
3202 elif "entities" in r:
3203 provider_id = r["entities"][0]["id"]
3204 provider_dict = {
3205 "nsr-id": nsr_id,
3206 "vnf-profile-id": vnf_profile_id,
3207 "endpoint": r["entities"][0]["endpoint"],
3208 }
3209 if provider_id != vnfd_id:
3210 provider_dict["vdu-profile-id"] = provider_id
3211 requirer_id = r["entities"][1]["id"]
3212 requirer_dict = {
3213 "nsr-id": nsr_id,
3214 "vnf-profile-id": vnf_profile_id,
3215 "endpoint": r["entities"][1]["endpoint"],
3216 }
3217 if requirer_id != vnfd_id:
3218 requirer_dict["vdu-profile-id"] = requirer_id
3219 else:
3220 raise Exception(
3221 "provider/requirer or entities must be included in the relation."
3222 )
3223 relation_provider = self._update_ee_relation_data_with_implicit_data(
3224 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3225 )
3226 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3227 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3228 )
3229 provider = EERelation(relation_provider)
3230 requirer = EERelation(relation_requirer)
3231 relation = Relation(r["name"], provider, requirer)
3232 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3233 if vca_in_relation:
3234 relations.append(relation)
3235 return relations
3236
3237 def _get_kdu_resource_data(
3238 self,
3239 ee_relation: EERelation,
3240 db_nsr: Dict[str, Any],
3241 cached_vnfds: Dict[str, Any],
3242 ) -> DeployedK8sResource:
3243 nsd = get_nsd(db_nsr)
3244 vnf_profiles = get_vnf_profiles(nsd)
3245 vnfd_id = find_in_list(
3246 vnf_profiles,
3247 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3248 )["vnfd-id"]
3249 project = nsd["_admin"]["projects_read"][0]
3250 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3251 kdu_resource_profile = get_kdu_resource_profile(
3252 db_vnfd, ee_relation.kdu_resource_profile_id
3253 )
3254 kdu_name = kdu_resource_profile["kdu-name"]
3255 deployed_kdu, _ = get_deployed_kdu(
3256 db_nsr.get("_admin", ()).get("deployed", ()),
3257 kdu_name,
3258 ee_relation.vnf_profile_id,
3259 )
3260 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3261 return deployed_kdu
3262
3263 def _get_deployed_component(
3264 self,
3265 ee_relation: EERelation,
3266 db_nsr: Dict[str, Any],
3267 cached_vnfds: Dict[str, Any],
3268 ) -> DeployedComponent:
3269 nsr_id = db_nsr["_id"]
3270 deployed_component = None
3271 ee_level = EELevel.get_level(ee_relation)
3272 if ee_level == EELevel.NS:
3273 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3274 if vca:
3275 deployed_component = DeployedVCA(nsr_id, vca)
3276 elif ee_level == EELevel.VNF:
3277 vca = get_deployed_vca(
3278 db_nsr,
3279 {
3280 "vdu_id": None,
3281 "member-vnf-index": ee_relation.vnf_profile_id,
3282 "ee_descriptor_id": ee_relation.execution_environment_ref,
3283 },
3284 )
3285 if vca:
3286 deployed_component = DeployedVCA(nsr_id, vca)
3287 elif ee_level == EELevel.VDU:
3288 vca = get_deployed_vca(
3289 db_nsr,
3290 {
3291 "vdu_id": ee_relation.vdu_profile_id,
3292 "member-vnf-index": ee_relation.vnf_profile_id,
3293 "ee_descriptor_id": ee_relation.execution_environment_ref,
3294 },
3295 )
3296 if vca:
3297 deployed_component = DeployedVCA(nsr_id, vca)
3298 elif ee_level == EELevel.KDU:
3299 kdu_resource_data = self._get_kdu_resource_data(
3300 ee_relation, db_nsr, cached_vnfds
3301 )
3302 if kdu_resource_data:
3303 deployed_component = DeployedK8sResource(kdu_resource_data)
3304 return deployed_component
3305
3306 async def _add_relation(
3307 self,
3308 relation: Relation,
3309 vca_type: str,
3310 db_nsr: Dict[str, Any],
3311 cached_vnfds: Dict[str, Any],
3312 cached_vnfrs: Dict[str, Any],
3313 ) -> bool:
3314 deployed_provider = self._get_deployed_component(
3315 relation.provider, db_nsr, cached_vnfds
3316 )
3317 deployed_requirer = self._get_deployed_component(
3318 relation.requirer, db_nsr, cached_vnfds
3319 )
3320 if (
3321 deployed_provider
3322 and deployed_requirer
3323 and deployed_provider.config_sw_installed
3324 and deployed_requirer.config_sw_installed
3325 ):
3326 provider_db_vnfr = (
3327 self._get_vnfr(
3328 relation.provider.nsr_id,
3329 relation.provider.vnf_profile_id,
3330 cached_vnfrs,
3331 )
3332 if relation.provider.vnf_profile_id
3333 else None
3334 )
3335 requirer_db_vnfr = (
3336 self._get_vnfr(
3337 relation.requirer.nsr_id,
3338 relation.requirer.vnf_profile_id,
3339 cached_vnfrs,
3340 )
3341 if relation.requirer.vnf_profile_id
3342 else None
3343 )
3344 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3345 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3346 provider_relation_endpoint = RelationEndpoint(
3347 deployed_provider.ee_id,
3348 provider_vca_id,
3349 relation.provider.endpoint,
3350 )
3351 requirer_relation_endpoint = RelationEndpoint(
3352 deployed_requirer.ee_id,
3353 requirer_vca_id,
3354 relation.requirer.endpoint,
3355 )
3356 try:
3357 await self.vca_map[vca_type].add_relation(
3358 provider=provider_relation_endpoint,
3359 requirer=requirer_relation_endpoint,
3360 )
3361 except N2VCException as exception:
3362 self.logger.error(exception)
3363 raise LcmException(exception)
3364 return True
3365 return False
3366
3367 async def _add_vca_relations(
3368 self,
3369 logging_text,
3370 nsr_id,
3371 vca_type: str,
3372 vca_index: int,
3373 timeout: int = 3600,
3374 ) -> bool:
3375 # steps:
3376 # 1. find all relations for this VCA
3377 # 2. wait for other peers related
3378 # 3. add relations
3379
3380 try:
3381 # STEP 1: find all relations for this VCA
3382
3383 # read nsr record
3384 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3385 nsd = get_nsd(db_nsr)
3386
3387 # this VCA data
3388 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3389 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3390
3391 cached_vnfds = {}
3392 cached_vnfrs = {}
3393 relations = []
3394 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3395 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3396
3397 # if no relations, terminate
3398 if not relations:
3399 self.logger.debug(logging_text + " No relations")
3400 return True
3401
3402 self.logger.debug(logging_text + " adding relations {}".format(relations))
3403
3404 # add all relations
3405 start = time()
3406 while True:
3407 # check timeout
3408 now = time()
3409 if now - start >= timeout:
3410 self.logger.error(logging_text + " : timeout adding relations")
3411 return False
3412
3413 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3414 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3415
3416 # for each relation, find the VCA's related
3417 for relation in relations.copy():
3418 added = await self._add_relation(
3419 relation,
3420 vca_type,
3421 db_nsr,
3422 cached_vnfds,
3423 cached_vnfrs,
3424 )
3425 if added:
3426 relations.remove(relation)
3427
3428 if not relations:
3429 self.logger.debug("Relations added")
3430 break
3431 await asyncio.sleep(5.0)
3432
3433 return True
3434
3435 except Exception as e:
3436 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3437 return False
3438
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and update the DB records.

        Installs the KDU described by *k8s_instance_info* through the mapped
        cluster client, records the kdu-instance name (and, for juju bundles,
        the effective namespace) in the nsr, fills the vnfr kdur entry with
        services / management IP, and finally runs any initial-config
        primitives that are not handled by an execution environment.

        :param nsr_id: _id of the nsrs record being instantiated
        :param nsr_db_path: dotted path of this KDU under _admin.deployed.K8s
        :param vnfr_data: vnfrs record owning the kdur entry
        :param kdu_index: index of the kdur entry inside the vnfr
        :param kdud: KDU descriptor (entry of the vnfd "kdu" list)
        :param vnfd: VNFD containing the KDU
        :param k8s_instance_info: cluster/type/model/namespace install data
        :param k8params: optional parameters passed to the install
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: optional VCA id forwarded to the cluster client
        :return: the kdu_instance name used for the deployment
        :raises Exception: re-raises any failure after recording it in the DB
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honour an explicit deployment name from the descriptor;
            # otherwise let the cluster client generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # services flagged mgmt-service in the descriptor carry the
                # management IP of the KDU
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                    if find_in_list(
                                        target_ee_list,
                                        lambda ee: ee.get(
                                            "external-connection-point-ref", ""
                                        )
                                        == service_external_cp,
                                    ):
                                        vnfr_update_dict[
                                            "kdur.{}.ip-address".format(kdu_index)
                                        ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial-config-primitives directly on the cluster only when
            # the KDU has no juju execution environment to run them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives must run in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception as error:
                # ignore to keep original exception
                self.logger.warning(
                    f"An exception occurred while updating DB: {str(error)}"
                )
            # reraise original error
            raise

        return kdu_instance
3637
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU present in the NS VNFRs.

        For each kdur entry of each vnfr: resolves the target K8s cluster
        (initializing helm-v3 data for legacy clusters when needed),
        synchronizes helm repos once per cluster, records the KDU under
        _admin.deployed.K8s in the nsr, and starts one _install_kdu task per
        KDU, registering it in *task_instantiation_info*.

        :param logging_text: prefix for log messages
        :param nsr_id: id of the nsrs record
        :param nslcmop_id: id of the current operation (for task registration)
        :param db_vnfrs: dict of vnfr records keyed by member index
        :param db_vnfds: list of vnfd records of the NS
        :param task_instantiation_info: dict task -> description, filled here
        :raises LcmException: on any failure (the failing step is logged)
        """
        # Launch kdus if present in the descriptor

        # cache: cluster-type -> {k8s-cluster record id -> cluster uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the usable cluster uuid for cluster_id,
            # waiting for any in-flight k8scluster task and lazily
            # initializing helm-v3 info for pre-existing clusters.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception as e:  # it is not a file
                        self.logger.warning(f"An exception occurred: {str(e)}")

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    # NOTE(review): k8sclustertype can only be "helm-chart-v3" or
                    # "juju-bundle" at this point, so the "helm-chart" comparisons
                    # below look like dead legacy branches — confirm before removing.
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3903
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of a config.

        For every juju/helm execution environment found in *descriptor_config*,
        reuses (or creates) the corresponding _admin.deployed.VCA entry in the
        nsr and starts an asyncio task that instantiates the charm, registering
        it in *task_instantiation_info*.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # a "charm" entry implies a proxy charm running in a LXC
                # container, unless cloud/proxy fields say otherwise
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # find an existing VCA entry for this target; the for/else below
            # creates one (and advances vca_index) when none matches
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4064
4065 def _format_additional_params(self, params):
4066 params = params or {}
4067 for key, value in params.items():
4068 if str(value).startswith("!!yaml "):
4069 params[key] = yaml.safe_load(value[7:])
4070 return params
4071
4072 def _get_terminate_primitive_params(self, seq, vnf_index):
4073 primitive = seq.get("name")
4074 primitive_params = {}
4075 params = {
4076 "member_vnf_index": vnf_index,
4077 "primitive": primitive,
4078 "primitive_params": primitive_params,
4079 }
4080 desc_params = {}
4081 return self._map_primitive_params(seq, params, desc_params)
4082
4083 # sub-operations
4084
4085 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4086 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4087 if op.get("operationState") == "COMPLETED":
4088 # b. Skip sub-operation
4089 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4090 return self.SUBOPERATION_STATUS_SKIP
4091 else:
4092 # c. retry executing sub-operation
4093 # The sub-operation exists, and operationState != 'COMPLETED'
4094 # Update operationState = 'PROCESSING' to indicate a retry.
4095 operationState = "PROCESSING"
4096 detailed_status = "In progress"
4097 self._update_suboperation_status(
4098 db_nslcmop, op_index, operationState, detailed_status
4099 )
4100 # Return the sub-operation index
4101 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4102 # with arguments extracted from the sub-operation
4103 return op_index
4104
4105 # Find a sub-operation where all keys in a matching dictionary must match
4106 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4107 def _find_suboperation(self, db_nslcmop, match):
4108 if db_nslcmop and match:
4109 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4110 for i, op in enumerate(op_list):
4111 if all(op.get(k) == match[k] for k in match):
4112 return i
4113 return self.SUBOPERATION_STATUS_NOT_FOUND
4114
4115 # Update status for a sub-operation given its index
4116 def _update_suboperation_status(
4117 self, db_nslcmop, op_index, operationState, detailed_status
4118 ):
4119 # Update DB for HA tasks
4120 q_filter = {"_id": db_nslcmop["_id"]}
4121 update_dict = {
4122 "_admin.operations.{}.operationState".format(op_index): operationState,
4123 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4124 }
4125 self.db.set_one(
4126 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4127 )
4128
4129 # Add sub-operation, return the index of the added sub-operation
4130 # Optionally, set operationState, detailed-status, and operationType
4131 # Status and type are currently set for 'scale' sub-operations:
4132 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4133 # 'detailed-status' : status message
4134 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4135 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4136 def _add_suboperation(
4137 self,
4138 db_nslcmop,
4139 vnf_index,
4140 vdu_id,
4141 vdu_count_index,
4142 vdu_name,
4143 primitive,
4144 mapped_primitive_params,
4145 operationState=None,
4146 detailed_status=None,
4147 operationType=None,
4148 RO_nsr_id=None,
4149 RO_scaling_info=None,
4150 ):
4151 if not db_nslcmop:
4152 return self.SUBOPERATION_STATUS_NOT_FOUND
4153 # Get the "_admin.operations" list, if it exists
4154 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4155 op_list = db_nslcmop_admin.get("operations")
4156 # Create or append to the "_admin.operations" list
4157 new_op = {
4158 "member_vnf_index": vnf_index,
4159 "vdu_id": vdu_id,
4160 "vdu_count_index": vdu_count_index,
4161 "primitive": primitive,
4162 "primitive_params": mapped_primitive_params,
4163 }
4164 if operationState:
4165 new_op["operationState"] = operationState
4166 if detailed_status:
4167 new_op["detailed-status"] = detailed_status
4168 if operationType:
4169 new_op["lcmOperationType"] = operationType
4170 if RO_nsr_id:
4171 new_op["RO_nsr_id"] = RO_nsr_id
4172 if RO_scaling_info:
4173 new_op["RO_scaling_info"] = RO_scaling_info
4174 if not op_list:
4175 # No existing operations, create key 'operations' with current operation as first list element
4176 db_nslcmop_admin.update({"operations": [new_op]})
4177 op_list = db_nslcmop_admin.get("operations")
4178 else:
4179 # Existing operations, append operation to list
4180 op_list.append(new_op)
4181
4182 db_nslcmop_update = {"_admin.operations": op_list}
4183 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4184 op_index = len(op_list) - 1
4185 return op_index
4186
4187 # Helper methods for scale() sub-operations
4188
4189 # pre-scale/post-scale:
4190 # Check for 3 different cases:
4191 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4192 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4193 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4194 def _check_or_add_scale_suboperation(
4195 self,
4196 db_nslcmop,
4197 vnf_index,
4198 vnf_config_primitive,
4199 primitive_params,
4200 operationType,
4201 RO_nsr_id=None,
4202 RO_scaling_info=None,
4203 ):
4204 # Find this sub-operation
4205 if RO_nsr_id and RO_scaling_info:
4206 operationType = "SCALE-RO"
4207 match = {
4208 "member_vnf_index": vnf_index,
4209 "RO_nsr_id": RO_nsr_id,
4210 "RO_scaling_info": RO_scaling_info,
4211 }
4212 else:
4213 match = {
4214 "member_vnf_index": vnf_index,
4215 "primitive": vnf_config_primitive,
4216 "primitive_params": primitive_params,
4217 "lcmOperationType": operationType,
4218 }
4219 op_index = self._find_suboperation(db_nslcmop, match)
4220 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4221 # a. New sub-operation
4222 # The sub-operation does not exist, add it.
4223 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4224 # The following parameters are set to None for all kind of scaling:
4225 vdu_id = None
4226 vdu_count_index = None
4227 vdu_name = None
4228 if RO_nsr_id and RO_scaling_info:
4229 vnf_config_primitive = None
4230 primitive_params = None
4231 else:
4232 RO_nsr_id = None
4233 RO_scaling_info = None
4234 # Initial status for sub-operation
4235 operationState = "PROCESSING"
4236 detailed_status = "In progress"
4237 # Add sub-operation for pre/post-scaling (zero or more operations)
4238 self._add_suboperation(
4239 db_nslcmop,
4240 vnf_index,
4241 vdu_id,
4242 vdu_count_index,
4243 vdu_name,
4244 vnf_config_primitive,
4245 primitive_params,
4246 operationState,
4247 detailed_status,
4248 operationType,
4249 RO_nsr_id,
4250 RO_scaling_info,
4251 )
4252 return self.SUBOPERATION_STATUS_NEW
4253 else:
4254 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4255 # or op_index (operationState != 'COMPLETED')
4256 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4257
    # Destroy an execution environment (running its terminate primitives first)
4259
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix added to every log message
        :param db_nslcmop: nslcmop database record of the running operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy it here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) identifier, when not the default one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default kept for backward compatibility with records created before "type" existed
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so a retrying HA instance can track it
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4365
4366 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4367 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4368 namespace = "." + db_nsr["_id"]
4369 try:
4370 await self.n2vc.delete_namespace(
4371 namespace=namespace,
4372 total_timeout=self.timeout.charm_delete,
4373 vca_id=vca_id,
4374 )
4375 except N2VCNotFound: # already deleted. Skip
4376 pass
4377 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4378
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS: run terminate primitives, delete VCAs, KDUs and VIM resources.

        Entry point for the NS terminate LCM operation (HA-locked). Progress
        is reported in three stages written to the nslcmop record:
        1/3 prepare, 2/3 execute terminating primitives, 3/3 delete all.
        :param nsr_id: NS record identifier
        :param nslcmop_id: NS LCM operation record identifier
        :return: None; the result is stored in the database and notified on kafka
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps each created asyncio task -> description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # a per-operation timeout overrides the configured default
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy of the deployed info; the db record is only
            # updated through explicit update_db_2/_write_* calls
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; each VNFD is fetched from db only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor at the same level the VCA was deployed
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    namespace=db_nslcmop["nsInstanceId"],
                    certificate_name=self.EE_TLS_NAME,
                )
                await self.vca_map["helm-v3"].delete_namespace(
                    namespace=db_nslcmop["nsInstanceId"],
                )

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # cancel the remaining tasks and wait for them to finish
                await self._cancel_pending_tasks(logging_text, tasks_dict_info)
                await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    timeout_ns_terminate,
                    stage,
                    nslcmop_id,
                )
            # NOTE: "exc" here shadows the outer variable; harmless since the
            # outer value was already appended to error_list above
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
                self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
                self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4717
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of tasks, accumulating errors and reporting progress.

        :param logging_text: prefix added to every log message
        :param created_tasks_info: dict mapping each asyncio task to a human
            readable description used in logs and error texts
        :param timeout: overall timeout in seconds for the whole set of tasks
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is
            updated in place with "<done>/<total>" progress and error summaries
        :param nslcmop_id: operation id whose status is updated with the stage
        :param nsr_id: when given, errorDescription/errorDetail of the nsr
            record are updated as well
        :return: list of error detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout for this round of waiting
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # annotate every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected error types are logged without a traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        # unexpected exception: log the full traceback
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4794
4795 async def _cancel_pending_tasks(self, logging_text, created_tasks_info):
4796 for task, name in created_tasks_info.items():
4797 self.logger.debug(logging_text + "Cancelling task: " + name)
4798 task.cancel()
4799
4800 @staticmethod
4801 def _map_primitive_params(primitive_desc, params, instantiation_params):
4802 """
4803 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4804 The default-value is used. If it is between < > it look for a value at instantiation_params
4805 :param primitive_desc: portion of VNFD/NSD that describes primitive
4806 :param params: Params provided by user
4807 :param instantiation_params: Instantiation params provided by user
4808 :return: a dictionary with the calculated params
4809 """
4810 calculated_params = {}
4811 for parameter in primitive_desc.get("parameter", ()):
4812 param_name = parameter["name"]
4813 if param_name in params:
4814 calculated_params[param_name] = params[param_name]
4815 elif "default-value" in parameter or "value" in parameter:
4816 if "value" in parameter:
4817 calculated_params[param_name] = parameter["value"]
4818 else:
4819 calculated_params[param_name] = parameter["default-value"]
4820 if (
4821 isinstance(calculated_params[param_name], str)
4822 and calculated_params[param_name].startswith("<")
4823 and calculated_params[param_name].endswith(">")
4824 ):
4825 if calculated_params[param_name][1:-1] in instantiation_params:
4826 calculated_params[param_name] = instantiation_params[
4827 calculated_params[param_name][1:-1]
4828 ]
4829 else:
4830 raise LcmException(
4831 "Parameter {} needed to execute primitive {} not provided".format(
4832 calculated_params[param_name], primitive_desc["name"]
4833 )
4834 )
4835 else:
4836 raise LcmException(
4837 "Parameter {} needed to execute primitive {} not provided".format(
4838 param_name, primitive_desc["name"]
4839 )
4840 )
4841
4842 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4843 calculated_params[param_name] = yaml.safe_dump(
4844 calculated_params[param_name], default_flow_style=True, width=256
4845 )
4846 elif isinstance(calculated_params[param_name], str) and calculated_params[
4847 param_name
4848 ].startswith("!!yaml "):
4849 calculated_params[param_name] = calculated_params[param_name][7:]
4850 if parameter.get("data-type") == "INTEGER":
4851 try:
4852 calculated_params[param_name] = int(calculated_params[param_name])
4853 except ValueError: # error converting string to int
4854 raise LcmException(
4855 "Parameter {} of primitive {} must be integer".format(
4856 param_name, primitive_desc["name"]
4857 )
4858 )
4859 elif parameter.get("data-type") == "BOOLEAN":
4860 calculated_params[param_name] = not (
4861 (str(calculated_params[param_name])).lower() == "false"
4862 )
4863
4864 # add always ns_config_info if primitive name is config
4865 if primitive_desc["name"] == "config":
4866 if "ns_config_info" in instantiation_params:
4867 calculated_params["ns_config_info"] = instantiation_params[
4868 "ns_config_info"
4869 ]
4870 return calculated_params
4871
4872 def _look_for_deployed_vca(
4873 self,
4874 deployed_vca,
4875 member_vnf_index,
4876 vdu_id,
4877 vdu_count_index,
4878 kdu_name=None,
4879 ee_descriptor_id=None,
4880 ):
4881 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4882 for vca in deployed_vca:
4883 if not vca:
4884 continue
4885 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4886 continue
4887 if (
4888 vdu_count_index is not None
4889 and vdu_count_index != vca["vdu_count_index"]
4890 ):
4891 continue
4892 if kdu_name and kdu_name != vca["kdu_name"]:
4893 continue
4894 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4895 continue
4896 break
4897 else:
4898 # vca_deployed not found
4899 raise LcmException(
4900 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4901 " is not deployed".format(
4902 member_vnf_index,
4903 vdu_id,
4904 vdu_count_index,
4905 kdu_name,
4906 ee_descriptor_id,
4907 )
4908 )
4909 # get ee_id
4910 ee_id = vca.get("ee_id")
4911 vca_type = vca.get(
4912 "type", "lxc_proxy_charm"
4913 ) # default value for backward compatibility - proxy charm
4914 if not ee_id:
4915 raise LcmException(
4916 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4917 "execution environment".format(
4918 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4919 )
4920 )
4921 return ee_id, vca_type
4922
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive in an execution environment, with retries.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive (action) name; "config" gets its params
            wrapped as {"params": ...}
        :param primitive_params: parameters for the primitive
        :param retries: number of extra attempts on failure (0 = single try)
        :param retries_interval: seconds to wait between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout.primitive
        :param vca_type: key of self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: where the connector writes status updates, if any
        :param vca_id: VCA (juju controller) id, when not the default one
        :return: tuple (status, detail): ("COMPLETED", output) on success,
            ("FAILED"/"FAIL", message) on failure
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout.progress_primitive,
                            total_timeout=self.timeout.primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout.primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval)
                    else:
                        # retries exhausted: report failure to the caller
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # NOTE(review): returns "FAIL" here while the retry-exhausted path
            # above returns "FAILED" — confirm callers treat both as failure
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4983
4984 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4985 """
4986 Updating the vca_status with latest juju information in nsrs record
4987 :param: nsr_id: Id of the nsr
4988 :param: nslcmop_id: Id of the nslcmop
4989 :return: None
4990 """
4991
4992 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4993 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4994 vca_id = self.get_vca_id({}, db_nsr)
4995 if db_nsr["_admin"]["deployed"]["K8s"]:
4996 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4997 cluster_uuid, kdu_instance, cluster_type = (
4998 k8s["k8scluster-uuid"],
4999 k8s["kdu-instance"],
5000 k8s["k8scluster-type"],
5001 )
5002 await self._on_update_k8s_db(
5003 cluster_uuid=cluster_uuid,
5004 kdu_instance=kdu_instance,
5005 filter={"_id": nsr_id},
5006 vca_id=vca_id,
5007 cluster_type=cluster_type,
5008 )
5009 else:
5010 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5011 table, filter = "nsrs", {"_id": nsr_id}
5012 path = "_admin.deployed.VCA.{}.".format(vca_index)
5013 await self._on_update_n2vc_db(table, filter, path, {})
5014
5015 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5016 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5017
5018 async def action(self, nsr_id, nslcmop_id):
5019 # Try to lock HA task here
5020 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5021 if not task_is_locked_by_me:
5022 return
5023
5024 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5025 self.logger.debug(logging_text + "Enter")
5026 # get all needed from database
5027 db_nsr = None
5028 db_nslcmop = None
5029 db_nsr_update = {}
5030 db_nslcmop_update = {}
5031 nslcmop_operation_state = None
5032 error_description_nslcmop = None
5033 exc = None
5034 step = ""
5035 try:
5036 # wait for any previous tasks in process
5037 step = "Waiting for previous operations to terminate"
5038 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5039
5040 self._write_ns_status(
5041 nsr_id=nsr_id,
5042 ns_state=None,
5043 current_operation="RUNNING ACTION",
5044 current_operation_id=nslcmop_id,
5045 )
5046
5047 step = "Getting information from database"
5048 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5049 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5050 if db_nslcmop["operationParams"].get("primitive_params"):
5051 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5052 db_nslcmop["operationParams"]["primitive_params"]
5053 )
5054
5055 nsr_deployed = db_nsr["_admin"].get("deployed")
5056 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
5057 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
5058 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
5059 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
5060 primitive = db_nslcmop["operationParams"]["primitive"]
5061 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
5062 timeout_ns_action = db_nslcmop["operationParams"].get(
5063 "timeout_ns_action", self.timeout.primitive
5064 )
5065
5066 if vnf_index:
5067 step = "Getting vnfr from database"
5068 db_vnfr = self.db.get_one(
5069 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5070 )
5071 if db_vnfr.get("kdur"):
5072 kdur_list = []
5073 for kdur in db_vnfr["kdur"]:
5074 if kdur.get("additionalParams"):
5075 kdur["additionalParams"] = json.loads(
5076 kdur["additionalParams"]
5077 )
5078 kdur_list.append(kdur)
5079 db_vnfr["kdur"] = kdur_list
5080 step = "Getting vnfd from database"
5081 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5082
5083 # Sync filesystem before running a primitive
5084 self.fs.sync(db_vnfr["vnfd-id"])
5085 else:
5086 step = "Getting nsd from database"
5087 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
5088
5089 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5090 # for backward compatibility
5091 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5092 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5093 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5094 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5095
5096 # look for primitive
5097 config_primitive_desc = descriptor_configuration = None
5098 if vdu_id:
5099 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
5100 elif kdu_name:
5101 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
5102 elif vnf_index:
5103 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
5104 else:
5105 descriptor_configuration = db_nsd.get("ns-configuration")
5106
5107 if descriptor_configuration and descriptor_configuration.get(
5108 "config-primitive"
5109 ):
5110 for config_primitive in descriptor_configuration["config-primitive"]:
5111 if config_primitive["name"] == primitive:
5112 config_primitive_desc = config_primitive
5113 break
5114
5115 if not config_primitive_desc:
5116 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
5117 raise LcmException(
5118 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5119 primitive
5120 )
5121 )
5122 primitive_name = primitive
5123 ee_descriptor_id = None
5124 else:
5125 primitive_name = config_primitive_desc.get(
5126 "execution-environment-primitive", primitive
5127 )
5128 ee_descriptor_id = config_primitive_desc.get(
5129 "execution-environment-ref"
5130 )
5131
5132 if vnf_index:
5133 if vdu_id:
5134 vdur = next(
5135 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5136 )
5137 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5138 elif kdu_name:
5139 kdur = next(
5140 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5141 )
5142 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5143 else:
5144 desc_params = parse_yaml_strings(
5145 db_vnfr.get("additionalParamsForVnf")
5146 )
5147 else:
5148 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5149 if kdu_name and get_configuration(db_vnfd, kdu_name):
5150 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5151 actions = set()
5152 for primitive in kdu_configuration.get("initial-config-primitive", []):
5153 actions.add(primitive["name"])
5154 for primitive in kdu_configuration.get("config-primitive", []):
5155 actions.add(primitive["name"])
5156 kdu = find_in_list(
5157 nsr_deployed["K8s"],
5158 lambda kdu: kdu_name == kdu["kdu-name"]
5159 and kdu["member-vnf-index"] == vnf_index,
5160 )
5161 kdu_action = (
5162 True
5163 if primitive_name in actions
5164 and kdu["k8scluster-type"] != "helm-chart-v3"
5165 else False
5166 )
5167
5168 # TODO check if ns is in a proper status
5169 if kdu_name and (
5170 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5171 ):
5172 # kdur and desc_params already set from before
5173 if primitive_params:
5174 desc_params.update(primitive_params)
5175 # TODO Check if we will need something at vnf level
5176 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5177 if (
5178 kdu_name == kdu["kdu-name"]
5179 and kdu["member-vnf-index"] == vnf_index
5180 ):
5181 break
5182 else:
5183 raise LcmException(
5184 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5185 )
5186
5187 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5188 msg = "unknown k8scluster-type '{}'".format(
5189 kdu.get("k8scluster-type")
5190 )
5191 raise LcmException(msg)
5192
5193 db_dict = {
5194 "collection": "nsrs",
5195 "filter": {"_id": nsr_id},
5196 "path": "_admin.deployed.K8s.{}".format(index),
5197 }
5198 self.logger.debug(
5199 logging_text
5200 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5201 )
5202 step = "Executing kdu {}".format(primitive_name)
5203 if primitive_name == "upgrade":
5204 if desc_params.get("kdu_model"):
5205 kdu_model = desc_params.get("kdu_model")
5206 del desc_params["kdu_model"]
5207 else:
5208 kdu_model = kdu.get("kdu-model")
5209 if kdu_model.count("/") < 2: # helm chart is not embedded
5210 parts = kdu_model.split(sep=":")
5211 if len(parts) == 2:
5212 kdu_model = parts[0]
5213 if desc_params.get("kdu_atomic_upgrade"):
5214 atomic_upgrade = desc_params.get(
5215 "kdu_atomic_upgrade"
5216 ).lower() in ("yes", "true", "1")
5217 del desc_params["kdu_atomic_upgrade"]
5218 else:
5219 atomic_upgrade = True
5220
5221 detailed_status = await asyncio.wait_for(
5222 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5223 cluster_uuid=kdu.get("k8scluster-uuid"),
5224 kdu_instance=kdu.get("kdu-instance"),
5225 atomic=atomic_upgrade,
5226 kdu_model=kdu_model,
5227 params=desc_params,
5228 db_dict=db_dict,
5229 timeout=timeout_ns_action,
5230 ),
5231 timeout=timeout_ns_action + 10,
5232 )
5233 self.logger.debug(
5234 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5235 )
5236 elif primitive_name == "rollback":
5237 detailed_status = await asyncio.wait_for(
5238 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5239 cluster_uuid=kdu.get("k8scluster-uuid"),
5240 kdu_instance=kdu.get("kdu-instance"),
5241 db_dict=db_dict,
5242 ),
5243 timeout=timeout_ns_action,
5244 )
5245 elif primitive_name == "status":
5246 detailed_status = await asyncio.wait_for(
5247 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5248 cluster_uuid=kdu.get("k8scluster-uuid"),
5249 kdu_instance=kdu.get("kdu-instance"),
5250 vca_id=vca_id,
5251 ),
5252 timeout=timeout_ns_action,
5253 )
5254 else:
5255 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5256 kdu["kdu-name"], nsr_id
5257 )
5258 params = self._map_primitive_params(
5259 config_primitive_desc, primitive_params, desc_params
5260 )
5261
5262 detailed_status = await asyncio.wait_for(
5263 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5264 cluster_uuid=kdu.get("k8scluster-uuid"),
5265 kdu_instance=kdu_instance,
5266 primitive_name=primitive_name,
5267 params=params,
5268 db_dict=db_dict,
5269 timeout=timeout_ns_action,
5270 vca_id=vca_id,
5271 ),
5272 timeout=timeout_ns_action,
5273 )
5274
5275 if detailed_status:
5276 nslcmop_operation_state = "COMPLETED"
5277 else:
5278 detailed_status = ""
5279 nslcmop_operation_state = "FAILED"
5280 else:
5281 ee_id, vca_type = self._look_for_deployed_vca(
5282 nsr_deployed["VCA"],
5283 member_vnf_index=vnf_index,
5284 vdu_id=vdu_id,
5285 vdu_count_index=vdu_count_index,
5286 ee_descriptor_id=ee_descriptor_id,
5287 )
5288 for vca_index, vca_deployed in enumerate(
5289 db_nsr["_admin"]["deployed"]["VCA"]
5290 ):
5291 if vca_deployed.get("member-vnf-index") == vnf_index:
5292 db_dict = {
5293 "collection": "nsrs",
5294 "filter": {"_id": nsr_id},
5295 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5296 }
5297 break
5298 (
5299 nslcmop_operation_state,
5300 detailed_status,
5301 ) = await self._ns_execute_primitive(
5302 ee_id,
5303 primitive=primitive_name,
5304 primitive_params=self._map_primitive_params(
5305 config_primitive_desc, primitive_params, desc_params
5306 ),
5307 timeout=timeout_ns_action,
5308 vca_type=vca_type,
5309 db_dict=db_dict,
5310 vca_id=vca_id,
5311 )
5312
5313 db_nslcmop_update["detailed-status"] = detailed_status
5314 error_description_nslcmop = (
5315 detailed_status if nslcmop_operation_state == "FAILED" else ""
5316 )
5317 self.logger.debug(
5318 logging_text
5319 + "Done with result {} {}".format(
5320 nslcmop_operation_state, detailed_status
5321 )
5322 )
5323 return # database update is called inside finally
5324
5325 except (DbException, LcmException, N2VCException, K8sException) as e:
5326 self.logger.error(logging_text + "Exit Exception {}".format(e))
5327 exc = e
5328 except asyncio.CancelledError:
5329 self.logger.error(
5330 logging_text + "Cancelled Exception while '{}'".format(step)
5331 )
5332 exc = "Operation was cancelled"
5333 except asyncio.TimeoutError:
5334 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5335 exc = "Timeout"
5336 except Exception as e:
5337 exc = traceback.format_exc()
5338 self.logger.critical(
5339 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5340 exc_info=True,
5341 )
5342 finally:
5343 if exc:
5344 db_nslcmop_update[
5345 "detailed-status"
5346 ] = (
5347 detailed_status
5348 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5349 nslcmop_operation_state = "FAILED"
5350 if db_nsr:
5351 self._write_ns_status(
5352 nsr_id=nsr_id,
5353 ns_state=db_nsr[
5354 "nsState"
5355 ], # TODO check if degraded. For the moment use previous status
5356 current_operation="IDLE",
5357 current_operation_id=None,
5358 # error_description=error_description_nsr,
5359 # error_detail=error_detail,
5360 other_update=db_nsr_update,
5361 )
5362
5363 self._write_op_status(
5364 op_id=nslcmop_id,
5365 stage="",
5366 error_message=error_description_nslcmop,
5367 operation_state=nslcmop_operation_state,
5368 other_update=db_nslcmop_update,
5369 )
5370
5371 if nslcmop_operation_state:
5372 try:
5373 await self.msg.aiowrite(
5374 "ns",
5375 "actioned",
5376 {
5377 "nsr_id": nsr_id,
5378 "nslcmop_id": nslcmop_id,
5379 "operationState": nslcmop_operation_state,
5380 },
5381 )
5382 except Exception as e:
5383 self.logger.error(
5384 logging_text + "kafka_write notification Exception {}".format(e)
5385 )
5386 self.logger.debug(logging_text + "Exit")
5387 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5388 return nslcmop_operation_state, detailed_status
5389
5390 async def terminate_vdus(
5391 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5392 ):
5393 """This method terminates VDUs
5394
5395 Args:
5396 db_vnfr: VNF instance record
5397 member_vnf_index: VNF index to identify the VDUs to be removed
5398 db_nsr: NS instance record
5399 update_db_nslcmops: Nslcmop update record
5400 """
5401 vca_scaling_info = []
5402 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5403 scaling_info["scaling_direction"] = "IN"
5404 scaling_info["vdu-delete"] = {}
5405 scaling_info["kdu-delete"] = {}
5406 db_vdur = db_vnfr.get("vdur")
5407 vdur_list = copy(db_vdur)
5408 count_index = 0
5409 for index, vdu in enumerate(vdur_list):
5410 vca_scaling_info.append(
5411 {
5412 "osm_vdu_id": vdu["vdu-id-ref"],
5413 "member-vnf-index": member_vnf_index,
5414 "type": "delete",
5415 "vdu_index": count_index,
5416 }
5417 )
5418 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5419 scaling_info["vdu"].append(
5420 {
5421 "name": vdu.get("name") or vdu.get("vdu-name"),
5422 "vdu_id": vdu["vdu-id-ref"],
5423 "interface": [],
5424 }
5425 )
5426 for interface in vdu["interfaces"]:
5427 scaling_info["vdu"][index]["interface"].append(
5428 {
5429 "name": interface["name"],
5430 "ip_address": interface["ip-address"],
5431 "mac_address": interface.get("mac-address"),
5432 }
5433 )
5434 self.logger.info("NS update scaling info{}".format(scaling_info))
5435 stage[2] = "Terminating VDUs"
5436 if scaling_info.get("vdu-delete"):
5437 # scale_process = "RO"
5438 if self.ro_config.ng:
5439 await self._scale_ng_ro(
5440 logging_text,
5441 db_nsr,
5442 update_db_nslcmops,
5443 db_vnfr,
5444 scaling_info,
5445 stage,
5446 )
5447
5448 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5449 """This method is to Remove VNF instances from NS.
5450
5451 Args:
5452 nsr_id: NS instance id
5453 nslcmop_id: nslcmop id of update
5454 vnf_instance_id: id of the VNF instance to be removed
5455
5456 Returns:
5457 result: (str, str) COMPLETED/FAILED, details
5458 """
5459 try:
5460 db_nsr_update = {}
5461 logging_text = "Task ns={} update ".format(nsr_id)
5462 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5463 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5464 if check_vnfr_count > 1:
5465 stage = ["", "", ""]
5466 step = "Getting nslcmop from database"
5467 self.logger.debug(
5468 step + " after having waited for previous tasks to be completed"
5469 )
5470 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5471 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5472 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5473 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5474 """ db_vnfr = self.db.get_one(
5475 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5476
5477 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5478 await self.terminate_vdus(
5479 db_vnfr,
5480 member_vnf_index,
5481 db_nsr,
5482 update_db_nslcmops,
5483 stage,
5484 logging_text,
5485 )
5486
5487 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5488 constituent_vnfr.remove(db_vnfr.get("_id"))
5489 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5490 "constituent-vnfr-ref"
5491 )
5492 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5493 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5494 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5495 return "COMPLETED", "Done"
5496 else:
5497 step = "Terminate VNF Failed with"
5498 raise LcmException(
5499 "{} Cannot terminate the last VNF in this NS.".format(
5500 vnf_instance_id
5501 )
5502 )
5503 except (LcmException, asyncio.CancelledError):
5504 raise
5505 except Exception as e:
5506 self.logger.debug("Error removing VNF {}".format(e))
5507 return "FAILED", "Error removing VNF {}".format(e)
5508
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        It first terminates the existing VDUs of the VNF, rewrites the vnfr
        (connection points, vdur, revision) from the latest descriptor and the
        operation parameters, and then asks NG-RO to instantiate the new
        resources.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # vdu_index used for every created VDU; stays 0 (single instance)
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the vnfr connection points from the descriptor ext-cpd list
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is pre-computed by the caller and carried in the
            # operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the vnfr so the scaling step below works on updated data
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            # scale-OUT request re-creating every VDU of the descriptor
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                # NOTE(review): cloud_init_list is built but never consumed
                # afterwards; _parse_cloud_init may still be relied on here to
                # validate the template (it can raise) — confirm before removing.
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
                return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5634
5635 async def _ns_charm_upgrade(
5636 self,
5637 ee_id,
5638 charm_id,
5639 charm_type,
5640 path,
5641 timeout: float = None,
5642 ) -> (str, str):
5643 """This method upgrade charms in VNF instances
5644
5645 Args:
5646 ee_id: Execution environment id
5647 path: Local path to the charm
5648 charm_id: charm-id
5649 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5650 timeout: (Float) Timeout for the ns update operation
5651
5652 Returns:
5653 result: (str, str) COMPLETED/FAILED, details
5654 """
5655 try:
5656 charm_type = charm_type or "lxc_proxy_charm"
5657 output = await self.vca_map[charm_type].upgrade_charm(
5658 ee_id=ee_id,
5659 path=path,
5660 charm_id=charm_id,
5661 charm_type=charm_type,
5662 timeout=timeout or self.timeout.ns_update,
5663 )
5664
5665 if output:
5666 return "COMPLETED", output
5667
5668 except (LcmException, asyncio.CancelledError):
5669 raise
5670
5671 except Exception as e:
5672 self.logger.debug("Error upgrading charm {}".format(path))
5673
5674 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5675
5676 async def update(self, nsr_id, nslcmop_id):
5677 """Update NS according to different update types
5678
5679 This method performs upgrade of VNF instances then updates the revision
5680 number in VNF record
5681
5682 Args:
5683 nsr_id: Network service will be updated
5684 nslcmop_id: ns lcm operation id
5685
5686 Returns:
5687 It may raise DbException, LcmException, N2VCException, K8sException
5688
5689 """
5690 # Try to lock HA task here
5691 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5692 if not task_is_locked_by_me:
5693 return
5694
5695 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5696 self.logger.debug(logging_text + "Enter")
5697
5698 # Set the required variables to be filled up later
5699 db_nsr = None
5700 db_nslcmop_update = {}
5701 vnfr_update = {}
5702 nslcmop_operation_state = None
5703 db_nsr_update = {}
5704 error_description_nslcmop = ""
5705 exc = None
5706 change_type = "updated"
5707 detailed_status = ""
5708 member_vnf_index = None
5709
5710 try:
5711 # wait for any previous tasks in process
5712 step = "Waiting for previous operations to terminate"
5713 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5714 self._write_ns_status(
5715 nsr_id=nsr_id,
5716 ns_state=None,
5717 current_operation="UPDATING",
5718 current_operation_id=nslcmop_id,
5719 )
5720
5721 step = "Getting nslcmop from database"
5722 db_nslcmop = self.db.get_one(
5723 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5724 )
5725 update_type = db_nslcmop["operationParams"]["updateType"]
5726
5727 step = "Getting nsr from database"
5728 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5729 old_operational_status = db_nsr["operational-status"]
5730 db_nsr_update["operational-status"] = "updating"
5731 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5732 nsr_deployed = db_nsr["_admin"].get("deployed")
5733
5734 if update_type == "CHANGE_VNFPKG":
5735 # Get the input parameters given through update request
5736 vnf_instance_id = db_nslcmop["operationParams"][
5737 "changeVnfPackageData"
5738 ].get("vnfInstanceId")
5739
5740 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5741 "vnfdId"
5742 )
5743 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5744
5745 step = "Getting vnfr from database"
5746 db_vnfr = self.db.get_one(
5747 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5748 )
5749
5750 step = "Getting vnfds from database"
5751 # Latest VNFD
5752 latest_vnfd = self.db.get_one(
5753 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5754 )
5755 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5756
5757 # Current VNFD
5758 current_vnf_revision = db_vnfr.get("revision", 1)
5759 current_vnfd = self.db.get_one(
5760 "vnfds_revisions",
5761 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5762 fail_on_empty=False,
5763 )
5764 # Charm artifact paths will be filled up later
5765 (
5766 current_charm_artifact_path,
5767 target_charm_artifact_path,
5768 charm_artifact_paths,
5769 helm_artifacts,
5770 ) = ([], [], [], [])
5771
5772 step = "Checking if revision has changed in VNFD"
5773 if current_vnf_revision != latest_vnfd_revision:
5774 change_type = "policy_updated"
5775
5776 # There is new revision of VNFD, update operation is required
5777 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5778 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5779
5780 step = "Removing the VNFD packages if they exist in the local path"
5781 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5782 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5783
5784 step = "Get the VNFD packages from FSMongo"
5785 self.fs.sync(from_path=latest_vnfd_path)
5786 self.fs.sync(from_path=current_vnfd_path)
5787
5788 step = (
5789 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5790 )
5791 current_base_folder = current_vnfd["_admin"]["storage"]
5792 latest_base_folder = latest_vnfd["_admin"]["storage"]
5793
5794 for vca_index, vca_deployed in enumerate(
5795 get_iterable(nsr_deployed, "VCA")
5796 ):
5797 vnf_index = db_vnfr.get("member-vnf-index-ref")
5798
5799 # Getting charm-id and charm-type
5800 if vca_deployed.get("member-vnf-index") == vnf_index:
5801 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5802 vca_type = vca_deployed.get("type")
5803 vdu_count_index = vca_deployed.get("vdu_count_index")
5804
5805 # Getting ee-id
5806 ee_id = vca_deployed.get("ee_id")
5807
5808 step = "Getting descriptor config"
5809 if current_vnfd.get("kdu"):
5810 search_key = "kdu_name"
5811 else:
5812 search_key = "vnfd_id"
5813
5814 entity_id = vca_deployed.get(search_key)
5815
5816 descriptor_config = get_configuration(
5817 current_vnfd, entity_id
5818 )
5819
5820 if "execution-environment-list" in descriptor_config:
5821 ee_list = descriptor_config.get(
5822 "execution-environment-list", []
5823 )
5824 else:
5825 ee_list = []
5826
5827 # There could be several charm used in the same VNF
5828 for ee_item in ee_list:
5829 if ee_item.get("juju"):
5830 step = "Getting charm name"
5831 charm_name = ee_item["juju"].get("charm")
5832
5833 step = "Setting Charm artifact paths"
5834 current_charm_artifact_path.append(
5835 get_charm_artifact_path(
5836 current_base_folder,
5837 charm_name,
5838 vca_type,
5839 current_vnf_revision,
5840 )
5841 )
5842 target_charm_artifact_path.append(
5843 get_charm_artifact_path(
5844 latest_base_folder,
5845 charm_name,
5846 vca_type,
5847 latest_vnfd_revision,
5848 )
5849 )
5850 elif ee_item.get("helm-chart"):
5851 # add chart to list and all parameters
5852 step = "Getting helm chart name"
5853 chart_name = ee_item.get("helm-chart")
5854 vca_type = "helm-v3"
5855 step = "Setting Helm chart artifact paths"
5856
5857 helm_artifacts.append(
5858 {
5859 "current_artifact_path": get_charm_artifact_path(
5860 current_base_folder,
5861 chart_name,
5862 vca_type,
5863 current_vnf_revision,
5864 ),
5865 "target_artifact_path": get_charm_artifact_path(
5866 latest_base_folder,
5867 chart_name,
5868 vca_type,
5869 latest_vnfd_revision,
5870 ),
5871 "ee_id": ee_id,
5872 "vca_index": vca_index,
5873 "vdu_index": vdu_count_index,
5874 }
5875 )
5876
5877 charm_artifact_paths = zip(
5878 current_charm_artifact_path, target_charm_artifact_path
5879 )
5880
5881 step = "Checking if software version has changed in VNFD"
5882 if find_software_version(current_vnfd) != find_software_version(
5883 latest_vnfd
5884 ):
5885 step = "Checking if existing VNF has charm"
5886 for current_charm_path, target_charm_path in list(
5887 charm_artifact_paths
5888 ):
5889 if current_charm_path:
5890 raise LcmException(
5891 "Software version change is not supported as VNF instance {} has charm.".format(
5892 vnf_instance_id
5893 )
5894 )
5895
5896 # There is no change in the charm package, then redeploy the VNF
5897 # based on new descriptor
5898 step = "Redeploying VNF"
5899 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5900 (result, detailed_status) = await self._ns_redeploy_vnf(
5901 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5902 )
5903 if result == "FAILED":
5904 nslcmop_operation_state = result
5905 error_description_nslcmop = detailed_status
5906 db_nslcmop_update["detailed-status"] = detailed_status
5907 self.logger.debug(
5908 logging_text
5909 + " step {} Done with result {} {}".format(
5910 step, nslcmop_operation_state, detailed_status
5911 )
5912 )
5913
5914 else:
5915 step = "Checking if any charm package has changed or not"
5916 for current_charm_path, target_charm_path in list(
5917 charm_artifact_paths
5918 ):
5919 if (
5920 current_charm_path
5921 and target_charm_path
5922 and self.check_charm_hash_changed(
5923 current_charm_path, target_charm_path
5924 )
5925 ):
5926 step = "Checking whether VNF uses juju bundle"
5927 if check_juju_bundle_existence(current_vnfd):
5928 raise LcmException(
5929 "Charm upgrade is not supported for the instance which"
5930 " uses juju-bundle: {}".format(
5931 check_juju_bundle_existence(current_vnfd)
5932 )
5933 )
5934
5935 step = "Upgrading Charm"
5936 (
5937 result,
5938 detailed_status,
5939 ) = await self._ns_charm_upgrade(
5940 ee_id=ee_id,
5941 charm_id=vca_id,
5942 charm_type=vca_type,
5943 path=self.fs.path + target_charm_path,
5944 timeout=timeout_seconds,
5945 )
5946
5947 if result == "FAILED":
5948 nslcmop_operation_state = result
5949 error_description_nslcmop = detailed_status
5950
5951 db_nslcmop_update["detailed-status"] = detailed_status
5952 self.logger.debug(
5953 logging_text
5954 + " step {} Done with result {} {}".format(
5955 step, nslcmop_operation_state, detailed_status
5956 )
5957 )
5958
5959 step = "Updating policies"
5960 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5961 result = "COMPLETED"
5962 detailed_status = "Done"
5963 db_nslcmop_update["detailed-status"] = "Done"
5964
5965 # helm base EE
5966 for item in helm_artifacts:
5967 if not (
5968 item["current_artifact_path"]
5969 and item["target_artifact_path"]
5970 and self.check_charm_hash_changed(
5971 item["current_artifact_path"],
5972 item["target_artifact_path"],
5973 )
5974 ):
5975 continue
5976 db_update_entry = "_admin.deployed.VCA.{}.".format(
5977 item["vca_index"]
5978 )
5979 vnfr_id = db_vnfr["_id"]
5980 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
5981 db_dict = {
5982 "collection": "nsrs",
5983 "filter": {"_id": nsr_id},
5984 "path": db_update_entry,
5985 }
5986 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
5987 await self.vca_map[vca_type].upgrade_execution_environment(
5988 namespace=namespace,
5989 helm_id=helm_id,
5990 db_dict=db_dict,
5991 config=osm_config,
5992 artifact_path=item["target_artifact_path"],
5993 vca_type=vca_type,
5994 )
5995 vnf_id = db_vnfr.get("vnfd-ref")
5996 config_descriptor = get_configuration(latest_vnfd, vnf_id)
5997 self.logger.debug("get ssh key block")
5998 rw_mgmt_ip = None
5999 if deep_get(
6000 config_descriptor,
6001 ("config-access", "ssh-access", "required"),
6002 ):
6003 # Needed to inject a ssh key
6004 user = deep_get(
6005 config_descriptor,
6006 ("config-access", "ssh-access", "default-user"),
6007 )
6008 step = (
6009 "Install configuration Software, getting public ssh key"
6010 )
6011 pub_key = await self.vca_map[
6012 vca_type
6013 ].get_ee_ssh_public__key(
6014 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6015 )
6016
6017 step = (
6018 "Insert public key into VM user={} ssh_key={}".format(
6019 user, pub_key
6020 )
6021 )
6022 self.logger.debug(logging_text + step)
6023
6024 # wait for RO (ip-address) Insert pub_key into VM
6025 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6026 logging_text,
6027 nsr_id,
6028 vnfr_id,
6029 None,
6030 item["vdu_index"],
6031 user=user,
6032 pub_key=pub_key,
6033 )
6034
6035 initial_config_primitive_list = config_descriptor.get(
6036 "initial-config-primitive"
6037 )
6038 config_primitive = next(
6039 (
6040 p
6041 for p in initial_config_primitive_list
6042 if p["name"] == "config"
6043 ),
6044 None,
6045 )
6046 if not config_primitive:
6047 continue
6048
6049 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6050 if rw_mgmt_ip:
6051 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6052 if db_vnfr.get("additionalParamsForVnf"):
6053 deploy_params.update(
6054 parse_yaml_strings(
6055 db_vnfr["additionalParamsForVnf"].copy()
6056 )
6057 )
6058 primitive_params_ = self._map_primitive_params(
6059 config_primitive, {}, deploy_params
6060 )
6061
6062 step = "execute primitive '{}' params '{}'".format(
6063 config_primitive["name"], primitive_params_
6064 )
6065 self.logger.debug(logging_text + step)
6066 await self.vca_map[vca_type].exec_primitive(
6067 ee_id=ee_id,
6068 primitive_name=config_primitive["name"],
6069 params_dict=primitive_params_,
6070 db_dict=db_dict,
6071 vca_id=vca_id,
6072 vca_type=vca_type,
6073 )
6074
6075 step = "Updating policies"
6076 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6077 detailed_status = "Done"
6078 db_nslcmop_update["detailed-status"] = "Done"
6079
6080 # If nslcmop_operation_state is None, so any operation is not failed.
6081 if not nslcmop_operation_state:
6082 nslcmop_operation_state = "COMPLETED"
6083
6084 # If update CHANGE_VNFPKG nslcmop_operation is successful
6085 # vnf revision need to be updated
6086 vnfr_update["revision"] = latest_vnfd_revision
6087 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6088
6089 self.logger.debug(
6090 logging_text
6091 + " task Done with result {} {}".format(
6092 nslcmop_operation_state, detailed_status
6093 )
6094 )
6095 elif update_type == "REMOVE_VNF":
6096 # This part is included in https://osm.etsi.org/gerrit/11876
6097 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6098 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6099 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6100 step = "Removing VNF"
6101 (result, detailed_status) = await self.remove_vnf(
6102 nsr_id, nslcmop_id, vnf_instance_id
6103 )
6104 if result == "FAILED":
6105 nslcmop_operation_state = result
6106 error_description_nslcmop = detailed_status
6107 db_nslcmop_update["detailed-status"] = detailed_status
6108 change_type = "vnf_terminated"
6109 if not nslcmop_operation_state:
6110 nslcmop_operation_state = "COMPLETED"
6111 self.logger.debug(
6112 logging_text
6113 + " task Done with result {} {}".format(
6114 nslcmop_operation_state, detailed_status
6115 )
6116 )
6117
6118 elif update_type == "OPERATE_VNF":
6119 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6120 "vnfInstanceId"
6121 ]
6122 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6123 "changeStateTo"
6124 ]
6125 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6126 "additionalParam"
6127 ]
6128 (result, detailed_status) = await self.rebuild_start_stop(
6129 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6130 )
6131 if result == "FAILED":
6132 nslcmop_operation_state = result
6133 error_description_nslcmop = detailed_status
6134 db_nslcmop_update["detailed-status"] = detailed_status
6135 if not nslcmop_operation_state:
6136 nslcmop_operation_state = "COMPLETED"
6137 self.logger.debug(
6138 logging_text
6139 + " task Done with result {} {}".format(
6140 nslcmop_operation_state, detailed_status
6141 )
6142 )
6143
6144 # If nslcmop_operation_state is None, so any operation is not failed.
6145 # All operations are executed in overall.
6146 if not nslcmop_operation_state:
6147 nslcmop_operation_state = "COMPLETED"
6148 db_nsr_update["operational-status"] = old_operational_status
6149
6150 except (DbException, LcmException, N2VCException, K8sException) as e:
6151 self.logger.error(logging_text + "Exit Exception {}".format(e))
6152 exc = e
6153 except asyncio.CancelledError:
6154 self.logger.error(
6155 logging_text + "Cancelled Exception while '{}'".format(step)
6156 )
6157 exc = "Operation was cancelled"
6158 except asyncio.TimeoutError:
6159 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6160 exc = "Timeout"
6161 except Exception as e:
6162 exc = traceback.format_exc()
6163 self.logger.critical(
6164 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6165 exc_info=True,
6166 )
6167 finally:
6168 if exc:
6169 db_nslcmop_update[
6170 "detailed-status"
6171 ] = (
6172 detailed_status
6173 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6174 nslcmop_operation_state = "FAILED"
6175 db_nsr_update["operational-status"] = old_operational_status
6176 if db_nsr:
6177 self._write_ns_status(
6178 nsr_id=nsr_id,
6179 ns_state=db_nsr["nsState"],
6180 current_operation="IDLE",
6181 current_operation_id=None,
6182 other_update=db_nsr_update,
6183 )
6184
6185 self._write_op_status(
6186 op_id=nslcmop_id,
6187 stage="",
6188 error_message=error_description_nslcmop,
6189 operation_state=nslcmop_operation_state,
6190 other_update=db_nslcmop_update,
6191 )
6192
6193 if nslcmop_operation_state:
6194 try:
6195 msg = {
6196 "nsr_id": nsr_id,
6197 "nslcmop_id": nslcmop_id,
6198 "operationState": nslcmop_operation_state,
6199 }
6200 if (
6201 change_type in ("vnf_terminated", "policy_updated")
6202 and member_vnf_index
6203 ):
6204 msg.update({"vnf_member_index": member_vnf_index})
6205 await self.msg.aiowrite("ns", change_type, msg)
6206 except Exception as e:
6207 self.logger.error(
6208 logging_text + "kafka_write notification Exception {}".format(e)
6209 )
6210 self.logger.debug(logging_text + "Exit")
6211 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6212 return nslcmop_operation_state, detailed_status
6213
6214 async def scale(self, nsr_id, nslcmop_id):
6215 # Try to lock HA task here
6216 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6217 if not task_is_locked_by_me:
6218 return
6219
6220 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6221 stage = ["", "", ""]
6222 tasks_dict_info = {}
6223 # ^ stage, step, VIM progress
6224 self.logger.debug(logging_text + "Enter")
6225 # get all needed from database
6226 db_nsr = None
6227 db_nslcmop_update = {}
6228 db_nsr_update = {}
6229 exc = None
6230 # in case of error, indicates what part of scale was failed to put nsr at error status
6231 scale_process = None
6232 old_operational_status = ""
6233 old_config_status = ""
6234 nsi_id = None
6235 try:
6236 # wait for any previous tasks in process
6237 step = "Waiting for previous operations to terminate"
6238 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6239 self._write_ns_status(
6240 nsr_id=nsr_id,
6241 ns_state=None,
6242 current_operation="SCALING",
6243 current_operation_id=nslcmop_id,
6244 )
6245
6246 step = "Getting nslcmop from database"
6247 self.logger.debug(
6248 step + " after having waited for previous tasks to be completed"
6249 )
6250 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6251
6252 step = "Getting nsr from database"
6253 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6254 old_operational_status = db_nsr["operational-status"]
6255 old_config_status = db_nsr["config-status"]
6256
6257 step = "Parsing scaling parameters"
6258 db_nsr_update["operational-status"] = "scaling"
6259 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6260 nsr_deployed = db_nsr["_admin"].get("deployed")
6261
6262 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6263 "scaleByStepData"
6264 ]["member-vnf-index"]
6265 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6266 "scaleByStepData"
6267 ]["scaling-group-descriptor"]
6268 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6269 # for backward compatibility
6270 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6271 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6272 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6273 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6274
6275 step = "Getting vnfr from database"
6276 db_vnfr = self.db.get_one(
6277 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6278 )
6279
6280 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6281
6282 step = "Getting vnfd from database"
6283 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6284
6285 base_folder = db_vnfd["_admin"]["storage"]
6286
6287 step = "Getting scaling-group-descriptor"
6288 scaling_descriptor = find_in_list(
6289 get_scaling_aspect(db_vnfd),
6290 lambda scale_desc: scale_desc["name"] == scaling_group,
6291 )
6292 if not scaling_descriptor:
6293 raise LcmException(
6294 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6295 "at vnfd:scaling-group-descriptor".format(scaling_group)
6296 )
6297
6298 step = "Sending scale order to VIM"
6299 # TODO check if ns is in a proper status
6300 nb_scale_op = 0
6301 if not db_nsr["_admin"].get("scaling-group"):
6302 self.update_db_2(
6303 "nsrs",
6304 nsr_id,
6305 {
6306 "_admin.scaling-group": [
6307 {"name": scaling_group, "nb-scale-op": 0}
6308 ]
6309 },
6310 )
6311 admin_scale_index = 0
6312 else:
6313 for admin_scale_index, admin_scale_info in enumerate(
6314 db_nsr["_admin"]["scaling-group"]
6315 ):
6316 if admin_scale_info["name"] == scaling_group:
6317 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6318 break
6319 else: # not found, set index one plus last element and add new entry with the name
6320 admin_scale_index += 1
6321 db_nsr_update[
6322 "_admin.scaling-group.{}.name".format(admin_scale_index)
6323 ] = scaling_group
6324
6325 vca_scaling_info = []
6326 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6327 if scaling_type == "SCALE_OUT":
6328 if "aspect-delta-details" not in scaling_descriptor:
6329 raise LcmException(
6330 "Aspect delta details not fount in scaling descriptor {}".format(
6331 scaling_descriptor["name"]
6332 )
6333 )
6334 # count if max-instance-count is reached
6335 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6336
6337 scaling_info["scaling_direction"] = "OUT"
6338 scaling_info["vdu-create"] = {}
6339 scaling_info["kdu-create"] = {}
6340 for delta in deltas:
6341 for vdu_delta in delta.get("vdu-delta", {}):
6342 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6343 # vdu_index also provides the number of instance of the targeted vdu
6344 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6345 cloud_init_text = self._get_vdu_cloud_init_content(
6346 vdud, db_vnfd
6347 )
6348 if cloud_init_text:
6349 additional_params = (
6350 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6351 or {}
6352 )
6353 cloud_init_list = []
6354
6355 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6356 max_instance_count = 10
6357 if vdu_profile and "max-number-of-instances" in vdu_profile:
6358 max_instance_count = vdu_profile.get(
6359 "max-number-of-instances", 10
6360 )
6361
6362 default_instance_num = get_number_of_instances(
6363 db_vnfd, vdud["id"]
6364 )
6365 instances_number = vdu_delta.get("number-of-instances", 1)
6366 nb_scale_op += instances_number
6367
6368 new_instance_count = nb_scale_op + default_instance_num
6369 # Control if new count is over max and vdu count is less than max.
6370 # Then assign new instance count
6371 if new_instance_count > max_instance_count > vdu_count:
6372 instances_number = new_instance_count - max_instance_count
6373 else:
6374 instances_number = instances_number
6375
6376 if new_instance_count > max_instance_count:
6377 raise LcmException(
6378 "reached the limit of {} (max-instance-count) "
6379 "scaling-out operations for the "
6380 "scaling-group-descriptor '{}'".format(
6381 nb_scale_op, scaling_group
6382 )
6383 )
6384 for x in range(vdu_delta.get("number-of-instances", 1)):
6385 if cloud_init_text:
6386 # TODO Information of its own ip is not available because db_vnfr is not updated.
6387 additional_params["OSM"] = get_osm_params(
6388 db_vnfr, vdu_delta["id"], vdu_index + x
6389 )
6390 cloud_init_list.append(
6391 self._parse_cloud_init(
6392 cloud_init_text,
6393 additional_params,
6394 db_vnfd["id"],
6395 vdud["id"],
6396 )
6397 )
6398 vca_scaling_info.append(
6399 {
6400 "osm_vdu_id": vdu_delta["id"],
6401 "member-vnf-index": vnf_index,
6402 "type": "create",
6403 "vdu_index": vdu_index + x,
6404 }
6405 )
6406 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6407 for kdu_delta in delta.get("kdu-resource-delta", {}):
6408 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6409 kdu_name = kdu_profile["kdu-name"]
6410 resource_name = kdu_profile.get("resource-name", "")
6411
6412 # Might have different kdus in the same delta
6413 # Should have list for each kdu
6414 if not scaling_info["kdu-create"].get(kdu_name, None):
6415 scaling_info["kdu-create"][kdu_name] = []
6416
6417 kdur = get_kdur(db_vnfr, kdu_name)
6418 if kdur.get("helm-chart"):
6419 k8s_cluster_type = "helm-chart-v3"
6420 self.logger.debug("kdur: {}".format(kdur))
6421 elif kdur.get("juju-bundle"):
6422 k8s_cluster_type = "juju-bundle"
6423 else:
6424 raise LcmException(
6425 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6426 "juju-bundle. Maybe an old NBI version is running".format(
6427 db_vnfr["member-vnf-index-ref"], kdu_name
6428 )
6429 )
6430
6431 max_instance_count = 10
6432 if kdu_profile and "max-number-of-instances" in kdu_profile:
6433 max_instance_count = kdu_profile.get(
6434 "max-number-of-instances", 10
6435 )
6436
6437 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6438 deployed_kdu, _ = get_deployed_kdu(
6439 nsr_deployed, kdu_name, vnf_index
6440 )
6441 if deployed_kdu is None:
6442 raise LcmException(
6443 "KDU '{}' for vnf '{}' not deployed".format(
6444 kdu_name, vnf_index
6445 )
6446 )
6447 kdu_instance = deployed_kdu.get("kdu-instance")
6448 instance_num = await self.k8scluster_map[
6449 k8s_cluster_type
6450 ].get_scale_count(
6451 resource_name,
6452 kdu_instance,
6453 vca_id=vca_id,
6454 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6455 kdu_model=deployed_kdu.get("kdu-model"),
6456 )
6457 kdu_replica_count = instance_num + kdu_delta.get(
6458 "number-of-instances", 1
6459 )
6460
6461 # Control if new count is over max and instance_num is less than max.
6462 # Then assign max instance number to kdu replica count
6463 if kdu_replica_count > max_instance_count > instance_num:
6464 kdu_replica_count = max_instance_count
6465 if kdu_replica_count > max_instance_count:
6466 raise LcmException(
6467 "reached the limit of {} (max-instance-count) "
6468 "scaling-out operations for the "
6469 "scaling-group-descriptor '{}'".format(
6470 instance_num, scaling_group
6471 )
6472 )
6473
6474 for x in range(kdu_delta.get("number-of-instances", 1)):
6475 vca_scaling_info.append(
6476 {
6477 "osm_kdu_id": kdu_name,
6478 "member-vnf-index": vnf_index,
6479 "type": "create",
6480 "kdu_index": instance_num + x - 1,
6481 }
6482 )
6483 scaling_info["kdu-create"][kdu_name].append(
6484 {
6485 "member-vnf-index": vnf_index,
6486 "type": "create",
6487 "k8s-cluster-type": k8s_cluster_type,
6488 "resource-name": resource_name,
6489 "scale": kdu_replica_count,
6490 }
6491 )
6492 elif scaling_type == "SCALE_IN":
6493 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6494
6495 scaling_info["scaling_direction"] = "IN"
6496 scaling_info["vdu-delete"] = {}
6497 scaling_info["kdu-delete"] = {}
6498
6499 for delta in deltas:
6500 for vdu_delta in delta.get("vdu-delta", {}):
6501 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6502 min_instance_count = 0
6503 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6504 if vdu_profile and "min-number-of-instances" in vdu_profile:
6505 min_instance_count = vdu_profile["min-number-of-instances"]
6506
6507 default_instance_num = get_number_of_instances(
6508 db_vnfd, vdu_delta["id"]
6509 )
6510 instance_num = vdu_delta.get("number-of-instances", 1)
6511 nb_scale_op -= instance_num
6512
6513 new_instance_count = nb_scale_op + default_instance_num
6514
6515 if new_instance_count < min_instance_count < vdu_count:
6516 instances_number = min_instance_count - new_instance_count
6517 else:
6518 instances_number = instance_num
6519
6520 if new_instance_count < min_instance_count:
6521 raise LcmException(
6522 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6523 "scaling-group-descriptor '{}'".format(
6524 nb_scale_op, scaling_group
6525 )
6526 )
6527 for x in range(vdu_delta.get("number-of-instances", 1)):
6528 vca_scaling_info.append(
6529 {
6530 "osm_vdu_id": vdu_delta["id"],
6531 "member-vnf-index": vnf_index,
6532 "type": "delete",
6533 "vdu_index": vdu_index - 1 - x,
6534 }
6535 )
6536 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6537 for kdu_delta in delta.get("kdu-resource-delta", {}):
6538 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6539 kdu_name = kdu_profile["kdu-name"]
6540 resource_name = kdu_profile.get("resource-name", "")
6541
6542 if not scaling_info["kdu-delete"].get(kdu_name, None):
6543 scaling_info["kdu-delete"][kdu_name] = []
6544
6545 kdur = get_kdur(db_vnfr, kdu_name)
6546 if kdur.get("helm-chart"):
6547 k8s_cluster_type = "helm-chart-v3"
6548 self.logger.debug("kdur: {}".format(kdur))
6549 elif kdur.get("juju-bundle"):
6550 k8s_cluster_type = "juju-bundle"
6551 else:
6552 raise LcmException(
6553 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6554 "juju-bundle. Maybe an old NBI version is running".format(
6555 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6556 )
6557 )
6558
6559 min_instance_count = 0
6560 if kdu_profile and "min-number-of-instances" in kdu_profile:
6561 min_instance_count = kdu_profile["min-number-of-instances"]
6562
6563 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6564 deployed_kdu, _ = get_deployed_kdu(
6565 nsr_deployed, kdu_name, vnf_index
6566 )
6567 if deployed_kdu is None:
6568 raise LcmException(
6569 "KDU '{}' for vnf '{}' not deployed".format(
6570 kdu_name, vnf_index
6571 )
6572 )
6573 kdu_instance = deployed_kdu.get("kdu-instance")
6574 instance_num = await self.k8scluster_map[
6575 k8s_cluster_type
6576 ].get_scale_count(
6577 resource_name,
6578 kdu_instance,
6579 vca_id=vca_id,
6580 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6581 kdu_model=deployed_kdu.get("kdu-model"),
6582 )
6583 kdu_replica_count = instance_num - kdu_delta.get(
6584 "number-of-instances", 1
6585 )
6586
6587 if kdu_replica_count < min_instance_count < instance_num:
6588 kdu_replica_count = min_instance_count
6589 if kdu_replica_count < min_instance_count:
6590 raise LcmException(
6591 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6592 "scaling-group-descriptor '{}'".format(
6593 instance_num, scaling_group
6594 )
6595 )
6596
6597 for x in range(kdu_delta.get("number-of-instances", 1)):
6598 vca_scaling_info.append(
6599 {
6600 "osm_kdu_id": kdu_name,
6601 "member-vnf-index": vnf_index,
6602 "type": "delete",
6603 "kdu_index": instance_num - x - 1,
6604 }
6605 )
6606 scaling_info["kdu-delete"][kdu_name].append(
6607 {
6608 "member-vnf-index": vnf_index,
6609 "type": "delete",
6610 "k8s-cluster-type": k8s_cluster_type,
6611 "resource-name": resource_name,
6612 "scale": kdu_replica_count,
6613 }
6614 )
6615
6616 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6617 vdu_delete = copy(scaling_info.get("vdu-delete"))
6618 if scaling_info["scaling_direction"] == "IN":
6619 for vdur in reversed(db_vnfr["vdur"]):
6620 if vdu_delete.get(vdur["vdu-id-ref"]):
6621 vdu_delete[vdur["vdu-id-ref"]] -= 1
6622 scaling_info["vdu"].append(
6623 {
6624 "name": vdur.get("name") or vdur.get("vdu-name"),
6625 "vdu_id": vdur["vdu-id-ref"],
6626 "interface": [],
6627 }
6628 )
6629 for interface in vdur["interfaces"]:
6630 scaling_info["vdu"][-1]["interface"].append(
6631 {
6632 "name": interface["name"],
6633 "ip_address": interface["ip-address"],
6634 "mac_address": interface.get("mac-address"),
6635 }
6636 )
6637 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6638
6639 # PRE-SCALE BEGIN
6640 step = "Executing pre-scale vnf-config-primitive"
6641 if scaling_descriptor.get("scaling-config-action"):
6642 for scaling_config_action in scaling_descriptor[
6643 "scaling-config-action"
6644 ]:
6645 if (
6646 scaling_config_action.get("trigger") == "pre-scale-in"
6647 and scaling_type == "SCALE_IN"
6648 ) or (
6649 scaling_config_action.get("trigger") == "pre-scale-out"
6650 and scaling_type == "SCALE_OUT"
6651 ):
6652 vnf_config_primitive = scaling_config_action[
6653 "vnf-config-primitive-name-ref"
6654 ]
6655 step = db_nslcmop_update[
6656 "detailed-status"
6657 ] = "executing pre-scale scaling-config-action '{}'".format(
6658 vnf_config_primitive
6659 )
6660
6661 # look for primitive
6662 for config_primitive in (
6663 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6664 ).get("config-primitive", ()):
6665 if config_primitive["name"] == vnf_config_primitive:
6666 break
6667 else:
6668 raise LcmException(
6669 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6670 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6671 "primitive".format(scaling_group, vnf_config_primitive)
6672 )
6673
6674 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6675 if db_vnfr.get("additionalParamsForVnf"):
6676 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6677
6678 scale_process = "VCA"
6679 db_nsr_update["config-status"] = "configuring pre-scaling"
6680 primitive_params = self._map_primitive_params(
6681 config_primitive, {}, vnfr_params
6682 )
6683
6684 # Pre-scale retry check: Check if this sub-operation has been executed before
6685 op_index = self._check_or_add_scale_suboperation(
6686 db_nslcmop,
6687 vnf_index,
6688 vnf_config_primitive,
6689 primitive_params,
6690 "PRE-SCALE",
6691 )
6692 if op_index == self.SUBOPERATION_STATUS_SKIP:
6693 # Skip sub-operation
6694 result = "COMPLETED"
6695 result_detail = "Done"
6696 self.logger.debug(
6697 logging_text
6698 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6699 vnf_config_primitive, result, result_detail
6700 )
6701 )
6702 else:
6703 if op_index == self.SUBOPERATION_STATUS_NEW:
6704 # New sub-operation: Get index of this sub-operation
6705 op_index = (
6706 len(db_nslcmop.get("_admin", {}).get("operations"))
6707 - 1
6708 )
6709 self.logger.debug(
6710 logging_text
6711 + "vnf_config_primitive={} New sub-operation".format(
6712 vnf_config_primitive
6713 )
6714 )
6715 else:
6716 # retry: Get registered params for this existing sub-operation
6717 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6718 op_index
6719 ]
6720 vnf_index = op.get("member_vnf_index")
6721 vnf_config_primitive = op.get("primitive")
6722 primitive_params = op.get("primitive_params")
6723 self.logger.debug(
6724 logging_text
6725 + "vnf_config_primitive={} Sub-operation retry".format(
6726 vnf_config_primitive
6727 )
6728 )
6729 # Execute the primitive, either with new (first-time) or registered (reintent) args
6730 ee_descriptor_id = config_primitive.get(
6731 "execution-environment-ref"
6732 )
6733 primitive_name = config_primitive.get(
6734 "execution-environment-primitive", vnf_config_primitive
6735 )
6736 ee_id, vca_type = self._look_for_deployed_vca(
6737 nsr_deployed["VCA"],
6738 member_vnf_index=vnf_index,
6739 vdu_id=None,
6740 vdu_count_index=None,
6741 ee_descriptor_id=ee_descriptor_id,
6742 )
6743 result, result_detail = await self._ns_execute_primitive(
6744 ee_id,
6745 primitive_name,
6746 primitive_params,
6747 vca_type=vca_type,
6748 vca_id=vca_id,
6749 )
6750 self.logger.debug(
6751 logging_text
6752 + "vnf_config_primitive={} Done with result {} {}".format(
6753 vnf_config_primitive, result, result_detail
6754 )
6755 )
6756 # Update operationState = COMPLETED | FAILED
6757 self._update_suboperation_status(
6758 db_nslcmop, op_index, result, result_detail
6759 )
6760
6761 if result == "FAILED":
6762 raise LcmException(result_detail)
6763 db_nsr_update["config-status"] = old_config_status
6764 scale_process = None
6765 # PRE-SCALE END
6766
6767 db_nsr_update[
6768 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6769 ] = nb_scale_op
6770 db_nsr_update[
6771 "_admin.scaling-group.{}.time".format(admin_scale_index)
6772 ] = time()
6773
6774 # SCALE-IN VCA - BEGIN
6775 if vca_scaling_info:
6776 step = db_nslcmop_update[
6777 "detailed-status"
6778 ] = "Deleting the execution environments"
6779 scale_process = "VCA"
6780 for vca_info in vca_scaling_info:
6781 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6782 member_vnf_index = str(vca_info["member-vnf-index"])
6783 self.logger.debug(
6784 logging_text + "vdu info: {}".format(vca_info)
6785 )
6786 if vca_info.get("osm_vdu_id"):
6787 vdu_id = vca_info["osm_vdu_id"]
6788 vdu_index = int(vca_info["vdu_index"])
6789 stage[
6790 1
6791 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6792 member_vnf_index, vdu_id, vdu_index
6793 )
6794 stage[2] = step = "Scaling in VCA"
6795 self._write_op_status(op_id=nslcmop_id, stage=stage)
6796 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6797 config_update = db_nsr["configurationStatus"]
6798 for vca_index, vca in enumerate(vca_update):
6799 if (
6800 (vca or vca.get("ee_id"))
6801 and vca["member-vnf-index"] == member_vnf_index
6802 and vca["vdu_count_index"] == vdu_index
6803 ):
6804 if vca.get("vdu_id"):
6805 config_descriptor = get_configuration(
6806 db_vnfd, vca.get("vdu_id")
6807 )
6808 elif vca.get("kdu_name"):
6809 config_descriptor = get_configuration(
6810 db_vnfd, vca.get("kdu_name")
6811 )
6812 else:
6813 config_descriptor = get_configuration(
6814 db_vnfd, db_vnfd["id"]
6815 )
6816 operation_params = (
6817 db_nslcmop.get("operationParams") or {}
6818 )
6819 exec_terminate_primitives = not operation_params.get(
6820 "skip_terminate_primitives"
6821 ) and vca.get("needed_terminate")
6822 task = asyncio.ensure_future(
6823 asyncio.wait_for(
6824 self.destroy_N2VC(
6825 logging_text,
6826 db_nslcmop,
6827 vca,
6828 config_descriptor,
6829 vca_index,
6830 destroy_ee=True,
6831 exec_primitives=exec_terminate_primitives,
6832 scaling_in=True,
6833 vca_id=vca_id,
6834 ),
6835 timeout=self.timeout.charm_delete,
6836 )
6837 )
6838 tasks_dict_info[task] = "Terminating VCA {}".format(
6839 vca.get("ee_id")
6840 )
6841 del vca_update[vca_index]
6842 del config_update[vca_index]
6843 # wait for pending tasks of terminate primitives
6844 if tasks_dict_info:
6845 self.logger.debug(
6846 logging_text
6847 + "Waiting for tasks {}".format(
6848 list(tasks_dict_info.keys())
6849 )
6850 )
6851 error_list = await self._wait_for_tasks(
6852 logging_text,
6853 tasks_dict_info,
6854 min(
6855 self.timeout.charm_delete, self.timeout.ns_terminate
6856 ),
6857 stage,
6858 nslcmop_id,
6859 )
6860 tasks_dict_info.clear()
6861 if error_list:
6862 raise LcmException("; ".join(error_list))
6863
6864 db_vca_and_config_update = {
6865 "_admin.deployed.VCA": vca_update,
6866 "configurationStatus": config_update,
6867 }
6868 self.update_db_2(
6869 "nsrs", db_nsr["_id"], db_vca_and_config_update
6870 )
6871 scale_process = None
6872 # SCALE-IN VCA - END
6873
6874 # SCALE RO - BEGIN
6875 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6876 scale_process = "RO"
6877 if self.ro_config.ng:
6878 await self._scale_ng_ro(
6879 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6880 )
6881 scaling_info.pop("vdu-create", None)
6882 scaling_info.pop("vdu-delete", None)
6883
6884 scale_process = None
6885 # SCALE RO - END
6886
6887 # SCALE KDU - BEGIN
6888 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6889 scale_process = "KDU"
6890 await self._scale_kdu(
6891 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6892 )
6893 scaling_info.pop("kdu-create", None)
6894 scaling_info.pop("kdu-delete", None)
6895
6896 scale_process = None
6897 # SCALE KDU - END
6898
6899 if db_nsr_update:
6900 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6901
6902 # SCALE-UP VCA - BEGIN
6903 if vca_scaling_info:
6904 step = db_nslcmop_update[
6905 "detailed-status"
6906 ] = "Creating new execution environments"
6907 scale_process = "VCA"
6908 for vca_info in vca_scaling_info:
6909 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6910 member_vnf_index = str(vca_info["member-vnf-index"])
6911 self.logger.debug(
6912 logging_text + "vdu info: {}".format(vca_info)
6913 )
6914 vnfd_id = db_vnfr["vnfd-ref"]
6915 if vca_info.get("osm_vdu_id"):
6916 vdu_index = int(vca_info["vdu_index"])
6917 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6918 if db_vnfr.get("additionalParamsForVnf"):
6919 deploy_params.update(
6920 parse_yaml_strings(
6921 db_vnfr["additionalParamsForVnf"].copy()
6922 )
6923 )
6924 descriptor_config = get_configuration(
6925 db_vnfd, db_vnfd["id"]
6926 )
6927 if descriptor_config:
6928 vdu_id = None
6929 vdu_name = None
6930 kdu_name = None
6931 kdu_index = None
6932 self._deploy_n2vc(
6933 logging_text=logging_text
6934 + "member_vnf_index={} ".format(member_vnf_index),
6935 db_nsr=db_nsr,
6936 db_vnfr=db_vnfr,
6937 nslcmop_id=nslcmop_id,
6938 nsr_id=nsr_id,
6939 nsi_id=nsi_id,
6940 vnfd_id=vnfd_id,
6941 vdu_id=vdu_id,
6942 kdu_name=kdu_name,
6943 kdu_index=kdu_index,
6944 member_vnf_index=member_vnf_index,
6945 vdu_index=vdu_index,
6946 vdu_name=vdu_name,
6947 deploy_params=deploy_params,
6948 descriptor_config=descriptor_config,
6949 base_folder=base_folder,
6950 task_instantiation_info=tasks_dict_info,
6951 stage=stage,
6952 )
6953 vdu_id = vca_info["osm_vdu_id"]
6954 vdur = find_in_list(
6955 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6956 )
6957 descriptor_config = get_configuration(db_vnfd, vdu_id)
6958 if vdur.get("additionalParams"):
6959 deploy_params_vdu = parse_yaml_strings(
6960 vdur["additionalParams"]
6961 )
6962 else:
6963 deploy_params_vdu = deploy_params
6964 deploy_params_vdu["OSM"] = get_osm_params(
6965 db_vnfr, vdu_id, vdu_count_index=vdu_index
6966 )
6967 if descriptor_config:
6968 vdu_name = None
6969 kdu_name = None
6970 kdu_index = None
6971 stage[
6972 1
6973 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6974 member_vnf_index, vdu_id, vdu_index
6975 )
6976 stage[2] = step = "Scaling out VCA"
6977 self._write_op_status(op_id=nslcmop_id, stage=stage)
6978 self._deploy_n2vc(
6979 logging_text=logging_text
6980 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6981 member_vnf_index, vdu_id, vdu_index
6982 ),
6983 db_nsr=db_nsr,
6984 db_vnfr=db_vnfr,
6985 nslcmop_id=nslcmop_id,
6986 nsr_id=nsr_id,
6987 nsi_id=nsi_id,
6988 vnfd_id=vnfd_id,
6989 vdu_id=vdu_id,
6990 kdu_name=kdu_name,
6991 member_vnf_index=member_vnf_index,
6992 vdu_index=vdu_index,
6993 kdu_index=kdu_index,
6994 vdu_name=vdu_name,
6995 deploy_params=deploy_params_vdu,
6996 descriptor_config=descriptor_config,
6997 base_folder=base_folder,
6998 task_instantiation_info=tasks_dict_info,
6999 stage=stage,
7000 )
7001 # SCALE-UP VCA - END
7002 scale_process = None
7003
7004 # POST-SCALE BEGIN
7005 # execute primitive service POST-SCALING
7006 step = "Executing post-scale vnf-config-primitive"
7007 if scaling_descriptor.get("scaling-config-action"):
7008 for scaling_config_action in scaling_descriptor[
7009 "scaling-config-action"
7010 ]:
7011 if (
7012 scaling_config_action.get("trigger") == "post-scale-in"
7013 and scaling_type == "SCALE_IN"
7014 ) or (
7015 scaling_config_action.get("trigger") == "post-scale-out"
7016 and scaling_type == "SCALE_OUT"
7017 ):
7018 vnf_config_primitive = scaling_config_action[
7019 "vnf-config-primitive-name-ref"
7020 ]
7021 step = db_nslcmop_update[
7022 "detailed-status"
7023 ] = "executing post-scale scaling-config-action '{}'".format(
7024 vnf_config_primitive
7025 )
7026
7027 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7028 if db_vnfr.get("additionalParamsForVnf"):
7029 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7030
7031 # look for primitive
7032 for config_primitive in (
7033 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7034 ).get("config-primitive", ()):
7035 if config_primitive["name"] == vnf_config_primitive:
7036 break
7037 else:
7038 raise LcmException(
7039 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7040 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7041 "config-primitive".format(
7042 scaling_group, vnf_config_primitive
7043 )
7044 )
7045 scale_process = "VCA"
7046 db_nsr_update["config-status"] = "configuring post-scaling"
7047 primitive_params = self._map_primitive_params(
7048 config_primitive, {}, vnfr_params
7049 )
7050
7051 # Post-scale retry check: Check if this sub-operation has been executed before
7052 op_index = self._check_or_add_scale_suboperation(
7053 db_nslcmop,
7054 vnf_index,
7055 vnf_config_primitive,
7056 primitive_params,
7057 "POST-SCALE",
7058 )
7059 if op_index == self.SUBOPERATION_STATUS_SKIP:
7060 # Skip sub-operation
7061 result = "COMPLETED"
7062 result_detail = "Done"
7063 self.logger.debug(
7064 logging_text
7065 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7066 vnf_config_primitive, result, result_detail
7067 )
7068 )
7069 else:
7070 if op_index == self.SUBOPERATION_STATUS_NEW:
7071 # New sub-operation: Get index of this sub-operation
7072 op_index = (
7073 len(db_nslcmop.get("_admin", {}).get("operations"))
7074 - 1
7075 )
7076 self.logger.debug(
7077 logging_text
7078 + "vnf_config_primitive={} New sub-operation".format(
7079 vnf_config_primitive
7080 )
7081 )
7082 else:
7083 # retry: Get registered params for this existing sub-operation
7084 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7085 op_index
7086 ]
7087 vnf_index = op.get("member_vnf_index")
7088 vnf_config_primitive = op.get("primitive")
7089 primitive_params = op.get("primitive_params")
7090 self.logger.debug(
7091 logging_text
7092 + "vnf_config_primitive={} Sub-operation retry".format(
7093 vnf_config_primitive
7094 )
7095 )
7096 # Execute the primitive, either with new (first-time) or registered (reintent) args
7097 ee_descriptor_id = config_primitive.get(
7098 "execution-environment-ref"
7099 )
7100 primitive_name = config_primitive.get(
7101 "execution-environment-primitive", vnf_config_primitive
7102 )
7103 ee_id, vca_type = self._look_for_deployed_vca(
7104 nsr_deployed["VCA"],
7105 member_vnf_index=vnf_index,
7106 vdu_id=None,
7107 vdu_count_index=None,
7108 ee_descriptor_id=ee_descriptor_id,
7109 )
7110 result, result_detail = await self._ns_execute_primitive(
7111 ee_id,
7112 primitive_name,
7113 primitive_params,
7114 vca_type=vca_type,
7115 vca_id=vca_id,
7116 )
7117 self.logger.debug(
7118 logging_text
7119 + "vnf_config_primitive={} Done with result {} {}".format(
7120 vnf_config_primitive, result, result_detail
7121 )
7122 )
7123 # Update operationState = COMPLETED | FAILED
7124 self._update_suboperation_status(
7125 db_nslcmop, op_index, result, result_detail
7126 )
7127
7128 if result == "FAILED":
7129 raise LcmException(result_detail)
7130 db_nsr_update["config-status"] = old_config_status
7131 scale_process = None
7132 # POST-SCALE END
7133
7134 db_nsr_update[
7135 "detailed-status"
7136 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7137 db_nsr_update["operational-status"] = (
7138 "running"
7139 if old_operational_status == "failed"
7140 else old_operational_status
7141 )
7142 db_nsr_update["config-status"] = old_config_status
7143 return
7144 except (
7145 ROclient.ROClientException,
7146 DbException,
7147 LcmException,
7148 NgRoException,
7149 ) as e:
7150 self.logger.error(logging_text + "Exit Exception {}".format(e))
7151 exc = e
7152 except asyncio.CancelledError:
7153 self.logger.error(
7154 logging_text + "Cancelled Exception while '{}'".format(step)
7155 )
7156 exc = "Operation was cancelled"
7157 except Exception as e:
7158 exc = traceback.format_exc()
7159 self.logger.critical(
7160 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7161 exc_info=True,
7162 )
7163 finally:
7164 error_list = list()
7165 if exc:
7166 error_list.append(str(exc))
7167 self._write_ns_status(
7168 nsr_id=nsr_id,
7169 ns_state=None,
7170 current_operation="IDLE",
7171 current_operation_id=None,
7172 )
7173 try:
7174 if tasks_dict_info:
7175 stage[1] = "Waiting for instantiate pending tasks."
7176 self.logger.debug(logging_text + stage[1])
7177 exc = await self._wait_for_tasks(
7178 logging_text,
7179 tasks_dict_info,
7180 self.timeout.ns_deploy,
7181 stage,
7182 nslcmop_id,
7183 nsr_id=nsr_id,
7184 )
7185 except asyncio.CancelledError:
7186 error_list.append("Cancelled")
7187 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
7188 await self._wait_for_tasks(
7189 logging_text,
7190 tasks_dict_info,
7191 self.timeout.ns_deploy,
7192 stage,
7193 nslcmop_id,
7194 nsr_id=nsr_id,
7195 )
7196 if error_list:
7197 error_detail = "; ".join(error_list)
7198 db_nslcmop_update[
7199 "detailed-status"
7200 ] = error_description_nslcmop = "FAILED {}: {}".format(
7201 step, error_detail
7202 )
7203 nslcmop_operation_state = "FAILED"
7204 if db_nsr:
7205 db_nsr_update["operational-status"] = old_operational_status
7206 db_nsr_update["config-status"] = old_config_status
7207 db_nsr_update["detailed-status"] = ""
7208 if scale_process:
7209 if "VCA" in scale_process:
7210 db_nsr_update["config-status"] = "failed"
7211 if "RO" in scale_process:
7212 db_nsr_update["operational-status"] = "failed"
7213 db_nsr_update[
7214 "detailed-status"
7215 ] = "FAILED scaling nslcmop={} {}: {}".format(
7216 nslcmop_id, step, error_detail
7217 )
7218 else:
7219 error_description_nslcmop = None
7220 nslcmop_operation_state = "COMPLETED"
7221 db_nslcmop_update["detailed-status"] = "Done"
7222
7223 self._write_op_status(
7224 op_id=nslcmop_id,
7225 stage="",
7226 error_message=error_description_nslcmop,
7227 operation_state=nslcmop_operation_state,
7228 other_update=db_nslcmop_update,
7229 )
7230 if db_nsr:
7231 self._write_ns_status(
7232 nsr_id=nsr_id,
7233 ns_state=None,
7234 current_operation="IDLE",
7235 current_operation_id=None,
7236 other_update=db_nsr_update,
7237 )
7238
7239 if nslcmop_operation_state:
7240 try:
7241 msg = {
7242 "nsr_id": nsr_id,
7243 "nslcmop_id": nslcmop_id,
7244 "operationState": nslcmop_operation_state,
7245 }
7246 await self.msg.aiowrite("ns", "scaled", msg)
7247 except Exception as e:
7248 self.logger.error(
7249 logging_text + "kafka_write notification Exception {}".format(e)
7250 )
7251 self.logger.debug(logging_text + "Exit")
7252 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7253
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs (K8s deployment units) described in scaling_info.

        For each KDU instance to scale, locates the deployed K8s application in
        nsr_deployed and invokes the matching K8s connector's scale() method.
        Around the scale it also runs the KDU's configuration primitives, when
        they are defined and no juju execution environment handles configuration:
        terminate-config-primitive before a scale-in ("delete" type), and
        initial-config-primitive after a scale-out ("create" type).

        :param logging_text: prefix for every log line of this task
        :param nsr_id: NS record id, used to build the DB path for status updates
        :param nsr_deployed: nsr "_admin.deployed" section with K8s deployments
        :param db_vnfd: VNF descriptor holding the KDU configuration
        :param vca_id: VCA id forwarded to the K8s connector calls
        :param scaling_info: dict with a "kdu-create" or "kdu-delete" entry,
            each mapping kdu_name -> list of per-instance scaling data
        :raises asyncio.TimeoutError: if a primitive or the scale itself times out
        """
        # Only one of the two keys is expected per operation (create XOR delete)
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # DB location where the K8s connector reports operation progress
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Run terminate primitives only when defined and when no
                    # juju EE is in charge of this KDU's configuration
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # Primitives must execute in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # Outer wait_for acts as a watchdog slightly larger
                            # than the connector's own total_timeout
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # The actual scale is done for both "create" and "delete" types
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Mirror of the terminate case: initial primitives run only
                    # after scale-out and only without a juju EE
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                # NOTE(review): hard-coded 600s here, unlike the
                                # configurable timeout used for terminate — confirm
                                timeout=600,
                            )
7363
7364 async def _scale_ng_ro(
7365 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7366 ):
7367 nsr_id = db_nslcmop["nsInstanceId"]
7368 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7369 db_vnfrs = {}
7370
7371 # read from db: vnfd's for every vnf
7372 db_vnfds = []
7373
7374 # for each vnf in ns, read vnfd
7375 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7376 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7377 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7378 # if we haven't this vnfd, read it from db
7379 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7380 # read from db
7381 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7382 db_vnfds.append(vnfd)
7383 n2vc_key = self.n2vc.get_public_key()
7384 n2vc_key_list = [n2vc_key]
7385 self.scale_vnfr(
7386 db_vnfr,
7387 vdu_scaling_info.get("vdu-create"),
7388 vdu_scaling_info.get("vdu-delete"),
7389 mark_delete=True,
7390 )
7391 # db_vnfr has been updated, update db_vnfrs to use it
7392 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7393 await self._instantiate_ng_ro(
7394 logging_text,
7395 nsr_id,
7396 db_nsd,
7397 db_nsr,
7398 db_nslcmop,
7399 db_vnfrs,
7400 db_vnfds,
7401 n2vc_key_list,
7402 stage=stage,
7403 start_deploy=time(),
7404 timeout_ns_deploy=self.timeout.ns_deploy,
7405 )
7406 if vdu_scaling_info.get("vdu-delete"):
7407 self.scale_vnfr(
7408 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7409 )
7410
7411 async def extract_prometheus_scrape_jobs(
7412 self,
7413 ee_id: str,
7414 artifact_path: str,
7415 ee_config_descriptor: dict,
7416 vnfr_id: str,
7417 nsr_id: str,
7418 target_ip: str,
7419 element_type: str,
7420 vnf_member_index: str = "",
7421 vdu_id: str = "",
7422 vdu_index: int = None,
7423 kdu_name: str = "",
7424 kdu_index: int = None,
7425 ) -> dict:
7426 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7427 This method will wait until the corresponding VDU or KDU is fully instantiated
7428
7429 Args:
7430 ee_id (str): Execution Environment ID
7431 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7432 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7433 vnfr_id (str): VNFR ID where this EE applies
7434 nsr_id (str): NSR ID where this EE applies
7435 target_ip (str): VDU/KDU instance IP address
7436 element_type (str): NS or VNF or VDU or KDU
7437 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7438 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7439 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7440 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7441 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7442
7443 Raises:
7444 LcmException: When the VDU or KDU instance was not found in an hour
7445
7446 Returns:
7447 _type_: Prometheus jobs
7448 """
7449 # default the vdur and kdur names to an empty string, to avoid any later
7450 # problem with Prometheus when the element type is not VDU or KDU
7451 vdur_name = ""
7452 kdur_name = ""
7453
7454 # look if exist a file called 'prometheus*.j2' and
7455 artifact_content = self.fs.dir_ls(artifact_path)
7456 job_file = next(
7457 (
7458 f
7459 for f in artifact_content
7460 if f.startswith("prometheus") and f.endswith(".j2")
7461 ),
7462 None,
7463 )
7464 if not job_file:
7465 return
7466 self.logger.debug("Artifact path{}".format(artifact_path))
7467 self.logger.debug("job file{}".format(job_file))
7468 with self.fs.file_open((artifact_path, job_file), "r") as f:
7469 job_data = f.read()
7470
7471 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7472 if element_type in ("VDU", "KDU"):
7473 for _ in range(360):
7474 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7475 if vdu_id and vdu_index is not None:
7476 vdur = next(
7477 (
7478 x
7479 for x in get_iterable(db_vnfr, "vdur")
7480 if (
7481 x.get("vdu-id-ref") == vdu_id
7482 and x.get("count-index") == vdu_index
7483 )
7484 ),
7485 {},
7486 )
7487 if vdur.get("name"):
7488 vdur_name = vdur.get("name")
7489 break
7490 if kdu_name and kdu_index is not None:
7491 kdur = next(
7492 (
7493 x
7494 for x in get_iterable(db_vnfr, "kdur")
7495 if (
7496 x.get("kdu-name") == kdu_name
7497 and x.get("count-index") == kdu_index
7498 )
7499 ),
7500 {},
7501 )
7502 if kdur.get("name"):
7503 kdur_name = kdur.get("name")
7504 break
7505
7506 await asyncio.sleep(10)
7507 else:
7508 if vdu_id and vdu_index is not None:
7509 raise LcmException(
7510 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7511 )
7512 if kdu_name and kdu_index is not None:
7513 raise LcmException(
7514 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7515 )
7516
7517 if ee_id is not None:
7518 _, namespace, helm_id = get_ee_id_parts(
7519 ee_id
7520 ) # get namespace and EE gRPC service name
7521 host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7522 host_port = "80"
7523 vnfr_id = vnfr_id.replace("-", "")
7524 variables = {
7525 "JOB_NAME": vnfr_id,
7526 "TARGET_IP": target_ip,
7527 "EXPORTER_POD_IP": host_name,
7528 "EXPORTER_POD_PORT": host_port,
7529 "NSR_ID": nsr_id,
7530 "VNF_MEMBER_INDEX": vnf_member_index,
7531 "VDUR_NAME": vdur_name,
7532 "KDUR_NAME": kdur_name,
7533 "ELEMENT_TYPE": element_type,
7534 }
7535 else:
7536 metric_path = ee_config_descriptor["metric-path"]
7537 target_port = ee_config_descriptor["metric-port"]
7538 vnfr_id = vnfr_id.replace("-", "")
7539 variables = {
7540 "JOB_NAME": vnfr_id,
7541 "TARGET_IP": target_ip,
7542 "TARGET_PORT": target_port,
7543 "METRIC_PATH": metric_path,
7544 }
7545
7546 job_list = parse_job(job_data, variables)
7547 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7548 for job in job_list:
7549 if (
7550 not isinstance(job.get("job_name"), str)
7551 or vnfr_id not in job["job_name"]
7552 ):
7553 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7554 job["nsr_id"] = nsr_id
7555 job["vnfr_id"] = vnfr_id
7556 return job_list
7557
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild one VDU instance of a VNF through NG-RO.

        Looks up the target VDU record (vdur) by vdu_id and count-index, sends
        the corresponding operate request to RO and waits for it to finish.

        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id driving this task
        :param vnf_id: vnfr "_id" of the VNF that owns the target VDU
        :param additional_param: dict with "vdu_id" and "count-index" selecting
            the VDU instance to operate on
        :param operation_type: RO operation to perform (e.g. start/stop/rebuild)
        :return: tuple (state, detail): ("COMPLETED", "Done") on success,
            ("FAILED", <error text>) otherwise
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # locate the target vdur: match vdu-id-ref first, then count-index
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # take the (single) vim_info key as the target VIM identifier
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO reports the action finished (or timeout)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            # NOTE(review): this formats the whole stage list, not stage[1] — confirm intended
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # any exception path falls through to a FAILED result for the caller
        return "FAILED", "Error in operate VNF {}".format(exc)
7644
7645 async def migrate(self, nsr_id, nslcmop_id):
7646 """
7647 Migrate VNFs and VDUs instances in a NS
7648
7649 :param: nsr_id: NS Instance ID
7650 :param: nslcmop_id: nslcmop ID of migrate
7651
7652 """
7653 # Try to lock HA task here
7654 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7655 if not task_is_locked_by_me:
7656 return
7657 logging_text = "Task ns={} migrate ".format(nsr_id)
7658 self.logger.debug(logging_text + "Enter")
7659 # get all needed from database
7660 db_nslcmop = None
7661 db_nslcmop_update = {}
7662 nslcmop_operation_state = None
7663 db_nsr_update = {}
7664 target = {}
7665 exc = None
7666 # in case of error, indicates what part of scale was failed to put nsr at error status
7667 start_deploy = time()
7668
7669 try:
7670 # wait for any previous tasks in process
7671 step = "Waiting for previous operations to terminate"
7672 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7673
7674 self._write_ns_status(
7675 nsr_id=nsr_id,
7676 ns_state=None,
7677 current_operation="MIGRATING",
7678 current_operation_id=nslcmop_id,
7679 )
7680 step = "Getting nslcmop from database"
7681 self.logger.debug(
7682 step + " after having waited for previous tasks to be completed"
7683 )
7684 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7685 migrate_params = db_nslcmop.get("operationParams")
7686
7687 target = {}
7688 target.update(migrate_params)
7689 desc = await self.RO.migrate(nsr_id, target)
7690 self.logger.debug("RO return > {}".format(desc))
7691 action_id = desc["action_id"]
7692 await self._wait_ng_ro(
7693 nsr_id,
7694 action_id,
7695 nslcmop_id,
7696 start_deploy,
7697 self.timeout.migrate,
7698 operation="migrate",
7699 )
7700 except (ROclient.ROClientException, DbException, LcmException) as e:
7701 self.logger.error("Exit Exception {}".format(e))
7702 exc = e
7703 except asyncio.CancelledError:
7704 self.logger.error("Cancelled Exception while '{}'".format(step))
7705 exc = "Operation was cancelled"
7706 except Exception as e:
7707 exc = traceback.format_exc()
7708 self.logger.critical(
7709 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7710 )
7711 finally:
7712 self._write_ns_status(
7713 nsr_id=nsr_id,
7714 ns_state=None,
7715 current_operation="IDLE",
7716 current_operation_id=None,
7717 )
7718 if exc:
7719 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7720 nslcmop_operation_state = "FAILED"
7721 else:
7722 nslcmop_operation_state = "COMPLETED"
7723 db_nslcmop_update["detailed-status"] = "Done"
7724 db_nsr_update["detailed-status"] = "Done"
7725
7726 self._write_op_status(
7727 op_id=nslcmop_id,
7728 stage="",
7729 error_message="",
7730 operation_state=nslcmop_operation_state,
7731 other_update=db_nslcmop_update,
7732 )
7733 if nslcmop_operation_state:
7734 try:
7735 msg = {
7736 "nsr_id": nsr_id,
7737 "nslcmop_id": nslcmop_id,
7738 "operationState": nslcmop_operation_state,
7739 }
7740 await self.msg.aiowrite("ns", "migrated", msg)
7741 except Exception as e:
7742 self.logger.error(
7743 logging_text + "kafka_write notification Exception {}".format(e)
7744 )
7745 self.logger.debug(logging_text + "Exit")
7746 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7747
7748 async def heal(self, nsr_id, nslcmop_id):
7749 """
7750 Heal NS
7751
7752 :param nsr_id: ns instance to heal
7753 :param nslcmop_id: operation to run
7754 :return:
7755 """
7756
7757 # Try to lock HA task here
7758 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7759 if not task_is_locked_by_me:
7760 return
7761
7762 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7763 stage = ["", "", ""]
7764 tasks_dict_info = {}
7765 # ^ stage, step, VIM progress
7766 self.logger.debug(logging_text + "Enter")
7767 # get all needed from database
7768 db_nsr = None
7769 db_nslcmop_update = {}
7770 db_nsr_update = {}
7771 db_vnfrs = {} # vnf's info indexed by _id
7772 exc = None
7773 old_operational_status = ""
7774 old_config_status = ""
7775 nsi_id = None
7776 try:
7777 # wait for any previous tasks in process
7778 step = "Waiting for previous operations to terminate"
7779 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7780 self._write_ns_status(
7781 nsr_id=nsr_id,
7782 ns_state=None,
7783 current_operation="HEALING",
7784 current_operation_id=nslcmop_id,
7785 )
7786
7787 step = "Getting nslcmop from database"
7788 self.logger.debug(
7789 step + " after having waited for previous tasks to be completed"
7790 )
7791 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7792
7793 step = "Getting nsr from database"
7794 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7795 old_operational_status = db_nsr["operational-status"]
7796 old_config_status = db_nsr["config-status"]
7797
7798 db_nsr_update = {
7799 "_admin.deployed.RO.operational-status": "healing",
7800 }
7801 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7802
7803 step = "Sending heal order to VIM"
7804 await self.heal_RO(
7805 logging_text=logging_text,
7806 nsr_id=nsr_id,
7807 db_nslcmop=db_nslcmop,
7808 stage=stage,
7809 )
7810 # VCA tasks
7811 # read from db: nsd
7812 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7813 self.logger.debug(logging_text + stage[1])
7814 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7815 self.fs.sync(db_nsr["nsd-id"])
7816 db_nsr["nsd"] = nsd
7817 # read from db: vnfr's of this ns
7818 step = "Getting vnfrs from db"
7819 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7820 for vnfr in db_vnfrs_list:
7821 db_vnfrs[vnfr["_id"]] = vnfr
7822 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7823
7824 # Check for each target VNF
7825 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7826 for target_vnf in target_list:
7827 # Find this VNF in the list from DB
7828 vnfr_id = target_vnf.get("vnfInstanceId", None)
7829 if vnfr_id:
7830 db_vnfr = db_vnfrs[vnfr_id]
7831 vnfd_id = db_vnfr.get("vnfd-id")
7832 vnfd_ref = db_vnfr.get("vnfd-ref")
7833 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7834 base_folder = vnfd["_admin"]["storage"]
7835 vdu_id = None
7836 vdu_index = 0
7837 vdu_name = None
7838 kdu_name = None
7839 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7840 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7841
7842 # Check each target VDU and deploy N2VC
7843 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7844 "vdu", []
7845 )
7846 if not target_vdu_list:
7847 # Codigo nuevo para crear diccionario
7848 target_vdu_list = []
7849 for existing_vdu in db_vnfr.get("vdur"):
7850 vdu_name = existing_vdu.get("vdu-name", None)
7851 vdu_index = existing_vdu.get("count-index", 0)
7852 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7853 "run-day1", False
7854 )
7855 vdu_to_be_healed = {
7856 "vdu-id": vdu_name,
7857 "count-index": vdu_index,
7858 "run-day1": vdu_run_day1,
7859 }
7860 target_vdu_list.append(vdu_to_be_healed)
7861 for target_vdu in target_vdu_list:
7862 deploy_params_vdu = target_vdu
7863 # Set run-day1 vnf level value if not vdu level value exists
7864 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
7865 "additionalParams", {}
7866 ).get("run-day1"):
7867 deploy_params_vdu["run-day1"] = target_vnf[
7868 "additionalParams"
7869 ].get("run-day1")
7870 vdu_name = target_vdu.get("vdu-id", None)
7871 # TODO: Get vdu_id from vdud.
7872 vdu_id = vdu_name
7873 # For multi instance VDU count-index is mandatory
7874 # For single session VDU count-indes is 0
7875 vdu_index = target_vdu.get("count-index", 0)
7876
7877 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7878 stage[1] = "Deploying Execution Environments."
7879 self.logger.debug(logging_text + stage[1])
7880
7881 # VNF Level charm. Normal case when proxy charms.
7882 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7883 descriptor_config = get_configuration(vnfd, vnfd_ref)
7884 if descriptor_config:
7885 # Continue if healed machine is management machine
7886 vnf_ip_address = db_vnfr.get("ip-address")
7887 target_instance = None
7888 for instance in db_vnfr.get("vdur", None):
7889 if (
7890 instance["vdu-name"] == vdu_name
7891 and instance["count-index"] == vdu_index
7892 ):
7893 target_instance = instance
7894 break
7895 if vnf_ip_address == target_instance.get("ip-address"):
7896 self._heal_n2vc(
7897 logging_text=logging_text
7898 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7899 member_vnf_index, vdu_name, vdu_index
7900 ),
7901 db_nsr=db_nsr,
7902 db_vnfr=db_vnfr,
7903 nslcmop_id=nslcmop_id,
7904 nsr_id=nsr_id,
7905 nsi_id=nsi_id,
7906 vnfd_id=vnfd_ref,
7907 vdu_id=None,
7908 kdu_name=None,
7909 member_vnf_index=member_vnf_index,
7910 vdu_index=0,
7911 vdu_name=None,
7912 deploy_params=deploy_params_vdu,
7913 descriptor_config=descriptor_config,
7914 base_folder=base_folder,
7915 task_instantiation_info=tasks_dict_info,
7916 stage=stage,
7917 )
7918
7919 # VDU Level charm. Normal case with native charms.
7920 descriptor_config = get_configuration(vnfd, vdu_name)
7921 if descriptor_config:
7922 self._heal_n2vc(
7923 logging_text=logging_text
7924 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7925 member_vnf_index, vdu_name, vdu_index
7926 ),
7927 db_nsr=db_nsr,
7928 db_vnfr=db_vnfr,
7929 nslcmop_id=nslcmop_id,
7930 nsr_id=nsr_id,
7931 nsi_id=nsi_id,
7932 vnfd_id=vnfd_ref,
7933 vdu_id=vdu_id,
7934 kdu_name=kdu_name,
7935 member_vnf_index=member_vnf_index,
7936 vdu_index=vdu_index,
7937 vdu_name=vdu_name,
7938 deploy_params=deploy_params_vdu,
7939 descriptor_config=descriptor_config,
7940 base_folder=base_folder,
7941 task_instantiation_info=tasks_dict_info,
7942 stage=stage,
7943 )
7944
7945 except (
7946 ROclient.ROClientException,
7947 DbException,
7948 LcmException,
7949 NgRoException,
7950 ) as e:
7951 self.logger.error(logging_text + "Exit Exception {}".format(e))
7952 exc = e
7953 except asyncio.CancelledError:
7954 self.logger.error(
7955 logging_text + "Cancelled Exception while '{}'".format(step)
7956 )
7957 exc = "Operation was cancelled"
7958 except Exception as e:
7959 exc = traceback.format_exc()
7960 self.logger.critical(
7961 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7962 exc_info=True,
7963 )
7964 finally:
7965 error_list = list()
7966 if exc:
7967 error_list.append(str(exc))
7968 try:
7969 if tasks_dict_info:
7970 stage[1] = "Waiting for healing pending tasks."
7971 self.logger.debug(logging_text + stage[1])
7972 exc = await self._wait_for_tasks(
7973 logging_text,
7974 tasks_dict_info,
7975 self.timeout.ns_deploy,
7976 stage,
7977 nslcmop_id,
7978 nsr_id=nsr_id,
7979 )
7980 except asyncio.CancelledError:
7981 error_list.append("Cancelled")
7982 await self._cancel_pending_tasks(logging_text, tasks_dict_info)
7983 await self._wait_for_tasks(
7984 logging_text,
7985 tasks_dict_info,
7986 self.timeout.ns_deploy,
7987 stage,
7988 nslcmop_id,
7989 nsr_id=nsr_id,
7990 )
7991 if error_list:
7992 error_detail = "; ".join(error_list)
7993 db_nslcmop_update[
7994 "detailed-status"
7995 ] = error_description_nslcmop = "FAILED {}: {}".format(
7996 step, error_detail
7997 )
7998 nslcmop_operation_state = "FAILED"
7999 if db_nsr:
8000 db_nsr_update["operational-status"] = old_operational_status
8001 db_nsr_update["config-status"] = old_config_status
8002 db_nsr_update[
8003 "detailed-status"
8004 ] = "FAILED healing nslcmop={} {}: {}".format(
8005 nslcmop_id, step, error_detail
8006 )
8007 for task, task_name in tasks_dict_info.items():
8008 if not task.done() or task.cancelled() or task.exception():
8009 if task_name.startswith(self.task_name_deploy_vca):
8010 # A N2VC task is pending
8011 db_nsr_update["config-status"] = "failed"
8012 else:
8013 # RO task is pending
8014 db_nsr_update["operational-status"] = "failed"
8015 else:
8016 error_description_nslcmop = None
8017 nslcmop_operation_state = "COMPLETED"
8018 db_nslcmop_update["detailed-status"] = "Done"
8019 db_nsr_update["detailed-status"] = "Done"
8020 db_nsr_update["operational-status"] = "running"
8021 db_nsr_update["config-status"] = "configured"
8022
8023 self._write_op_status(
8024 op_id=nslcmop_id,
8025 stage="",
8026 error_message=error_description_nslcmop,
8027 operation_state=nslcmop_operation_state,
8028 other_update=db_nslcmop_update,
8029 )
8030 if db_nsr:
8031 self._write_ns_status(
8032 nsr_id=nsr_id,
8033 ns_state=None,
8034 current_operation="IDLE",
8035 current_operation_id=None,
8036 other_update=db_nsr_update,
8037 )
8038
8039 if nslcmop_operation_state:
8040 try:
8041 msg = {
8042 "nsr_id": nsr_id,
8043 "nslcmop_id": nslcmop_id,
8044 "operationState": nslcmop_operation_state,
8045 }
8046 await self.msg.aiowrite("ns", "healed", msg)
8047 except Exception as e:
8048 self.logger.error(
8049 logging_text + "kafka_write notification Exception {}".format(e)
8050 )
8051 self.logger.debug(logging_text + "Exit")
8052 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8053
8054 async def heal_RO(
8055 self,
8056 logging_text,
8057 nsr_id,
8058 db_nslcmop,
8059 stage,
8060 ):
8061 """
8062 Heal at RO
8063 :param logging_text: preffix text to use at logging
8064 :param nsr_id: nsr identity
8065 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8066 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8067 :return: None or exception
8068 """
8069
8070 def get_vim_account(vim_account_id):
8071 nonlocal db_vims
8072 if vim_account_id in db_vims:
8073 return db_vims[vim_account_id]
8074 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8075 db_vims[vim_account_id] = db_vim
8076 return db_vim
8077
8078 try:
8079 start_heal = time()
8080 ns_params = db_nslcmop.get("operationParams")
8081 if ns_params and ns_params.get("timeout_ns_heal"):
8082 timeout_ns_heal = ns_params["timeout_ns_heal"]
8083 else:
8084 timeout_ns_heal = self.timeout.ns_heal
8085
8086 db_vims = {}
8087
8088 nslcmop_id = db_nslcmop["_id"]
8089 target = {
8090 "action_id": nslcmop_id,
8091 }
8092 self.logger.warning(
8093 "db_nslcmop={} and timeout_ns_heal={}".format(
8094 db_nslcmop, timeout_ns_heal
8095 )
8096 )
8097 target.update(db_nslcmop.get("operationParams", {}))
8098
8099 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8100 desc = await self.RO.recreate(nsr_id, target)
8101 self.logger.debug("RO return > {}".format(desc))
8102 action_id = desc["action_id"]
8103 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8104 await self._wait_ng_ro(
8105 nsr_id,
8106 action_id,
8107 nslcmop_id,
8108 start_heal,
8109 timeout_ns_heal,
8110 stage,
8111 operation="healing",
8112 )
8113
8114 # Updating NSR
8115 db_nsr_update = {
8116 "_admin.deployed.RO.operational-status": "running",
8117 "detailed-status": " ".join(stage),
8118 }
8119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8120 self._write_op_status(nslcmop_id, stage)
8121 self.logger.debug(
8122 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8123 )
8124
8125 except Exception as e:
8126 stage[2] = "ERROR healing at VIM"
8127 # self.set_vnfr_at_error(db_vnfrs, str(e))
8128 self.logger.error(
8129 "Error healing at VIM {}".format(e),
8130 exc_info=not isinstance(
8131 e,
8132 (
8133 ROclient.ROClientException,
8134 LcmException,
8135 DbException,
8136 NgRoException,
8137 ),
8138 ),
8139 )
8140 raise
8141
8142 def _heal_n2vc(
8143 self,
8144 logging_text,
8145 db_nsr,
8146 db_vnfr,
8147 nslcmop_id,
8148 nsr_id,
8149 nsi_id,
8150 vnfd_id,
8151 vdu_id,
8152 kdu_name,
8153 member_vnf_index,
8154 vdu_index,
8155 vdu_name,
8156 deploy_params,
8157 descriptor_config,
8158 base_folder,
8159 task_instantiation_info,
8160 stage,
8161 ):
8162 # launch instantiate_N2VC in a asyncio task and register task object
8163 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8164 # if not found, create one entry and update database
8165 # fill db_nsr._admin.deployed.VCA.<index>
8166
8167 self.logger.debug(
8168 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
8169 )
8170
8171 charm_name = ""
8172 get_charm_name = False
8173 if "execution-environment-list" in descriptor_config:
8174 ee_list = descriptor_config.get("execution-environment-list", [])
8175 elif "juju" in descriptor_config:
8176 ee_list = [descriptor_config] # ns charms
8177 if "execution-environment-list" not in descriptor_config:
8178 # charm name is only required for ns charms
8179 get_charm_name = True
8180 else: # other types as script are not supported
8181 ee_list = []
8182
8183 for ee_item in ee_list:
8184 self.logger.debug(
8185 logging_text
8186 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8187 ee_item.get("juju"), ee_item.get("helm-chart")
8188 )
8189 )
8190 ee_descriptor_id = ee_item.get("id")
8191 if ee_item.get("juju"):
8192 vca_name = ee_item["juju"].get("charm")
8193 if get_charm_name:
8194 charm_name = self.find_charm_name(db_nsr, str(vca_name))
8195 vca_type = (
8196 "lxc_proxy_charm"
8197 if ee_item["juju"].get("charm") is not None
8198 else "native_charm"
8199 )
8200 if ee_item["juju"].get("cloud") == "k8s":
8201 vca_type = "k8s_proxy_charm"
8202 elif ee_item["juju"].get("proxy") is False:
8203 vca_type = "native_charm"
8204 elif ee_item.get("helm-chart"):
8205 vca_name = ee_item["helm-chart"]
8206 vca_type = "helm-v3"
8207 else:
8208 self.logger.debug(
8209 logging_text + "skipping non juju neither charm configuration"
8210 )
8211 continue
8212
8213 vca_index = -1
8214 for vca_index, vca_deployed in enumerate(
8215 db_nsr["_admin"]["deployed"]["VCA"]
8216 ):
8217 if not vca_deployed:
8218 continue
8219 if (
8220 vca_deployed.get("member-vnf-index") == member_vnf_index
8221 and vca_deployed.get("vdu_id") == vdu_id
8222 and vca_deployed.get("kdu_name") == kdu_name
8223 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8224 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8225 ):
8226 break
8227 else:
8228 # not found, create one.
8229 target = (
8230 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8231 )
8232 if vdu_id:
8233 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8234 elif kdu_name:
8235 target += "/kdu/{}".format(kdu_name)
8236 vca_deployed = {
8237 "target_element": target,
8238 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8239 "member-vnf-index": member_vnf_index,
8240 "vdu_id": vdu_id,
8241 "kdu_name": kdu_name,
8242 "vdu_count_index": vdu_index,
8243 "operational-status": "init", # TODO revise
8244 "detailed-status": "", # TODO revise
8245 "step": "initial-deploy", # TODO revise
8246 "vnfd_id": vnfd_id,
8247 "vdu_name": vdu_name,
8248 "type": vca_type,
8249 "ee_descriptor_id": ee_descriptor_id,
8250 "charm_name": charm_name,
8251 }
8252 vca_index += 1
8253
8254 # create VCA and configurationStatus in db
8255 db_dict = {
8256 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8257 "configurationStatus.{}".format(vca_index): dict(),
8258 }
8259 self.update_db_2("nsrs", nsr_id, db_dict)
8260
8261 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8262
8263 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8264 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8265 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8266
8267 # Launch task
8268 task_n2vc = asyncio.ensure_future(
8269 self.heal_N2VC(
8270 logging_text=logging_text,
8271 vca_index=vca_index,
8272 nsi_id=nsi_id,
8273 db_nsr=db_nsr,
8274 db_vnfr=db_vnfr,
8275 vdu_id=vdu_id,
8276 kdu_name=kdu_name,
8277 vdu_index=vdu_index,
8278 deploy_params=deploy_params,
8279 config_descriptor=descriptor_config,
8280 base_folder=base_folder,
8281 nslcmop_id=nslcmop_id,
8282 stage=stage,
8283 vca_type=vca_type,
8284 vca_name=vca_name,
8285 ee_config_descriptor=ee_item,
8286 )
8287 )
8288 self.lcm_tasks.register(
8289 "ns",
8290 nsr_id,
8291 nslcmop_id,
8292 "instantiate_N2VC-{}".format(vca_index),
8293 task_n2vc,
8294 )
8295 task_instantiation_info[
8296 task_n2vc
8297 ] = self.task_name_deploy_vca + " {}.{}".format(
8298 member_vnf_index or "", vdu_id or ""
8299 )
8300
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Heal one deployed VCA execution environment (entry vca_index of
        <nsrs>._admin.deployed.VCA).

        For native charms: waits for the VM to be up, re-registers the
        execution environment in the VCA and reinstalls the configuration
        software. For proxy charms / helm: obtains the EE SSH public key when
        required and re-injects it into the healed VM. When
        deploy_params["run-day1"] is truthy, the initial (Day-1) config
        primitives are executed again.

        :raises LcmException: on any failure (original exception chained);
            the configuration status is written as BROKEN before raising.
        """
        nsr_id = db_nsr["_id"]
        # dotted DB path prefix of this VCA entry inside the nsr record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        # context passed to the VCA connector for its own status updates
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # last step executed; used in error reporting
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive, when present, provides the initial charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log full traceback for unexpected exception types
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8709
8710 async def _wait_heal_ro(
8711 self,
8712 nsr_id,
8713 timeout=600,
8714 ):
8715 start_time = time()
8716 while time() <= start_time + timeout:
8717 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8718 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8719 "operational-status"
8720 ]
8721 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8722 if operational_status_ro != "healing":
8723 break
8724 await asyncio.sleep(15)
8725 else: # timeout_ns_deploy
8726 raise NgRoException("Timeout waiting ns to deploy")
8727
8728 async def vertical_scale(self, nsr_id, nslcmop_id):
8729 """
8730 Vertical Scale the VDUs in a NS
8731
8732 :param: nsr_id: NS Instance ID
8733 :param: nslcmop_id: nslcmop ID of migrate
8734
8735 """
8736 # Try to lock HA task here
8737 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8738 if not task_is_locked_by_me:
8739 return
8740 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8741 self.logger.debug(logging_text + "Enter")
8742 # get all needed from database
8743 db_nslcmop = None
8744 db_nslcmop_update = {}
8745 nslcmop_operation_state = None
8746 old_db_update = {}
8747 q_filter = {}
8748 old_vdu_index = None
8749 old_flavor_id = None
8750 db_nsr_update = {}
8751 target = {}
8752 exc = None
8753 # in case of error, indicates what part of scale was failed to put nsr at error status
8754 start_deploy = time()
8755
8756 try:
8757 # wait for any previous tasks in process
8758 step = "Waiting for previous operations to terminate"
8759 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8760
8761 self._write_ns_status(
8762 nsr_id=nsr_id,
8763 ns_state=None,
8764 current_operation="VerticalScale",
8765 current_operation_id=nslcmop_id,
8766 )
8767 step = "Getting nslcmop from database"
8768 self.logger.debug(
8769 step + " after having waited for previous tasks to be completed"
8770 )
8771 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8772 operationParams = db_nslcmop.get("operationParams")
8773 # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
8774 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8775 db_flavor = db_nsr.get("flavor")
8776 db_flavor_index = str(len(db_flavor))
8777 change_vnf_flavor_data = operationParams["changeVnfFlavorData"]
8778 flavor_dict = change_vnf_flavor_data["additionalParams"]
8779 count_index = flavor_dict["vduCountIndex"]
8780 vdu_id_ref = flavor_dict["vduid"]
8781 flavor_dict_update = {
8782 "id": db_flavor_index,
8783 "memory-mb": flavor_dict["virtualMemory"],
8784 "name": f"{vdu_id_ref}-{count_index}-flv",
8785 "storage-gb": flavor_dict["sizeOfStorage"],
8786 "vcpu-count": flavor_dict["numVirtualCpu"],
8787 }
8788 db_flavor.append(flavor_dict_update)
8789 db_update = {}
8790 db_update["flavor"] = db_flavor
8791 ns_q_filter = {
8792 "_id": nsr_id,
8793 }
8794 self.db.set_one(
8795 "nsrs",
8796 q_filter=ns_q_filter,
8797 update_dict=db_update,
8798 fail_on_empty=True,
8799 )
8800 db_vnfr = self.db.get_one(
8801 "vnfrs", {"_id": change_vnf_flavor_data["vnfInstanceId"]}
8802 )
8803 for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
8804 if (
8805 vdur.get("count-index") == count_index
8806 and vdur.get("vdu-id-ref") == vdu_id_ref
8807 ):
8808 old_flavor_id = vdur.get("ns-flavor-id", 0)
8809 old_vdu_index = vdu_index
8810 filter_text = {
8811 "_id": change_vnf_flavor_data["vnfInstanceId"],
8812 "vdur.count-index": count_index,
8813 "vdur.vdu-id-ref": vdu_id_ref,
8814 }
8815 q_filter.update(filter_text)
8816 db_update = {}
8817 db_update[
8818 "vdur.{}.ns-flavor-id".format(vdu_index)
8819 ] = db_flavor_index
8820 self.db.set_one(
8821 "vnfrs",
8822 q_filter=q_filter,
8823 update_dict=db_update,
8824 fail_on_empty=True,
8825 )
8826 target = {}
8827 target.update(operationParams)
8828 desc = await self.RO.vertical_scale(nsr_id, target)
8829 self.logger.debug("RO return > {}".format(desc))
8830 action_id = desc["action_id"]
8831 await self._wait_ng_ro(
8832 nsr_id,
8833 action_id,
8834 nslcmop_id,
8835 start_deploy,
8836 self.timeout.verticalscale,
8837 operation="verticalscale",
8838 )
8839 except (
8840 NgRoException,
8841 ROclient.ROClientException,
8842 DbException,
8843 LcmException,
8844 ) as e:
8845 self.logger.error("Exit Exception {}".format(e))
8846 exc = e
8847 except asyncio.CancelledError:
8848 self.logger.error("Cancelled Exception while '{}'".format(step))
8849 exc = "Operation was cancelled"
8850 except Exception as e:
8851 exc = traceback.format_exc()
8852 self.logger.critical(
8853 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8854 )
8855 finally:
8856 self._write_ns_status(
8857 nsr_id=nsr_id,
8858 ns_state=None,
8859 current_operation="IDLE",
8860 current_operation_id=None,
8861 )
8862 if exc:
8863 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8864 nslcmop_operation_state = "FAILED"
8865 old_db_update[
8866 "vdur.{}.ns-flavor-id".format(old_vdu_index)
8867 ] = old_flavor_id
8868 else:
8869 nslcmop_operation_state = "COMPLETED"
8870 db_nslcmop_update["detailed-status"] = "Done"
8871 db_nsr_update["detailed-status"] = "Done"
8872
8873 self._write_op_status(
8874 op_id=nslcmop_id,
8875 stage="",
8876 error_message="",
8877 operation_state=nslcmop_operation_state,
8878 other_update=db_nslcmop_update,
8879 )
8880 if old_vdu_index and old_db_update != {}:
8881 self.logger.critical(
8882 "Reverting Old Flavor -- : {}".format(old_db_update)
8883 )
8884 self.db.set_one(
8885 "vnfrs",
8886 q_filter=q_filter,
8887 update_dict=old_db_update,
8888 fail_on_empty=True,
8889 )
8890 if nslcmop_operation_state:
8891 try:
8892 msg = {
8893 "nsr_id": nsr_id,
8894 "nslcmop_id": nslcmop_id,
8895 "operationState": nslcmop_operation_state,
8896 }
8897 await self.msg.aiowrite("ns", "verticalscaled", msg)
8898 except Exception as e:
8899 self.logger.error(
8900 logging_text + "kafka_write notification Exception {}".format(e)
8901 )
8902 self.logger.debug(logging_text + "Exit")
8903 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")