Feature 10957: Set up dedicated namespace for helm based EE and add client side TLS...
osm_lcm/ns.py (osm/LCM.git)
# -*- coding: utf-8 -*-

##
# Copyright 2018 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

import asyncio
import shutil
from typing import Any, Dict, List
import yaml
import logging
import logging.handlers
import traceback
import json
from jinja2 import (
    Environment,
    TemplateError,
    TemplateNotFound,
    StrictUndefined,
    UndefinedError,
    select_autoescape,
)

from osm_lcm import ROclient
from osm_lcm.data_utils.lcm_config import LcmCfg
from osm_lcm.data_utils.nsr import (
    get_deployed_kdu,
    get_deployed_vca,
    get_deployed_vca_list,
    get_nsd,
)
from osm_lcm.data_utils.vca import (
    DeployedComponent,
    DeployedK8sResource,
    DeployedVCA,
    EELevel,
    Relation,
    EERelation,
    safe_get_ee_relation,
)
from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import (
    LcmException,
    LcmExceptionNoMgmtIP,
    LcmBase,
    deep_get,
    get_iterable,
    populate_dict,
    check_juju_bundle_existence,
    get_charm_artifact_path,
    get_ee_id_parts,
    vld_to_ro_ip_profile,
)
from osm_lcm.data_utils.nsd import (
    get_ns_configuration_relation_list,
    get_vnf_profile,
    get_vnf_profiles,
)
from osm_lcm.data_utils.vnfd import (
    get_kdu,
    get_kdu_services,
    get_relation_list,
    get_vdu_list,
    get_vdu_profile,
    get_ee_sorted_initial_config_primitive_list,
    get_ee_sorted_terminate_config_primitive_list,
    get_kdu_list,
    get_virtual_link_profiles,
    get_vdu,
    get_configuration,
    get_vdu_index,
    get_scaling_aspect,
    get_number_of_instances,
    get_juju_ee_ref,
    get_kdu_resource_profile,
    find_software_version,
    check_helm_ee_in_ns,
)
from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.data_utils.vnfr import (
    get_osm_params,
    get_vdur_index,
    get_kdur,
    get_volumes_from_instantiation_params,
)
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.definitions import RelationEndpoint
from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector

from osm_common.dbbase import DbException
from osm_common.fsbase import FsException

from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
from osm_lcm.data_utils.wim import (
    get_sdn_ports,
    get_target_wim_attrs,
    select_feasible_wim_account,
)

from n2vc.n2vc_juju_conn import N2VCJujuConnector
from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException

from osm_lcm.lcm_helm_conn import LCMHelmConn
from osm_lcm.osm_config import OsmConfigBuilder
from osm_lcm.prometheus import parse_job

from copy import copy, deepcopy
from time import time
from uuid import uuid4

from random import SystemRandom

__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"


class NsLcm(LcmBase):
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    EE_TLS_NAME = "ee-tls"
    task_name_deploy_vca = "Deploying VCA"

    def __init__(self, msg, lcm_tasks, config: LcmCfg):
140 """
141 Init, Connect to database, filesystem storage, and messaging
142 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
143 :return: None
144 """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
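        # NOTE (illustrative comment, not in the original source): these three
        # maps implement a simple dispatch pattern; later code picks the backend
        # by descriptor type instead of branching, e.g.:
        #     self.k8scluster_map[cluster_type].status_kdu(...)
        #     self.vca_map[vca_type].create_execution_environment(...)
        #     self.op_status_map[operation](nsr_id, action_id)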

    @staticmethod
    def increment_ip_mac(ip_mac, vm_index=1):
        if not isinstance(ip_mac, str):
            return ip_mac
        try:
            # try with ipv4 look for last dot
            i = ip_mac.rfind(".")
            if i > 0:
                i += 1
                return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
            # try with ipv6 or mac look for last colon. Operate in hex
            i = ip_mac.rfind(":")
            if i > 0:
                i += 1
                # format in hex, len can be 2 for mac or 4 for ipv6
                return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
                    ip_mac[:i], int(ip_mac[i:], 16) + vm_index
                )
        except Exception:
            pass
        return None
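    # Illustrative behaviour (hedged comments, not in the original source); the
    # helper increments the last numeric component of an address:
    #     increment_ip_mac("10.0.0.5", vm_index=2)  -> "10.0.0.7"
    #     increment_ip_mac("52:54:00:aa:bb:0a", 1)  -> "52:54:00:aa:bb:0b"  (hex, zero-padded)
    #     increment_ip_mac("2001:db8::000a", 1)     -> "2001:db8::000b"
    # Anything unparsable returns None; non-strings are returned unchanged.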

    def _on_update_ro_db(self, nsrs_id, ro_descriptor):
        # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))

        try:
            # TODO filter RO descriptor fields...

            # write to database
            db_dict = dict()
            # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
            db_dict["deploymentStatus"] = ro_descriptor
            self.update_db_2("nsrs", nsrs_id, db_dict)

        except Exception as e:
            self.logger.warn(
                "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
            )

    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))

    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))

    @staticmethod
    def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
        try:
            env = Environment(
                undefined=StrictUndefined,
                autoescape=select_autoescape(default_for_string=True, default=True),
            )
            template = env.from_string(cloud_init_text)
            return template.render(additional_params or {})
        except UndefinedError as e:
            raise LcmException(
                "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
                "file, must be provided in the instantiation parameters inside the "
                "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
            )
        except (TemplateError, TemplateNotFound) as e:
            raise LcmException(
                "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                    vnfd_id, vdu_id, e
                )
            )
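    # Illustrative example (hedged; not in the original source). With
    #     cloud_init_text = "#cloud-config\nhostname: {{ name }}\n"
    #     additional_params = {"name": "vnf1"}
    # _parse_cloud_init renders "#cloud-config\nhostname: vnf1\n". Because of
    # StrictUndefined, a variable without a value raises LcmException asking for
    # it in the 'additionalParamsForVnf/Vdu' instantiation block.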

    def _get_vdu_cloud_init_content(self, vdu, vnfd):
        cloud_init_content = cloud_init_file = None
        try:
            if vdu.get("cloud-init-file"):
                base_folder = vnfd["_admin"]["storage"]
                if base_folder["pkg-dir"]:
                    cloud_init_file = "{}/{}/cloud_init/{}".format(
                        base_folder["folder"],
                        base_folder["pkg-dir"],
                        vdu["cloud-init-file"],
                    )
                else:
                    cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                        base_folder["folder"],
                        vdu["cloud-init-file"],
                    )
                with self.fs.file_open(cloud_init_file, "r") as ci_file:
                    cloud_init_content = ci_file.read()
            elif vdu.get("cloud-init"):
                cloud_init_content = vdu["cloud-init"]

            return cloud_init_content
        except FsException as e:
            raise LcmException(
                "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                    vnfd["id"], vdu["id"], cloud_init_file, e
                )
            )

    def _get_vdu_additional_params(self, db_vnfr, vdu_id):
        vdur = next(
            (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
        )
        additional_params = vdur.get("additionalParams")
        return parse_yaml_strings(additional_params)

    def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
        """
        Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
        :param vnfd: input vnfd
        :param new_id: overrides vnf id if provided
        :param additionalParams: Instantiation params for VNFs provided
        :param nsrId: Id of the NSR
        :return: copy of vnfd
        """
        vnfd_RO = deepcopy(vnfd)
        # remove unused by RO configuration, monitoring, scaling and internal keys
        vnfd_RO.pop("_id", None)
        vnfd_RO.pop("_admin", None)
        vnfd_RO.pop("monitoring-param", None)
        vnfd_RO.pop("scaling-group-descriptor", None)
        vnfd_RO.pop("kdu", None)
        vnfd_RO.pop("k8s-cluster", None)
        if new_id:
            vnfd_RO["id"] = new_id

        # remove cloud-init and cloud-init-file; their content is parsed and sent to RO separately
        for vdu in get_iterable(vnfd_RO, "vdu"):
            vdu.pop("cloud-init-file", None)
            vdu.pop("cloud-init", None)
        return vnfd_RO

    @staticmethod
    def ip_profile_2_RO(ip_profile):
        RO_ip_profile = deepcopy(ip_profile)
        if "dns-server" in RO_ip_profile:
            if isinstance(RO_ip_profile["dns-server"], list):
                RO_ip_profile["dns-address"] = []
                for ds in RO_ip_profile.pop("dns-server"):
                    RO_ip_profile["dns-address"].append(ds["address"])
            else:
                RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
        if RO_ip_profile.get("ip-version") == "ipv4":
            RO_ip_profile["ip-version"] = "IPv4"
        if RO_ip_profile.get("ip-version") == "ipv6":
            RO_ip_profile["ip-version"] = "IPv6"
        if "dhcp-params" in RO_ip_profile:
            RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
        return RO_ip_profile
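    # Illustrative mapping (hedged; values are hypothetical, not from the
    # original source):
    #     {"ip-version": "ipv4",
    #      "dns-server": [{"address": "8.8.8.8"}],
    #      "dhcp-params": {"enabled": True}}
    # becomes
    #     {"ip-version": "IPv4",
    #      "dns-address": ["8.8.8.8"],
    #      "dhcp": {"enabled": True}}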

    def _get_ro_vim_id_for_vim_account(self, vim_account):
        db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
        if db_vim["_admin"]["operationalState"] != "ENABLED":
            raise LcmException(
                "VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]
                )
            )
        RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
        return RO_vim_id

    def get_ro_wim_id_for_wim_account(self, wim_account):
        if isinstance(wim_account, str):
            db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
            if db_wim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException(
                    "WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]
                    )
                )
            RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
            return RO_wim_id
        else:
            return wim_account

    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only the first vdu can be the management of the vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0! Creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
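    # Illustrative note (hedged; not in the original source): db.set_one and
    # update_db_2 accept dot-notation keys for partial updates, so scale_vnfr
    # can mark a single array element, e.g.:
    #     db_update = {"_admin.modified": time(), "vdur.3.status": "DELETING"}
    # updates only vdur[3].status instead of rewriting the whole vnfr document.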

    def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
        """
        Updates database nsr with the RO info for the created vld
        :param ns_update_nsr: dictionary to be filled with the updated info
        :param db_nsr: content of db_nsr. This is also modified
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """

        for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
            for net_RO in get_iterable(nsr_desc_RO, "nets"):
                if vld["id"] != net_RO.get("ns_net_osm_id"):
                    continue
                vld["vim-id"] = net_RO.get("vim_net_id")
                vld["name"] = net_RO.get("vim_name")
                vld["status"] = net_RO.get("status")
                vld["status-detailed"] = net_RO.get("error_msg")
                ns_update_nsr["vld.{}".format(vld_index)] = vld
                break
            else:
                raise LcmException(
                    "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
                )

    def set_vnfr_at_error(self, db_vnfrs, error_text):
        try:
            for db_vnfr in db_vnfrs.values():
                vnfr_update = {"status": "ERROR"}
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    if "status" not in vdur:
                        vdur["status"] = "ERROR"
                        vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                        if error_text:
                            vdur["status-detailed"] = str(error_text)
                            vnfr_update[
                                "vdur.{}.status-detailed".format(vdu_index)
                            ] = "ERROR"
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        except DbException as e:
            self.logger.error("Cannot update vnf. {}".format(e))

    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if there are no VDUs, there is no ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )

    def _get_ns_config_info(self, nsr_id):
        """
        Generates a mapping between vnf,vdu elements and the N2VC id
        :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
        :return: a dictionary with {osm-config-mapping: {}} where its element contains:
            "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
            "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
        """
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        mapping = {}
        ns_config_info = {"osm-config-mapping": mapping}
        for vca in vca_deployed_list:
            if not vca["member-vnf-index"]:
                continue
            if not vca["vdu_id"]:
                mapping[vca["member-vnf-index"]] = vca["application"]
            else:
                mapping[
                    "{}.{}.{}".format(
                        vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
                    )
                ] = vca["application"]
        return ns_config_info
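    # Illustrative output (hedged; names and values here are hypothetical):
    #     {"osm-config-mapping": {
    #         "1": "app-vnf-1",               # vnf-level configuration
    #         "1.mgmtVM.0": "app-vnf-1-vdu",  # vdu-level configuration, replica 0
    #     }}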

    async def _instantiate_ng_ro(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
        start_deploy,
        timeout_ns_deploy,
    ):
        db_vims = {}

        def get_vim_account(vim_account_id):
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        # modify target_vld info with instantiation parameters
        def parse_vld_instantiation_params(
            target_vim, target_vld, vld_params, target_sdn
        ):
            if vld_params.get("ip-profile"):
                target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
                    vld_params["ip-profile"]
                )
            if vld_params.get("provider-network"):
                target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                    "provider-network"
                ]
                if "sdn-ports" in vld_params["provider-network"] and target_sdn:
                    target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                        "provider-network"
                    ]["sdn-ports"]

            # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
            # if wim_account_id is specified in vld_params, validate if it is feasible.
            wim_account_id, db_wim = select_feasible_wim_account(
                db_nsr, db_vnfrs, target_vld, vld_params, self.logger
            )

            if wim_account_id:
                # WIM is needed and a feasible one was found, populate WIM target and SDN ports
                self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
                # update vld_params with correct WIM account Id
                vld_params["wimAccountId"] = wim_account_id

                target_wim = "wim:{}".format(wim_account_id)
                target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
                sdn_ports = get_sdn_ports(vld_params, db_wim)
                if len(sdn_ports) > 0:
                    target_vld["vim_info"][target_wim] = target_wim_attrs
                    target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports

                self.logger.debug(
                    "Target VLD with WIM data: {:s}".format(str(target_vld))
                )

            for param in ("vim-network-name", "vim-network-id"):
                if vld_params.get(param):
                    if isinstance(vld_params[param], dict):
                        for vim, vim_net in vld_params[param].items():
                            other_target_vim = "vim:" + vim
                            populate_dict(
                                target_vld["vim_info"],
                                (other_target_vim, param.replace("-", "_")),
                                vim_net,
                            )
                    else:  # isinstance str
                        target_vld["vim_info"][target_vim][
                            param.replace("-", "_")
                        ] = vld_params[param]
            if vld_params.get("common_id"):
                target_vld["common_id"] = vld_params.get("common_id")

        # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
        def update_ns_vld_target(target, ns_params):
            for vnf_params in ns_params.get("vnf", ()):
                if vnf_params.get("vimAccountId"):
                    target_vnf = next(
                        (
                            vnfr
                            for vnfr in db_vnfrs.values()
                            if vnf_params["member-vnf-index"]
                            == vnfr["member-vnf-index-ref"]
                        ),
                        None,
                    )
                    vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
                    if not vdur:
                        return
                    for a_index, a_vld in enumerate(target["ns"]["vld"]):
                        target_vld = find_in_list(
                            get_iterable(vdur, "interfaces"),
                            lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                        )

                        vld_params = find_in_list(
                            get_iterable(ns_params, "vld"),
                            lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
                        )
                        if target_vld:
                            if vnf_params.get("vimAccountId") not in a_vld.get(
                                "vim_info", {}
                            ):
                                target_vim_network_list = [
                                    v for _, v in a_vld.get("vim_info").items()
                                ]
                                target_vim_network_name = next(
                                    (
                                        item.get("vim_network_name", "")
                                        for item in target_vim_network_list
                                    ),
                                    "",
                                )

                                target["ns"]["vld"][a_index].get("vim_info").update(
                                    {
                                        "vim:{}".format(vnf_params["vimAccountId"]): {
                                            "vim_network_name": target_vim_network_name,
                                        }
                                    }
                                )

                                if vld_params:
                                    for param in ("vim-network-name", "vim-network-id"):
                                        if vld_params.get(param) and isinstance(
                                            vld_params[param], dict
                                        ):
                                            for vim, vim_net in vld_params[
                                                param
                                            ].items():
                                                other_target_vim = "vim:" + vim
                                                populate_dict(
                                                    target["ns"]["vld"][a_index].get(
                                                        "vim_info"
                                                    ),
                                                    (
                                                        other_target_vim,
                                                        param.replace("-", "_"),
                                                    ),
                                                    vim_net,
                                                )

        nslcmop_id = db_nslcmop["_id"]
        target = {
            "name": db_nsr["name"],
            "ns": {"vld": []},
            "vnf": [],
            "image": deepcopy(db_nsr["image"]),
            "flavor": deepcopy(db_nsr["flavor"]),
            "action_id": nslcmop_id,
            "cloud_init_content": {},
        }
        for image in target["image"]:
            image["vim_info"] = {}
        for flavor in target["flavor"]:
            flavor["vim_info"] = {}
        if db_nsr.get("shared-volumes"):
            target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
            for shared_volumes in target["shared-volumes"]:
                shared_volumes["vim_info"] = {}
        if db_nsr.get("affinity-or-anti-affinity-group"):
            target["affinity-or-anti-affinity-group"] = deepcopy(
                db_nsr["affinity-or-anti-affinity-group"]
            )
            for affinity_or_anti_affinity_group in target[
                "affinity-or-anti-affinity-group"
            ]:
                affinity_or_anti_affinity_group["vim_info"] = {}

        if db_nslcmop.get("lcmOperationType") != "instantiate":
            # get parameters of instantiation:
            db_nslcmop_instantiate = self.db.get_list(
                "nslcmops",
                {
                    "nsInstanceId": db_nslcmop["nsInstanceId"],
                    "lcmOperationType": "instantiate",
                },
            )[-1]
            ns_params = db_nslcmop_instantiate.get("operationParams")
        else:
            ns_params = db_nslcmop.get("operationParams")
        ssh_keys_instantiation = ns_params.get("ssh_keys") or []
        ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])

        cp2target = {}
        for vld_index, vld in enumerate(db_nsr.get("vld")):
            target_vim = "vim:{}".format(ns_params["vimAccountId"])
            target_vld = {
                "id": vld["id"],
                "name": vld["name"],
                "mgmt-network": vld.get("mgmt-network", False),
                "type": vld.get("type"),
                "vim_info": {
                    target_vim: {
                        "vim_network_name": vld.get("vim-network-name"),
                        "vim_account_id": ns_params["vimAccountId"],
                    }
                },
            }
            # check if this network needs SDN assist
            if vld.get("pci-interfaces"):
                db_vim = get_vim_account(ns_params["vimAccountId"])
                if vim_config := db_vim.get("config"):
                    if sdnc_id := vim_config.get("sdn-controller"):
                        sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        target_vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

            nsd_vnf_profiles = get_vnf_profiles(nsd)
            for nsd_vnf_profile in nsd_vnf_profiles:
                for cp in nsd_vnf_profile["virtual-link-connectivity"]:
                    if cp["virtual-link-profile-id"] == vld["id"]:
                        cp2target[
                            "member_vnf:{}.{}".format(
                                cp["constituent-cpd-id"][0][
                                    "constituent-base-element-id"
                                ],
                                cp["constituent-cpd-id"][0]["constituent-cpd-id"],
                            )
                        ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)

            # check at nsd descriptor, if there is an ip-profile
            vld_params = {}
            nsd_vlp = find_in_list(
                get_virtual_link_profiles(nsd),
                lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
                == vld["id"],
            )
            if (
                nsd_vlp
                and nsd_vlp.get("virtual-link-protocol-data")
                and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
            ):
                vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
                    "l3-protocol-data"
                ]

            # update vld_params with instantiation params
            vld_instantiation_params = find_in_list(
                get_iterable(ns_params, "vld"),
                lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
            )
            if vld_instantiation_params:
                vld_params.update(vld_instantiation_params)
            parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
            target["ns"]["vld"].append(target_vld)
        # Update the target ns_vld if vnf vim_account is overridden by instantiation params
        update_ns_vld_target(target, ns_params)

        for vnfr in db_vnfrs.values():
            vnfd = find_in_list(
                db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
            )
            vnf_params = find_in_list(
                get_iterable(ns_params, "vnf"),
                lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
            )
            target_vnf = deepcopy(vnfr)
            target_vim = "vim:{}".format(vnfr["vim-account-id"])
            for vld in target_vnf.get("vld", ()):
                # check if connected to a ns.vld, to fill target
                vnf_cp = find_in_list(
                    vnfd.get("int-virtual-link-desc", ()),
                    lambda cpd: cpd.get("id") == vld["id"],
                )
                if vnf_cp:
                    ns_cp = "member_vnf:{}.{}".format(
                        vnfr["member-vnf-index-ref"], vnf_cp["id"]
                    )
                    if cp2target.get(ns_cp):
                        vld["target"] = cp2target[ns_cp]

                vld["vim_info"] = {
                    target_vim: {"vim_network_name": vld.get("vim-network-name")}
                }
                # check if this network needs SDN assist
                target_sdn = None
                if vld.get("pci-interfaces"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    sdnc_id = db_vim["config"].get("sdn-controller")
                    if sdnc_id:
                        sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

                # check at vnfd descriptor, if there is an ip-profile
                vld_params = {}
                vnfd_vlp = find_in_list(
                    get_virtual_link_profiles(vnfd),
                    lambda a_link_profile: a_link_profile["id"] == vld["id"],
                )
                if (
                    vnfd_vlp
                    and vnfd_vlp.get("virtual-link-protocol-data")
                    and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                ):
                    vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
                        "l3-protocol-data"
                    ]
                # update vld_params with instantiation params
                if vnf_params:
                    vld_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "internal-vld"),
                        lambda i_vld: i_vld["name"] == vld["id"],
                    )
                    if vld_instantiation_params:
                        vld_params.update(vld_instantiation_params)
                parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)

            vdur_list = []
            for vdur in target_vnf.get("vdur", ()):
                if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
                    continue  # This vdu must not be created
                vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}

                self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))

                if ssh_keys_all:
                    vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
                    vnf_configuration = get_configuration(vnfd, vnfd["id"])
                    if (
                        vdu_configuration
                        and vdu_configuration.get("config-access")
                        and vdu_configuration.get("config-access").get("ssh-access")
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vdu_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif (
                        vnf_configuration
                        and vnf_configuration.get("config-access")
                        and vnf_configuration.get("config-access").get("ssh-access")
                        and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vnf_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif ssh_keys_instantiation and find_in_list(
                        vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
                    ):
                        vdur["ssh-keys"] = ssh_keys_instantiation

                self.logger.debug("NS > vdur > {}".format(vdur))

                vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
                # cloud-init
                if vdud.get("cloud-init-file"):
                    vdur["cloud-init"] = "{}:file:{}".format(
                        vnfd["_id"], vdud.get("cloud-init-file")
                    )
                    # read the file and put its content at target.cloud_init_content, so ng_ro avoids using the shared package system
                    if vdur["cloud-init"] not in target["cloud_init_content"]:
                        base_folder = vnfd["_admin"]["storage"]
                        if base_folder["pkg-dir"]:
                            cloud_init_file = "{}/{}/cloud_init/{}".format(
                                base_folder["folder"],
                                base_folder["pkg-dir"],
                                vdud.get("cloud-init-file"),
                            )
                        else:
                            cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                                base_folder["folder"],
                                vdud.get("cloud-init-file"),
                            )
                        with self.fs.file_open(cloud_init_file, "r") as ci_file:
                            target["cloud_init_content"][
                                vdur["cloud-init"]
                            ] = ci_file.read()
                elif vdud.get("cloud-init"):
                    vdur["cloud-init"] = "{}:vdu:{}".format(
                        vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
                    )
                    # put content at target.cloud_init_content, so ng_ro does not need to read the vnfd descriptor
                    target["cloud_init_content"][vdur["cloud-init"]] = vdud[
                        "cloud-init"
                    ]
                vdur["additionalParams"] = vdur.get("additionalParams") or {}
                deploy_params_vdu = self._format_additional_params(
                    vdur.get("additionalParams") or {}
                )
                deploy_params_vdu["OSM"] = get_osm_params(
                    vnfr, vdur["vdu-id-ref"], vdur["count-index"]
                )
                vdur["additionalParams"] = deploy_params_vdu

                # flavor
                ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                if target_vim not in ns_flavor["vim_info"]:
                    ns_flavor["vim_info"][target_vim] = {}

                # deal with images
                # in case alternative images are provided we must check if they should be applied
                # for the vim_type, modify the vim_type taking into account
                ns_image_id = int(vdur["ns-image-id"])
                if vdur.get("alt-image-ids"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    vim_type = db_vim["vim_type"]
                    for alt_image_id in vdur.get("alt-image-ids"):
                        ns_alt_image = target["image"][int(alt_image_id)]
                        if vim_type == ns_alt_image.get("vim-type"):
                            # must use alternative image
                            self.logger.debug(
                                "use alternative image id: {}".format(alt_image_id)
                            )
                            ns_image_id = alt_image_id
                            vdur["ns-image-id"] = ns_image_id
                            break
                ns_image = target["image"][int(ns_image_id)]
                if target_vim not in ns_image["vim_info"]:
                    ns_image["vim_info"][target_vim] = {}

                # Affinity groups
                if vdur.get("affinity-or-anti-affinity-group-id"):
                    for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
                        ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
                        if target_vim not in ns_ags["vim_info"]:
                            ns_ags["vim_info"][target_vim] = {}

                # shared-volumes
                if vdur.get("shared-volumes-id"):
                    for sv_id in vdur["shared-volumes-id"]:
                        ns_sv = find_in_list(
                            target["shared-volumes"], lambda sv: sv_id in sv["id"]
                        )
                        if ns_sv:
                            ns_sv["vim_info"][target_vim] = {}

                vdur["vim_info"] = {target_vim: {}}
                # instantiation parameters
                if vnf_params:
                    vdu_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "vdu"),
                        lambda i_vdu: i_vdu["id"] == vdud["id"],
                    )
                    if vdu_instantiation_params:
                        # Parse the vdu_volumes from the instantiation params
                        vdu_volumes = get_volumes_from_instantiation_params(
                            vdu_instantiation_params, vdud
                        )
                        vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
                        vdur["additionalParams"]["OSM"][
                            "vim_flavor_id"
                        ] = vdu_instantiation_params.get("vim-flavor-id")
                vdur_list.append(vdur)
            target_vnf["vdur"] = vdur_list
            target["vnf"].append(target_vnf)

        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
        desc = await self.RO.deploy(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id,
            action_id,
            nslcmop_id,
            start_deploy,
            timeout_ns_deploy,
            stage,
            operation="instantiation",
        )

        # Updating NSR
        db_nsr_update = {
            "_admin.deployed.RO.operational-status": "running",
            "detailed-status": " ".join(stage),
        }
        # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(
            logging_text + "ns deployed at RO. RO_id={}".format(action_id)
        )
        return

    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15)
        else:  # timeout_ns_deploy
            raise NgRoException("Timeout waiting for ns to deploy")
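    # Illustrative status payloads polled above (hedged; field names are the
    # ones this method reads, values are hypothetical):
    #     {"status": "BUILD", "details": "creating VMs"}     -> keep polling
    #     {"status": "DONE", "details": "..."}               -> deployed, stop
    #     {"status": "FAILED", "details": "quota exceeded"}  -> NgRoException
    # Polling runs every 15 s until `timeout` (600 s by default) elapses.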

    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return

    async def instantiate_RO(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
    ):
        """
        Instantiate at RO
        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param nsd: database content of ns descriptor
        :param db_nsr: database content of ns record
        :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
        :param db_vnfrs:
        :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
        :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
        :return: None or exception
        """
        try:
            start_deploy = time()
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.ns_deploy

            # Check for and optionally request placement optimization. Database will be updated if placement activated
            stage[2] = "Waiting for Placement."
            if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
                # in case of placement, change ns_params["vimAccountId"] if it is not present at any vnfr
                for vnfr in db_vnfrs.values():
                    if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
                        break
                else:
                    ns_params["vimAccountId"] = vnfr["vim-account-id"]

            return await self._instantiate_ng_ro(
                logging_text,
                nsr_id,
                nsd,
                db_nsr,
                db_nslcmop,
                db_vnfrs,
                db_vnfds,
                n2vc_key_list,
                stage,
                start_deploy,
                timeout_ns_deploy,
            )
        except Exception as e:
            stage[2] = "ERROR deploying at VIM"
            self.set_vnfr_at_error(db_vnfrs, str(e))
            self.logger.error(
                "Error deploying at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise

    async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
        """
        Wait for kdu to be up, get ip address
        :param logging_text: prefix to use for logging
        :param nsr_id:
        :param vnfr_id:
        :param kdu_name:
        :return: IP address, K8s services
        """

        # self.logger.debug(logging_text + "Starting wait_kdu_up")
        nb_tries = 0
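        # NOTE (descriptive comment, not in the original source): up to 360
        # iterations with a 10 s sleep each, i.e. roughly a 1 hour overall wait.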

        while nb_tries < 360:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
            kdur = next(
                (
                    x
                    for x in get_iterable(db_vnfr, "kdur")
                    if x.get("kdu-name") == kdu_name
                ),
                None,
            )
            if not kdur:
                raise LcmException(
                    "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
                )
            if kdur.get("status"):
                if kdur["status"] in ("READY", "ENABLED"):
                    return kdur.get("ip-address"), kdur.get("services")
                else:
                    raise LcmException(
                        "target KDU={} is in error state".format(kdu_name)
                    )

            await asyncio.sleep(10)
            nb_tries += 1
        raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))

    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix to use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id:
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        target_vdu_id = None
        ro_retries = 0

        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
1621 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                break

        return ip_address

    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have finished. The NS waits for VNFs and VDUs; VNFs wait for VDUs.
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        timeout = 300
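        # NOTE (descriptive comment, not in the original source): `timeout`
        # counts loop iterations, not seconds; with the 10 s sleep below,
        # 300 iterations allow roughly 50 minutes of waiting.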
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")

    def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
        vca_id = None
        if db_vnfr:
            vca_id = deep_get(db_vnfr, ("vca-id",))
        elif db_nsr:
            vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
            vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
        return vca_id
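    # Illustrative precedence (hedged; not in the original source): a "vca-id"
    # set on the vnfr wins; otherwise the "vca" registered on the VIM account
    # used to instantiate the NS is returned; a None result is assumed to let
    # callers fall back to the default VCA connection.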

    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        kdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=nsr_id,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        chart_model=vca_name,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
1853 step = "Waiting to VM being up and getting IP address"
1854 self.logger.debug(logging_text + step)
1855 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1856 logging_text,
1857 nsr_id,
1858 vnfr_id,
1859 vdu_id,
1860 vdu_index,
1861 user=None,
1862 pub_key=None,
1863 )
1864 credentials = {"hostname": rw_mgmt_ip}
1865 # get username
1866 username = deep_get(
1867 config_descriptor, ("config-access", "ssh-access", "default-user")
1868 )
1869 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user
1870 # are merged. Meanwhile, get the username from the initial-config-primitive
1871 if not username and initial_config_primitive_list:
1872 for config_primitive in initial_config_primitive_list:
1873 for param in config_primitive.get("parameter", ()):
1874 if param["name"] == "ssh-username":
1875 username = param["value"]
1876 break
1877 if not username:
1878 raise LcmException(
1879 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1880 "'config-access.ssh-access.default-user'"
1881 )
1882 credentials["username"] = username
1883 # n2vc_redesign STEP 3.2
1884
1885 self._write_configuration_status(
1886 nsr_id=nsr_id,
1887 vca_index=vca_index,
1888 status="REGISTERING",
1889 element_under_configuration=element_under_configuration,
1890 element_type=element_type,
1891 )
1892
1893 step = "register execution environment {}".format(credentials)
1894 self.logger.debug(logging_text + step)
1895 ee_id = await self.vca_map[vca_type].register_execution_environment(
1896 credentials=credentials,
1897 namespace=namespace,
1898 db_dict=db_dict,
1899 vca_id=vca_id,
1900 )
1901
1902 # for compatibility with MON/POL modules, they need the model and the application name at database
1903 # TODO ask MON/POL whether it is still necessary to assume the format "model_name.application_name"
1904 ee_id_parts = ee_id.split(".")
1905 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1906 if len(ee_id_parts) >= 2:
1907 model_name = ee_id_parts[0]
1908 application_name = ee_id_parts[1]
1909 db_nsr_update[db_update_entry + "model"] = model_name
1910 db_nsr_update[db_update_entry + "application"] = application_name
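# e.g. (illustrative) an ee_id such as "<model_name>.<application_name>.<machine_id>"
# yields the model and the application from its first two dot-separated parts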
1911
1912 # n2vc_redesign STEP 3.3
1913 step = "Install configuration Software"
1914
1915 self._write_configuration_status(
1916 nsr_id=nsr_id,
1917 vca_index=vca_index,
1918 status="INSTALLING SW",
1919 element_under_configuration=element_under_configuration,
1920 element_type=element_type,
1921 other_update=db_nsr_update,
1922 )
1923
1924 # TODO check if already done
1925 self.logger.debug(logging_text + step)
1926 config = None
1927 if vca_type == "native_charm":
1928 config_primitive = next(
1929 (p for p in initial_config_primitive_list if p["name"] == "config"),
1930 None,
1931 )
1932 if config_primitive:
1933 config = self._map_primitive_params(
1934 config_primitive, {}, deploy_params
1935 )
1936 num_units = 1
1937 if vca_type == "lxc_proxy_charm":
1938 if element_type == "NS":
1939 num_units = db_nsr.get("config-units") or 1
1940 elif element_type == "VNF":
1941 num_units = db_vnfr.get("config-units") or 1
1942 elif element_type == "VDU":
1943 for v in db_vnfr["vdur"]:
1944 if vdu_id == v["vdu-id-ref"]:
1945 num_units = v.get("config-units") or 1
1946 break
1947 if vca_type != "k8s_proxy_charm":
1948 await self.vca_map[vca_type].install_configuration_sw(
1949 ee_id=ee_id,
1950 artifact_path=artifact_path,
1951 db_dict=db_dict,
1952 config=config,
1953 num_units=num_units,
1954 vca_id=vca_id,
1955 vca_type=vca_type,
1956 )
1957
1958 # write in db flag of configuration_sw already installed
1959 self.update_db_2(
1960 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1961 )
1962
1963 # add relations for this VCA (wait for other peers related with this VCA)
1964 is_relation_added = await self._add_vca_relations(
1965 logging_text=logging_text,
1966 nsr_id=nsr_id,
1967 vca_type=vca_type,
1968 vca_index=vca_index,
1969 )
1970
1971 if not is_relation_added:
1972 raise LcmException("Relations could not be added to VCA.")
1973
1974 # if SSH access is required, then get the execution environment SSH public key
1975 # (for native charms we have already waited for the VM to be up)
1976 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1977 pub_key = None
1978 user = None
1979 # self.logger.debug("get ssh key block")
1980 if deep_get(
1981 config_descriptor, ("config-access", "ssh-access", "required")
1982 ):
1983 # self.logger.debug("ssh key needed")
1984 # Needed to inject a ssh key
1985 user = deep_get(
1986 config_descriptor,
1987 ("config-access", "ssh-access", "default-user"),
1988 )
1989 step = "Install configuration Software, getting public ssh key"
1990 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1991 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1992 )
1993
1994 step = "Insert public key into VM user={} ssh_key={}".format(
1995 user, pub_key
1996 )
1997 else:
1998 # self.logger.debug("no need to get ssh key")
1999 step = "Waiting to VM being up and getting IP address"
2000 self.logger.debug(logging_text + step)
2001
2002 # default rw_mgmt_ip to None to avoid the variable being undefined
2003 rw_mgmt_ip = None
2004
2005 # n2vc_redesign STEP 5.1
2006 # wait for RO (ip-address) and insert pub_key into VM
2007 if vnfr_id:
2008 if kdu_name:
2009 rw_mgmt_ip, services = await self.wait_kdu_up(
2010 logging_text, nsr_id, vnfr_id, kdu_name
2011 )
2012 vnfd = self.db.get_one(
2013 "vnfds_revisions",
2014 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2015 )
2016 kdu = get_kdu(vnfd, kdu_name)
2017 kdu_services = [
2018 service["name"] for service in get_kdu_services(kdu)
2019 ]
2020 exposed_services = []
2021 for service in services:
2022 if any(s in service["name"] for s in kdu_services):
2023 exposed_services.append(service)
2024 await self.vca_map[vca_type].exec_primitive(
2025 ee_id=ee_id,
2026 primitive_name="config",
2027 params_dict={
2028 "osm-config": json.dumps(
2029 OsmConfigBuilder(
2030 k8s={"services": exposed_services}
2031 ).build()
2032 )
2033 },
2034 vca_id=vca_id,
2035 )
2036
2037 # This verification is needed in order to avoid trying to add a public key
2038 # to a VM when the VNF is actually a KNF (in the edge case where the user creates a VCA
2039 # for a KNF and not for its KDUs, the previous check evaluates to False and the code
2040 # reaches this block, so it must be verified whether the VNF is really a VNF
2041 # or a KNF)
2042 elif db_vnfr.get("vdur"):
2043 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2044 logging_text,
2045 nsr_id,
2046 vnfr_id,
2047 vdu_id,
2048 vdu_index,
2049 user=user,
2050 pub_key=pub_key,
2051 )
2052
2053 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2054
2055 # store rw_mgmt_ip in deploy params for later replacement
2056 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2057
2058 # n2vc_redesign STEP 6 Execute initial config primitive
2059 step = "execute initial config primitive"
2060
2061 # wait for dependent primitives execution (NS -> VNF -> VDU)
2062 if initial_config_primitive_list:
2063 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2064
2065 # stage, depending on the element type: vdu, kdu, vnf or ns
2066 my_vca = vca_deployed_list[vca_index]
2067 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2068 # VDU or KDU
2069 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2070 elif my_vca.get("member-vnf-index"):
2071 # VNF
2072 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2073 else:
2074 # NS
2075 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2076
2077 self._write_configuration_status(
2078 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2079 )
2080
2081 self._write_op_status(op_id=nslcmop_id, stage=stage)
2082
2083 check_if_terminated_needed = True
2084 for initial_config_primitive in initial_config_primitive_list:
2085 # adding information on the vca_deployed if it is an NS execution environment
2086 if not vca_deployed["member-vnf-index"]:
2087 deploy_params["ns_config_info"] = json.dumps(
2088 self._get_ns_config_info(nsr_id)
2089 )
2090 # TODO check if already done
2091 primitive_params_ = self._map_primitive_params(
2092 initial_config_primitive, {}, deploy_params
2093 )
2094
2095 step = "execute primitive '{}' params '{}'".format(
2096 initial_config_primitive["name"], primitive_params_
2097 )
2098 self.logger.debug(logging_text + step)
2099 await self.vca_map[vca_type].exec_primitive(
2100 ee_id=ee_id,
2101 primitive_name=initial_config_primitive["name"],
2102 params_dict=primitive_params_,
2103 db_dict=db_dict,
2104 vca_id=vca_id,
2105 vca_type=vca_type,
2106 )
2107 # Once some primitive has been executed, check and record in the db whether terminate config primitives will be needed
2108 if check_if_terminated_needed:
2109 if config_descriptor.get("terminate-config-primitive"):
2110 self.update_db_2(
2111 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2112 )
2113 check_if_terminated_needed = False
2114
2115 # TODO register in database that primitive is done
2116
2117 # STEP 7 Configure metrics
2118 if vca_type == "helm" or vca_type == "helm-v3":
2119 # TODO: review for those cases where the helm chart is a reference and
2120 # is not part of the NF package
2121 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2122 ee_id=ee_id,
2123 artifact_path=artifact_path,
2124 ee_config_descriptor=ee_config_descriptor,
2125 vnfr_id=vnfr_id,
2126 nsr_id=nsr_id,
2127 target_ip=rw_mgmt_ip,
2128 element_type=element_type,
2129 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2130 vdu_id=vdu_id,
2131 vdu_index=vdu_index,
2132 kdu_name=kdu_name,
2133 kdu_index=kdu_index,
2134 )
2135 if prometheus_jobs:
2136 self.update_db_2(
2137 "nsrs",
2138 nsr_id,
2139 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2140 )
2141
2142 for job in prometheus_jobs:
2143 self.db.set_one(
2144 "prometheus_jobs",
2145 {"job_name": job["job_name"]},
2146 job,
2147 upsert=True,
2148 fail_on_empty=False,
2149 )
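# Jobs are upserted keyed by "job_name", so re-running the instantiation does
# not duplicate scrape configurations (sketch of the intent; the job dicts
# themselves come from extract_prometheus_scrape_jobs above).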
2150
2151 step = "instantiated at VCA"
2152 self.logger.debug(logging_text + step)
2153
2154 self._write_configuration_status(
2155 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2156 )
2157
2158 except Exception as e: # TODO not use Exception but N2VC exception
2159 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2160 if not isinstance(
2161 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2162 ):
2163 self.logger.error(
2164 "Exception while {} : {}".format(step, e), exc_info=True
2165 )
2166 self._write_configuration_status(
2167 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2168 )
2169 raise LcmException("{}. {}".format(step, e)) from e
2170
2171 def _write_ns_status(
2172 self,
2173 nsr_id: str,
2174 ns_state: str,
2175 current_operation: str,
2176 current_operation_id: str,
2177 error_description: str = None,
2178 error_detail: str = None,
2179 other_update: dict = None,
2180 ):
2181 """
2182 Update db_nsr fields.
2183 :param nsr_id:
2184 :param ns_state:
2185 :param current_operation:
2186 :param current_operation_id:
2187 :param error_description:
2188 :param error_detail:
2189 :param other_update: other required database changes to write together with the status fields, if provided
2190 :return:
2191 """
2192 try:
2193 db_dict = other_update or {}
2194 db_dict[
2195 "_admin.nslcmop"
2196 ] = current_operation_id # for backward compatibility
2197 db_dict["_admin.current-operation"] = current_operation_id
2198 db_dict["_admin.operation-type"] = (
2199 current_operation if current_operation != "IDLE" else None
2200 )
2201 db_dict["currentOperation"] = current_operation
2202 db_dict["currentOperationID"] = current_operation_id
2203 db_dict["errorDescription"] = error_description
2204 db_dict["errorDetail"] = error_detail
2205
2206 if ns_state:
2207 db_dict["nsState"] = ns_state
2208 self.update_db_2("nsrs", nsr_id, db_dict)
2209 except DbException as e:
2210 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
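# Minimal usage sketch (illustrative values, mirroring the call made from
# instantiate()):
#   self._write_ns_status(nsr_id=nsr_id, ns_state="BUILDING",
#                         current_operation="INSTANTIATING",
#                         current_operation_id=nslcmop_id)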
2211
2212 def _write_op_status(
2213 self,
2214 op_id: str,
2215 stage: list = None,
2216 error_message: str = None,
2217 queuePosition: int = 0,
2218 operation_state: str = None,
2219 other_update: dict = None,
2220 ):
2221 try:
2222 db_dict = other_update or {}
2223 db_dict["queuePosition"] = queuePosition
2224 if isinstance(stage, list):
2225 db_dict["stage"] = stage[0]
2226 db_dict["detailed-status"] = " ".join(stage)
2227 elif stage is not None:
2228 db_dict["stage"] = str(stage)
2229
2230 if error_message is not None:
2231 db_dict["errorMessage"] = error_message
2232 if operation_state is not None:
2233 db_dict["operationState"] = operation_state
2234 db_dict["statusEnteredTime"] = time()
2235 self.update_db_2("nslcmops", op_id, db_dict)
2236 except DbException as e:
2237 self.logger.warn(
2238 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2239 )
2240
2241 def _write_all_config_status(self, db_nsr: dict, status: str):
2242 try:
2243 nsr_id = db_nsr["_id"]
2244 # configurationStatus
2245 config_status = db_nsr.get("configurationStatus")
2246 if config_status:
2247 db_nsr_update = {
2248 "configurationStatus.{}.status".format(index): status
2249 for index, v in enumerate(config_status)
2250 if v
2251 }
2252 # update status
2253 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2254
2255 except DbException as e:
2256 self.logger.warn(
2257 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2258 )
2259
2260 def _write_configuration_status(
2261 self,
2262 nsr_id: str,
2263 vca_index: int,
2264 status: str = None,
2265 element_under_configuration: str = None,
2266 element_type: str = None,
2267 other_update: dict = None,
2268 ):
2269 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2270 # .format(vca_index, status))
2271
2272 try:
2273 db_path = "configurationStatus.{}.".format(vca_index)
2274 db_dict = other_update or {}
2275 if status:
2276 db_dict[db_path + "status"] = status
2277 if element_under_configuration:
2278 db_dict[
2279 db_path + "elementUnderConfiguration"
2280 ] = element_under_configuration
2281 if element_type:
2282 db_dict[db_path + "elementType"] = element_type
2283 self.update_db_2("nsrs", nsr_id, db_dict)
2284 except DbException as e:
2285 self.logger.warn(
2286 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2287 status, nsr_id, vca_index, e
2288 )
2289 )
2290
2291 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2292 """
2293 Checks and computes the placement (the VIM account where to deploy). If it is decided by an external tool, it
2294 sends the request via kafka and waits until the result is written to the database (nslcmops _admin.pla).
2295 The database is used because the result can be obtained from a different LCM worker in case of HA.
2296 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2297 :param db_nslcmop: database content of nslcmop
2298 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2299 :return: True if some modification is done. Modifies the database vnfrs and the db_vnfrs parameter with the
2300 computed 'vim-account-id'
2301 """
2302 modified = False
2303 nslcmop_id = db_nslcmop["_id"]
2304 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2305 if placement_engine == "PLA":
2306 self.logger.debug(
2307 logging_text + "Invoke and wait for placement optimization"
2308 )
2309 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2310 db_poll_interval = 5
2311 wait = db_poll_interval * 10
2312 pla_result = None
2313 while not pla_result and wait >= 0:
2314 await asyncio.sleep(db_poll_interval)
2315 wait -= db_poll_interval
2316 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2317 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2318
2319 if not pla_result:
2320 raise LcmException(
2321 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2322 )
2323
2324 for pla_vnf in pla_result["vnf"]:
2325 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2326 if not pla_vnf.get("vimAccountId") or not vnfr:
2327 continue
2328 modified = True
2329 self.db.set_one(
2330 "vnfrs",
2331 {"_id": vnfr["_id"]},
2332 {"vim-account-id": pla_vnf["vimAccountId"]},
2333 )
2334 # Modifies db_vnfrs
2335 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2336 return modified
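# Illustrative (hypothetical ids) placement result written by PLA at
# nslcmops "_admin.pla", as consumed by the loop above:
#   {"vnf": [{"member-vnf-index": "1", "vimAccountId": "f8d9ab-..."}]}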
2337
2338 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2339 alerts = []
2340 nsr_id = vnfr["nsr-id-ref"]
2341 df = vnfd.get("df", [{}])[0]
2342 # Checking for auto-healing configuration
2343 if "healing-aspect" in df:
2344 healing_aspects = df["healing-aspect"]
2345 for healing in healing_aspects:
2346 for healing_policy in healing.get("healing-policy", ()):
2347 vdu_id = healing_policy["vdu-id"]
2348 vdur = next(
2349 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2350 {},
2351 )
2352 if not vdur:
2353 continue
2354 metric_name = "vm_status"
2355 vdu_name = vdur.get("name")
2356 vnf_member_index = vnfr["member-vnf-index-ref"]
2357 uuid = str(uuid4())
2358 name = f"healing_{uuid}"
2359 action = healing_policy
2360 # action_on_recovery = healing.get("action-on-recovery")
2361 # cooldown_time = healing.get("cooldown-time")
2362 # day1 = healing.get("day1")
2363 alert = {
2364 "uuid": uuid,
2365 "name": name,
2366 "metric": metric_name,
2367 "tags": {
2368 "ns_id": nsr_id,
2369 "vnf_member_index": vnf_member_index,
2370 "vdu_name": vdu_name,
2371 },
2372 "alarm_status": "ok",
2373 "action_type": "healing",
2374 "action": action,
2375 }
2376 alerts.append(alert)
2377 return alerts
2378
2379 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2380 alerts = []
2381 nsr_id = vnfr["nsr-id-ref"]
2382 df = vnfd.get("df", [{}])[0]
2383 # Checking for auto-scaling configuration
2384 if "scaling-aspect" in df:
2385 rel_operation_types = {
2386 "GE": ">=",
2387 "LE": "<=",
2388 "GT": ">",
2389 "LT": "<",
2390 "EQ": "==",
2391 "NE": "!=",
2392 }
2393 scaling_aspects = df["scaling-aspect"]
2394 all_vnfd_monitoring_params = {}
2395 for ivld in vnfd.get("int-virtual-link-desc", ()):
2396 for mp in ivld.get("monitoring-parameters", ()):
2397 all_vnfd_monitoring_params[mp.get("id")] = mp
2398 for vdu in vnfd.get("vdu", ()):
2399 for mp in vdu.get("monitoring-parameter", ()):
2400 all_vnfd_monitoring_params[mp.get("id")] = mp
2401 for df in vnfd.get("df", ()):
2402 for mp in df.get("monitoring-parameter", ()):
2403 all_vnfd_monitoring_params[mp.get("id")] = mp
2404 for scaling_aspect in scaling_aspects:
2405 scaling_group_name = scaling_aspect.get("name", "")
2406 # Get monitored VDUs
2407 all_monitored_vdus = set()
2408 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2409 "deltas", ()
2410 ):
2411 for vdu_delta in delta.get("vdu-delta", ()):
2412 all_monitored_vdus.add(vdu_delta.get("id"))
2413 monitored_vdurs = list(
2414 filter(
2415 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2416 vnfr["vdur"],
2417 )
2418 )
2419 if not monitored_vdurs:
2420 self.logger.error(
2421 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2422 )
2423 continue
2424 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2425 if scaling_policy["scaling-type"] != "automatic":
2426 continue
2427 threshold_time = scaling_policy.get("threshold-time", "1")
2428 cooldown_time = scaling_policy.get("cooldown-time", "0")
2429 for scaling_criteria in scaling_policy["scaling-criteria"]:
2430 monitoring_param_ref = scaling_criteria.get(
2431 "vnf-monitoring-param-ref"
2432 )
2433 vnf_monitoring_param = all_vnfd_monitoring_params[
2434 monitoring_param_ref
2435 ]
2436 for vdur in monitored_vdurs:
2437 vdu_id = vdur["vdu-id-ref"]
2438 metric_name = vnf_monitoring_param.get("performance-metric")
2439 metric_name = f"osm_{metric_name}"
2440 vnf_member_index = vnfr["member-vnf-index-ref"]
2441 scalein_threshold = scaling_criteria.get(
2442 "scale-in-threshold"
2443 )
2444 scaleout_threshold = scaling_criteria.get(
2445 "scale-out-threshold"
2446 )
2447 # Looking for min/max-number-of-instances
2448 instances_min_number = 1
2449 instances_max_number = 1
2450 vdu_profile = df["vdu-profile"]
2451 if vdu_profile:
2452 profile = next(
2453 item for item in vdu_profile if item["id"] == vdu_id
2454 )
2455 instances_min_number = profile.get(
2456 "min-number-of-instances", 1
2457 )
2458 instances_max_number = profile.get(
2459 "max-number-of-instances", 1
2460 )
2461
2462 if scalein_threshold:
2463 uuid = str(uuid4())
2464 name = f"scalein_{uuid}"
2465 operation = scaling_criteria[
2466 "scale-in-relational-operation"
2467 ]
2468 rel_operator = rel_operation_types.get(operation, "<=")
2469 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2470 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2471 labels = {
2472 "ns_id": nsr_id,
2473 "vnf_member_index": vnf_member_index,
2474 "vdu_id": vdu_id,
2475 }
2476 prom_cfg = {
2477 "alert": name,
2478 "expr": expression,
2479 "for": str(threshold_time) + "m",
2480 "labels": labels,
2481 }
2482 # the alert action only needs the scaling group and the cooldown time
2483 action = {
2484 "scaling-group": scaling_group_name,
2485 "cooldown-time": cooldown_time,
2486 }
2487 alert = {
2488 "uuid": uuid,
2489 "name": name,
2490 "metric": metric_name,
2491 "tags": {
2492 "ns_id": nsr_id,
2493 "vnf_member_index": vnf_member_index,
2494 "vdu_id": vdu_id,
2495 },
2496 "alarm_status": "ok",
2497 "action_type": "scale_in",
2498 "action": action,
2499 "prometheus_config": prom_cfg,
2500 }
2501 alerts.append(alert)
2502
2503 if scaleout_threshold:
2504 uuid = str(uuid4())
2505 name = f"scaleout_{uuid}"
2506 operation = scaling_criteria[
2507 "scale-out-relational-operation"
2508 ]
2509 rel_operator = rel_operation_types.get(operation, "<=")
2510 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2511 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2512 labels = {
2513 "ns_id": nsr_id,
2514 "vnf_member_index": vnf_member_index,
2515 "vdu_id": vdu_id,
2516 }
2517 prom_cfg = {
2518 "alert": name,
2519 "expr": expression,
2520 "for": str(threshold_time) + "m",
2521 "labels": labels,
2522 }
2523 # the alert action only needs the scaling group and the cooldown time
2524 action = {
2525 "scaling-group": scaling_group_name,
2526 "cooldown-time": cooldown_time,
2527 }
2528 alert = {
2529 "uuid": uuid,
2530 "name": name,
2531 "metric": metric_name,
2532 "tags": {
2533 "ns_id": nsr_id,
2534 "vnf_member_index": vnf_member_index,
2535 "vdu_id": vdu_id,
2536 },
2537 "alarm_status": "ok",
2538 "action_type": "scale_out",
2539 "action": action,
2540 "prometheus_config": prom_cfg,
2541 }
2542 alerts.append(alert)
2543 return alerts
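# Example (illustrative metric and thresholds) of a generated scale-out rule
# expression, as built by the f-strings above for a GE relational operation:
#   (count (osm_cpu_utilization{ns_id="...", vnf_member_index="1", vdu_id="dataVM"}) < 3)
#   and (avg(osm_cpu_utilization{ns_id="...", vnf_member_index="1", vdu_id="dataVM"}) >= 80)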
2544
2545 def update_nsrs_with_pla_result(self, params):
2546 try:
2547 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2548 self.update_db_2(
2549 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2550 )
2551 except Exception as e:
2552 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2553
2554 async def instantiate(self, nsr_id, nslcmop_id):
2555 """
2556
2557 :param nsr_id: ns instance to deploy
2558 :param nslcmop_id: operation to run
2559 :return:
2560 """
2561
2562 # Try to lock HA task here
2563 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2564 if not task_is_locked_by_me:
2565 self.logger.debug(
2566 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2567 )
2568 return
2569
2570 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2571 self.logger.debug(logging_text + "Enter")
2572
2573 # get all needed from database
2574
2575 # database nsrs record
2576 db_nsr = None
2577
2578 # database nslcmops record
2579 db_nslcmop = None
2580
2581 # update operation on nsrs
2582 db_nsr_update = {}
2583 # update operation on nslcmops
2584 db_nslcmop_update = {}
2585
2586 timeout_ns_deploy = self.timeout.ns_deploy
2587
2588 nslcmop_operation_state = None
2589 db_vnfrs = {} # vnf's info indexed by member-index
2590 # n2vc_info = {}
2591 tasks_dict_info = {} # from task to info text
2592 exc = None
2593 error_list = []
2594 stage = [
2595 "Stage 1/5: preparation of the environment.",
2596 "Waiting for previous operations to terminate.",
2597 "",
2598 ]
2599 # ^ stage, step, VIM progress
2600 try:
2601 # wait for any previous tasks in process
2602 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2603
2604 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2605 stage[1] = "Reading from database."
2606 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2607 db_nsr_update["detailed-status"] = "creating"
2608 db_nsr_update["operational-status"] = "init"
2609 self._write_ns_status(
2610 nsr_id=nsr_id,
2611 ns_state="BUILDING",
2612 current_operation="INSTANTIATING",
2613 current_operation_id=nslcmop_id,
2614 other_update=db_nsr_update,
2615 )
2616 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2617
2618 # read from db: operation
2619 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2620 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2621 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2622 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2623 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2624 )
2625 ns_params = db_nslcmop.get("operationParams")
2626 if ns_params and ns_params.get("timeout_ns_deploy"):
2627 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2628
2629 # read from db: ns
2630 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2631 self.logger.debug(logging_text + stage[1])
2632 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2633 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2634 self.logger.debug(logging_text + stage[1])
2635 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2636 self.fs.sync(db_nsr["nsd-id"])
2637 db_nsr["nsd"] = nsd
2638 # nsr_name = db_nsr["name"] # TODO short-name??
2639
2640 # read from db: vnf's of this ns
2641 stage[1] = "Getting vnfrs from db."
2642 self.logger.debug(logging_text + stage[1])
2643 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2644
2645 # read from db: vnfd's for every vnf
2646 db_vnfds = [] # every vnfd data
2647
2648 # for each vnf in ns, read vnfd
2649 for vnfr in db_vnfrs_list:
2650 if vnfr.get("kdur"):
2651 kdur_list = []
2652 for kdur in vnfr["kdur"]:
2653 if kdur.get("additionalParams"):
2654 kdur["additionalParams"] = json.loads(
2655 kdur["additionalParams"]
2656 )
2657 kdur_list.append(kdur)
2658 vnfr["kdur"] = kdur_list
2659
2660 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2661 vnfd_id = vnfr["vnfd-id"]
2662 vnfd_ref = vnfr["vnfd-ref"]
2663 self.fs.sync(vnfd_id)
2664
2665 # if we don't have this vnfd yet, read it from db
2666 if not find_in_list(db_vnfds, lambda v: v["_id"] == vnfd_id):
2667 # read from db
2668 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2669 vnfd_id, vnfd_ref
2670 )
2671 self.logger.debug(logging_text + stage[1])
2672 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2673
2674 # store vnfd
2675 db_vnfds.append(vnfd)
2676
2677 # Get or generate the _admin.deployed.VCA list
2678 vca_deployed_list = None
2679 if db_nsr["_admin"].get("deployed"):
2680 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2681 if vca_deployed_list is None:
2682 vca_deployed_list = []
2683 configuration_status_list = []
2684 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2685 db_nsr_update["configurationStatus"] = configuration_status_list
2686 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2687 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2688 elif isinstance(vca_deployed_list, dict):
2689 # maintain backward compatibility. Change a dict to list at database
2690 vca_deployed_list = list(vca_deployed_list.values())
2691 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2692 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2693
2694 if not isinstance(
2695 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2696 ):
2697 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2698 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2699
2700 # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2701 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2702 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2703 self.db.set_list(
2704 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2705 )
2706
2707 # n2vc_redesign STEP 2 Deploy Network Scenario
2708 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2709 self._write_op_status(op_id=nslcmop_id, stage=stage)
2710
2711 stage[1] = "Deploying KDUs."
2712 # self.logger.debug(logging_text + "Before deploy_kdus")
2713 # Call deploy_kdus in case the "vdu:kdu" param exists
2714 await self.deploy_kdus(
2715 logging_text=logging_text,
2716 nsr_id=nsr_id,
2717 nslcmop_id=nslcmop_id,
2718 db_vnfrs=db_vnfrs,
2719 db_vnfds=db_vnfds,
2720 task_instantiation_info=tasks_dict_info,
2721 )
2722
2723 stage[1] = "Getting VCA public key."
2724 # n2vc_redesign STEP 1 Get VCA public ssh-key
2725 # feature 1429. Add n2vc public key to needed VMs
2726 n2vc_key = self.n2vc.get_public_key()
2727 n2vc_key_list = [n2vc_key]
2728 if self.vca_config.public_key:
2729 n2vc_key_list.append(self.vca_config.public_key)
2730
2731 stage[1] = "Deploying NS at VIM."
2732 task_ro = asyncio.ensure_future(
2733 self.instantiate_RO(
2734 logging_text=logging_text,
2735 nsr_id=nsr_id,
2736 nsd=nsd,
2737 db_nsr=db_nsr,
2738 db_nslcmop=db_nslcmop,
2739 db_vnfrs=db_vnfrs,
2740 db_vnfds=db_vnfds,
2741 n2vc_key_list=n2vc_key_list,
2742 stage=stage,
2743 )
2744 )
2745 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2746 tasks_dict_info[task_ro] = "Deploying at VIM"
2747
2748 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2749 stage[1] = "Deploying Execution Environments."
2750 self.logger.debug(logging_text + stage[1])
2751
2752 # create namespace and certificate if any helm based EE is present in the NS
2753 if check_helm_ee_in_ns(db_vnfds):
2754 await self.vca_map["helm-v3"].setup_ns_namespace(
2755 name=nsr_id,
2756 )
2757 # create TLS certificates
2758 await self.vca_map["helm-v3"].create_tls_certificate(
2759 secret_name=self.EE_TLS_NAME,
2760 dns_prefix="*",
2761 nsr_id=nsr_id,
2762 usage="server auth",
2763 namespace=nsr_id,
2764 )
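# Sketch of the intent: the dedicated namespace is named after the NS id, and
# the wildcard dns_prefix ("*") makes the server-auth certificate cover any
# helm-based EE service created inside that namespace (the exact SAN layout is
# an assumption; it depends on the n2vc create_tls_certificate implementation).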
2765
2766 nsi_id = None # TODO put nsi_id when this nsr belongs to an NSI
2767 for vnf_profile in get_vnf_profiles(nsd):
2768 vnfd_id = vnf_profile["vnfd-id"]
2769 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2770 member_vnf_index = str(vnf_profile["id"])
2771 db_vnfr = db_vnfrs[member_vnf_index]
2772 base_folder = vnfd["_admin"]["storage"]
2773 vdu_id = None
2774 vdu_index = 0
2775 vdu_name = None
2776 kdu_name = None
2777 kdu_index = None
2778
2779 # Get additional parameters
2780 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2781 if db_vnfr.get("additionalParamsForVnf"):
2782 deploy_params.update(
2783 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2784 )
2785
2786 descriptor_config = get_configuration(vnfd, vnfd["id"])
2787 if descriptor_config:
2788 self._deploy_n2vc(
2789 logging_text=logging_text
2790 + "member_vnf_index={} ".format(member_vnf_index),
2791 db_nsr=db_nsr,
2792 db_vnfr=db_vnfr,
2793 nslcmop_id=nslcmop_id,
2794 nsr_id=nsr_id,
2795 nsi_id=nsi_id,
2796 vnfd_id=vnfd_id,
2797 vdu_id=vdu_id,
2798 kdu_name=kdu_name,
2799 member_vnf_index=member_vnf_index,
2800 vdu_index=vdu_index,
2801 kdu_index=kdu_index,
2802 vdu_name=vdu_name,
2803 deploy_params=deploy_params,
2804 descriptor_config=descriptor_config,
2805 base_folder=base_folder,
2806 task_instantiation_info=tasks_dict_info,
2807 stage=stage,
2808 )
2809
2810 # Deploy charms for each VDU that supports one.
2811 for vdud in get_vdu_list(vnfd):
2812 vdu_id = vdud["id"]
2813 descriptor_config = get_configuration(vnfd, vdu_id)
2814 vdur = find_in_list(
2815 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2816 )
2817
2818 if vdur.get("additionalParams"):
2819 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2820 else:
2821 deploy_params_vdu = deploy_params
2822 deploy_params_vdu["OSM"] = get_osm_params(
2823 db_vnfr, vdu_id, vdu_count_index=0
2824 )
2825 vdud_count = get_number_of_instances(vnfd, vdu_id)
2826
2827 self.logger.debug("VDUD > {}".format(vdud))
2828 self.logger.debug(
2829 "Descriptor config > {}".format(descriptor_config)
2830 )
2831 if descriptor_config:
2832 vdu_name = None
2833 kdu_name = None
2834 kdu_index = None
2835 for vdu_index in range(vdud_count):
2836 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2837 self._deploy_n2vc(
2838 logging_text=logging_text
2839 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2840 member_vnf_index, vdu_id, vdu_index
2841 ),
2842 db_nsr=db_nsr,
2843 db_vnfr=db_vnfr,
2844 nslcmop_id=nslcmop_id,
2845 nsr_id=nsr_id,
2846 nsi_id=nsi_id,
2847 vnfd_id=vnfd_id,
2848 vdu_id=vdu_id,
2849 kdu_name=kdu_name,
2850 kdu_index=kdu_index,
2851 member_vnf_index=member_vnf_index,
2852 vdu_index=vdu_index,
2853 vdu_name=vdu_name,
2854 deploy_params=deploy_params_vdu,
2855 descriptor_config=descriptor_config,
2856 base_folder=base_folder,
2857 task_instantiation_info=tasks_dict_info,
2858 stage=stage,
2859 )
2860 for kdud in get_kdu_list(vnfd):
2861 kdu_name = kdud["name"]
2862 descriptor_config = get_configuration(vnfd, kdu_name)
2863 if descriptor_config:
2864 vdu_id = None
2865 vdu_index = 0
2866 vdu_name = None
2867 kdu_index, kdur = next(
2868 x
2869 for x in enumerate(db_vnfr["kdur"])
2870 if x[1]["kdu-name"] == kdu_name
2871 )
2872 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2873 if kdur.get("additionalParams"):
2874 deploy_params_kdu.update(
2875 parse_yaml_strings(kdur["additionalParams"].copy())
2876 )
2877
2878 self._deploy_n2vc(
2879 logging_text=logging_text,
2880 db_nsr=db_nsr,
2881 db_vnfr=db_vnfr,
2882 nslcmop_id=nslcmop_id,
2883 nsr_id=nsr_id,
2884 nsi_id=nsi_id,
2885 vnfd_id=vnfd_id,
2886 vdu_id=vdu_id,
2887 kdu_name=kdu_name,
2888 member_vnf_index=member_vnf_index,
2889 vdu_index=vdu_index,
2890 kdu_index=kdu_index,
2891 vdu_name=vdu_name,
2892 deploy_params=deploy_params_kdu,
2893 descriptor_config=descriptor_config,
2894 base_folder=base_folder,
2895 task_instantiation_info=tasks_dict_info,
2896 stage=stage,
2897 )
2898
2899 # Check if each vnf has exporter for metric collection if so update prometheus job records
2900 if "exporters-endpoints" in vnfd.get("df")[0]:
2901 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2902 self.logger.debug("exporter config :{}".format(exporter_config))
2903 artifact_path = "{}/{}/{}".format(
2904 base_folder["folder"],
2905 base_folder["pkg-dir"],
2906 "exporter-endpoint",
2907 )
2908 ee_id = None
2909 ee_config_descriptor = exporter_config
2910 vnfr_id = db_vnfr["id"]
2911 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2912 logging_text,
2913 nsr_id,
2914 vnfr_id,
2915 vdu_id=None,
2916 vdu_index=None,
2917 user=None,
2918 pub_key=None,
2919 )
2920 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2921 self.logger.debug("Artifact_path:{}".format(artifact_path))
2922 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2923 vdu_id_for_prom = None
2924 vdu_index_for_prom = None
2925 for x in get_iterable(db_vnfr, "vdur"):
2926 vdu_id_for_prom = x.get("vdu-id-ref")
2927 vdu_index_for_prom = x.get("count-index")
2928 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2929 ee_id=ee_id,
2930 artifact_path=artifact_path,
2931 ee_config_descriptor=ee_config_descriptor,
2932 vnfr_id=vnfr_id,
2933 nsr_id=nsr_id,
2934 target_ip=rw_mgmt_ip,
2935 element_type="VDU",
2936 vdu_id=vdu_id_for_prom,
2937 vdu_index=vdu_index_for_prom,
2938 )
2939
2940 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2941 if prometheus_jobs:
2942 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2943 self.update_db_2(
2944 "nsrs",
2945 nsr_id,
2946 db_nsr_update,
2947 )
2948
2949 for job in prometheus_jobs:
2950 self.db.set_one(
2951 "prometheus_jobs",
2952 {"job_name": job["job_name"]},
2953 job,
2954 upsert=True,
2955 fail_on_empty=False,
2956 )
2957
2958 # Check if this NS has a charm configuration
2959 descriptor_config = nsd.get("ns-configuration")
2960 if descriptor_config and descriptor_config.get("juju"):
2961 vnfd_id = None
2962 db_vnfr = None
2963 member_vnf_index = None
2964 vdu_id = None
2965 kdu_name = None
2966 kdu_index = None
2967 vdu_index = 0
2968 vdu_name = None
2969
2970 # Get additional parameters
2971 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2972 if db_nsr.get("additionalParamsForNs"):
2973 deploy_params.update(
2974 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2975 )
2976 base_folder = nsd["_admin"]["storage"]
2977 self._deploy_n2vc(
2978 logging_text=logging_text,
2979 db_nsr=db_nsr,
2980 db_vnfr=db_vnfr,
2981 nslcmop_id=nslcmop_id,
2982 nsr_id=nsr_id,
2983 nsi_id=nsi_id,
2984 vnfd_id=vnfd_id,
2985 vdu_id=vdu_id,
2986 kdu_name=kdu_name,
2987 member_vnf_index=member_vnf_index,
2988 vdu_index=vdu_index,
2989 kdu_index=kdu_index,
2990 vdu_name=vdu_name,
2991 deploy_params=deploy_params,
2992 descriptor_config=descriptor_config,
2993 base_folder=base_folder,
2994 task_instantiation_info=tasks_dict_info,
2995 stage=stage,
2996 )
2997
2998 # the rest of the work is done in the finally block
2999
3000 except (
3001 ROclient.ROClientException,
3002 DbException,
3003 LcmException,
3004 N2VCException,
3005 ) as e:
3006 self.logger.error(
3007 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
3008 )
3009 exc = e
3010 except asyncio.CancelledError:
3011 self.logger.error(
3012 logging_text + "Cancelled Exception while '{}'".format(stage[1])
3013 )
3014 exc = "Operation was cancelled"
3015 except Exception as e:
3016 exc = traceback.format_exc()
3017 self.logger.critical(
3018 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
3019 exc_info=True,
3020 )
3021 finally:
3022 if exc:
3023 error_list.append(str(exc))
3024 try:
3025 # wait for pending tasks
3026 if tasks_dict_info:
3027 stage[1] = "Waiting for instantiate pending tasks."
3028 self.logger.debug(logging_text + stage[1])
3029 error_list += await self._wait_for_tasks(
3030 logging_text,
3031 tasks_dict_info,
3032 timeout_ns_deploy,
3033 stage,
3034 nslcmop_id,
3035 nsr_id=nsr_id,
3036 )
3037 stage[1] = stage[2] = ""
3038 except asyncio.CancelledError:
3039 error_list.append("Cancelled")
3040 # TODO cancel all tasks
3041 except Exception as exc:
3042 error_list.append(str(exc))
3043
3044 # update operation-status
3045 db_nsr_update["operational-status"] = "running"
3046 # let's begin with VCA 'configured' status (later we can change it)
3047 db_nsr_update["config-status"] = "configured"
3048 for task, task_name in tasks_dict_info.items():
3049 if not task.done() or task.cancelled() or task.exception():
3050 if task_name.startswith(self.task_name_deploy_vca):
3051 # A N2VC task is pending
3052 db_nsr_update["config-status"] = "failed"
3053 else:
3054 # RO or KDU task is pending
3055 db_nsr_update["operational-status"] = "failed"
3056
3057 # update status at database
3058 if error_list:
3059 error_detail = ". ".join(error_list)
3060 self.logger.error(logging_text + error_detail)
3061 error_description_nslcmop = "{} Detail: {}".format(
3062 stage[0], error_detail
3063 )
3064 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3065 nslcmop_id, stage[0]
3066 )
3067
3068 db_nsr_update["detailed-status"] = (
3069 error_description_nsr + " Detail: " + error_detail
3070 )
3071 db_nslcmop_update["detailed-status"] = error_detail
3072 nslcmop_operation_state = "FAILED"
3073 ns_state = "BROKEN"
3074 else:
3075 error_detail = None
3076 error_description_nsr = error_description_nslcmop = None
3077 ns_state = "READY"
3078 db_nsr_update["detailed-status"] = "Done"
3079 db_nslcmop_update["detailed-status"] = "Done"
3080 nslcmop_operation_state = "COMPLETED"
3081 # Gather auto-healing and auto-scaling alerts for each vnfr
3082 healing_alerts = []
3083 scaling_alerts = []
3084 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3085 vnfd = next(
3086 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3087 )
3088 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3089 for alert in healing_alerts:
3090 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3091 self.db.create("alerts", alert)
3092
3093 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3094 for alert in scaling_alerts:
3095 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3096 self.db.create("alerts", alert)
3097
3098 if db_nsr:
3099 self._write_ns_status(
3100 nsr_id=nsr_id,
3101 ns_state=ns_state,
3102 current_operation="IDLE",
3103 current_operation_id=None,
3104 error_description=error_description_nsr,
3105 error_detail=error_detail,
3106 other_update=db_nsr_update,
3107 )
3108 self._write_op_status(
3109 op_id=nslcmop_id,
3110 stage="",
3111 error_message=error_description_nslcmop,
3112 operation_state=nslcmop_operation_state,
3113 other_update=db_nslcmop_update,
3114 )
3115
3116 if nslcmop_operation_state:
3117 try:
3118 await self.msg.aiowrite(
3119 "ns",
3120 "instantiated",
3121 {
3122 "nsr_id": nsr_id,
3123 "nslcmop_id": nslcmop_id,
3124 "operationState": nslcmop_operation_state,
3125 },
3126 )
3127 except Exception as e:
3128 self.logger.error(
3129 logging_text + "kafka_write notification Exception {}".format(e)
3130 )
3131
3132 self.logger.debug(logging_text + "Exit")
3133 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3134
3135 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3136 if vnfd_id not in cached_vnfds:
3137 cached_vnfds[vnfd_id] = self.db.get_one(
3138 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3139 )
3140 return cached_vnfds[vnfd_id]
3141
3142 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3143 if vnf_profile_id not in cached_vnfrs:
3144 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3145 "vnfrs",
3146 {
3147 "member-vnf-index-ref": vnf_profile_id,
3148 "nsr-id-ref": nsr_id,
3149 },
3150 )
3151 return cached_vnfrs[vnf_profile_id]
3152
3153 def _is_deployed_vca_in_relation(
3154 self, vca: DeployedVCA, relation: Relation
3155 ) -> bool:
3156 found = False
3157 for endpoint in (relation.provider, relation.requirer):
3158 if endpoint["kdu-resource-profile-id"]:
3159 continue
3160 found = (
3161 vca.vnf_profile_id == endpoint.vnf_profile_id
3162 and vca.vdu_profile_id == endpoint.vdu_profile_id
3163 and vca.execution_environment_ref == endpoint.execution_environment_ref
3164 )
3165 if found:
3166 break
3167 return found
3168
3169 def _update_ee_relation_data_with_implicit_data(
3170 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3171 ):
3172 ee_relation_data = safe_get_ee_relation(
3173 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3174 )
3175 ee_relation_level = EELevel.get_level(ee_relation_data)
3176 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3177 "execution-environment-ref"
3178 ]:
3179 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3180 vnfd_id = vnf_profile["vnfd-id"]
3181 project = nsd["_admin"]["projects_read"][0]
3182 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3183 entity_id = (
3184 vnfd_id
3185 if ee_relation_level == EELevel.VNF
3186 else ee_relation_data["vdu-profile-id"]
3187 )
3188 ee = get_juju_ee_ref(db_vnfd, entity_id)
3189 if not ee:
3190 raise Exception(
3191 f"not execution environments found for ee_relation {ee_relation_data}"
3192 )
3193 ee_relation_data["execution-environment-ref"] = ee["id"]
3194 return ee_relation_data
3195
3196 def _get_ns_relations(
3197 self,
3198 nsr_id: str,
3199 nsd: Dict[str, Any],
3200 vca: DeployedVCA,
3201 cached_vnfds: Dict[str, Any],
3202 ) -> List[Relation]:
3203 relations = []
3204 db_ns_relations = get_ns_configuration_relation_list(nsd)
3205 for r in db_ns_relations:
3206 provider_dict = None
3207 requirer_dict = None
3208 if all(key in r for key in ("provider", "requirer")):
3209 provider_dict = r["provider"]
3210 requirer_dict = r["requirer"]
3211 elif "entities" in r:
3212 provider_id = r["entities"][0]["id"]
3213 provider_dict = {
3214 "nsr-id": nsr_id,
3215 "endpoint": r["entities"][0]["endpoint"],
3216 }
3217 if provider_id != nsd["id"]:
3218 provider_dict["vnf-profile-id"] = provider_id
3219 requirer_id = r["entities"][1]["id"]
3220 requirer_dict = {
3221 "nsr-id": nsr_id,
3222 "endpoint": r["entities"][1]["endpoint"],
3223 }
3224 if requirer_id != nsd["id"]:
3225 requirer_dict["vnf-profile-id"] = requirer_id
3226 else:
3227 raise Exception(
3228 "provider/requirer or entities must be included in the relation."
3229 )
3230 relation_provider = self._update_ee_relation_data_with_implicit_data(
3231 nsr_id, nsd, provider_dict, cached_vnfds
3232 )
3233 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3234 nsr_id, nsd, requirer_dict, cached_vnfds
3235 )
3236 provider = EERelation(relation_provider)
3237 requirer = EERelation(relation_requirer)
3238 relation = Relation(r["name"], provider, requirer)
3239 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3240 if vca_in_relation:
3241 relations.append(relation)
3242 return relations
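# Illustrative NSD relation entry in the "entities" form (hypothetical ids),
# as parsed above; an entity whose id equals the NSD id denotes the NS charm:
#   {"name": "relation-1",
#    "entities": [{"id": "vnf-a", "endpoint": "provides"},
#                 {"id": "vnf-b", "endpoint": "requires"}]}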
3243
3244 def _get_vnf_relations(
3245 self,
3246 nsr_id: str,
3247 nsd: Dict[str, Any],
3248 vca: DeployedVCA,
3249 cached_vnfds: Dict[str, Any],
3250 ) -> List[Relation]:
3251 relations = []
3252 if vca.target_element == "ns":
3253 self.logger.debug("VCA is a NS charm, not a VNF.")
3254 return relations
3255 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3256 vnf_profile_id = vnf_profile["id"]
3257 vnfd_id = vnf_profile["vnfd-id"]
3258 project = nsd["_admin"]["projects_read"][0]
3259 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3260 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3261 for r in db_vnf_relations:
3262 provider_dict = None
3263 requirer_dict = None
3264 if all(key in r for key in ("provider", "requirer")):
3265 provider_dict = r["provider"]
3266 requirer_dict = r["requirer"]
3267 elif "entities" in r:
3268 provider_id = r["entities"][0]["id"]
3269 provider_dict = {
3270 "nsr-id": nsr_id,
3271 "vnf-profile-id": vnf_profile_id,
3272 "endpoint": r["entities"][0]["endpoint"],
3273 }
3274 if provider_id != vnfd_id:
3275 provider_dict["vdu-profile-id"] = provider_id
3276 requirer_id = r["entities"][1]["id"]
3277 requirer_dict = {
3278 "nsr-id": nsr_id,
3279 "vnf-profile-id": vnf_profile_id,
3280 "endpoint": r["entities"][1]["endpoint"],
3281 }
3282 if requirer_id != vnfd_id:
3283 requirer_dict["vdu-profile-id"] = requirer_id
3284 else:
3285 raise Exception(
3286 "provider/requirer or entities must be included in the relation."
3287 )
3288 relation_provider = self._update_ee_relation_data_with_implicit_data(
3289 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3290 )
3291 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3292 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3293 )
3294 provider = EERelation(relation_provider)
3295 requirer = EERelation(relation_requirer)
3296 relation = Relation(r["name"], provider, requirer)
3297 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3298 if vca_in_relation:
3299 relations.append(relation)
3300 return relations
3301
3302 def _get_kdu_resource_data(
3303 self,
3304 ee_relation: EERelation,
3305 db_nsr: Dict[str, Any],
3306 cached_vnfds: Dict[str, Any],
3307 ) -> DeployedK8sResource:
3308 nsd = get_nsd(db_nsr)
3309 vnf_profiles = get_vnf_profiles(nsd)
3310 vnfd_id = find_in_list(
3311 vnf_profiles,
3312 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3313 )["vnfd-id"]
3314 project = nsd["_admin"]["projects_read"][0]
3315 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3316 kdu_resource_profile = get_kdu_resource_profile(
3317 db_vnfd, ee_relation.kdu_resource_profile_id
3318 )
3319 kdu_name = kdu_resource_profile["kdu-name"]
3320 deployed_kdu, _ = get_deployed_kdu(
3321 db_nsr.get("_admin", ()).get("deployed", ()),
3322 kdu_name,
3323 ee_relation.vnf_profile_id,
3324 )
3325 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3326 return deployed_kdu
3327
3328 def _get_deployed_component(
3329 self,
3330 ee_relation: EERelation,
3331 db_nsr: Dict[str, Any],
3332 cached_vnfds: Dict[str, Any],
3333 ) -> DeployedComponent:
3334 nsr_id = db_nsr["_id"]
3335 deployed_component = None
3336 ee_level = EELevel.get_level(ee_relation)
3337 if ee_level == EELevel.NS:
3338 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3339 if vca:
3340 deployed_component = DeployedVCA(nsr_id, vca)
3341 elif ee_level == EELevel.VNF:
3342 vca = get_deployed_vca(
3343 db_nsr,
3344 {
3345 "vdu_id": None,
3346 "member-vnf-index": ee_relation.vnf_profile_id,
3347 "ee_descriptor_id": ee_relation.execution_environment_ref,
3348 },
3349 )
3350 if vca:
3351 deployed_component = DeployedVCA(nsr_id, vca)
3352 elif ee_level == EELevel.VDU:
3353 vca = get_deployed_vca(
3354 db_nsr,
3355 {
3356 "vdu_id": ee_relation.vdu_profile_id,
3357 "member-vnf-index": ee_relation.vnf_profile_id,
3358 "ee_descriptor_id": ee_relation.execution_environment_ref,
3359 },
3360 )
3361 if vca:
3362 deployed_component = DeployedVCA(nsr_id, vca)
3363 elif ee_level == EELevel.KDU:
3364 kdu_resource_data = self._get_kdu_resource_data(
3365 ee_relation, db_nsr, cached_vnfds
3366 )
3367 if kdu_resource_data:
3368 deployed_component = DeployedK8sResource(kdu_resource_data)
3369 return deployed_component
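# Summary of the mapping above: NS-level relations resolve to the NS charm (no
# vdu_id/member-vnf-index), VNF/VDU levels resolve to the matching VCA by
# member index, vdu id and ee descriptor, and KDU-level endpoints resolve to a
# deployed K8s resource instead of a VCA.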
3370
3371 async def _add_relation(
3372 self,
3373 relation: Relation,
3374 vca_type: str,
3375 db_nsr: Dict[str, Any],
3376 cached_vnfds: Dict[str, Any],
3377 cached_vnfrs: Dict[str, Any],
3378 ) -> bool:
3379 deployed_provider = self._get_deployed_component(
3380 relation.provider, db_nsr, cached_vnfds
3381 )
3382 deployed_requirer = self._get_deployed_component(
3383 relation.requirer, db_nsr, cached_vnfds
3384 )
3385 if (
3386 deployed_provider
3387 and deployed_requirer
3388 and deployed_provider.config_sw_installed
3389 and deployed_requirer.config_sw_installed
3390 ):
3391 provider_db_vnfr = (
3392 self._get_vnfr(
3393 relation.provider.nsr_id,
3394 relation.provider.vnf_profile_id,
3395 cached_vnfrs,
3396 )
3397 if relation.provider.vnf_profile_id
3398 else None
3399 )
3400 requirer_db_vnfr = (
3401 self._get_vnfr(
3402 relation.requirer.nsr_id,
3403 relation.requirer.vnf_profile_id,
3404 cached_vnfrs,
3405 )
3406 if relation.requirer.vnf_profile_id
3407 else None
3408 )
3409 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3410 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3411 provider_relation_endpoint = RelationEndpoint(
3412 deployed_provider.ee_id,
3413 provider_vca_id,
3414 relation.provider.endpoint,
3415 )
3416 requirer_relation_endpoint = RelationEndpoint(
3417 deployed_requirer.ee_id,
3418 requirer_vca_id,
3419 relation.requirer.endpoint,
3420 )
3421 try:
3422 await self.vca_map[vca_type].add_relation(
3423 provider=provider_relation_endpoint,
3424 requirer=requirer_relation_endpoint,
3425 )
3426 except N2VCException as exception:
3427 self.logger.error(exception)
3428 raise LcmException(exception)
3429 return True
3430 return False
3431
3432 async def _add_vca_relations(
3433 self,
3434 logging_text,
3435 nsr_id,
3436 vca_type: str,
3437 vca_index: int,
3438 timeout: int = 3600,
3439 ) -> bool:
3440 # steps:
3441 # 1. find all relations for this VCA
3442 # 2. wait for other peers related
3443 # 3. add relations
3444
3445 try:
3446 # STEP 1: find all relations for this VCA
3447
3448 # read nsr record
3449 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3450 nsd = get_nsd(db_nsr)
3451
3452 # this VCA data
3453 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3454 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3455
3456 cached_vnfds = {}
3457 cached_vnfrs = {}
3458 relations = []
3459 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3460 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3461
3462 # if no relations, terminate
3463 if not relations:
3464 self.logger.debug(logging_text + " No relations")
3465 return True
3466
3467 self.logger.debug(logging_text + " adding relations {}".format(relations))
3468
3469 # add all relations
3470 start = time()
3471 while True:
3472 # check timeout
3473 now = time()
3474 if now - start >= timeout:
3475 self.logger.error(logging_text + " : timeout adding relations")
3476 return False
3477
3478 # reload nsr from database (we need an up-to-date record of _admin.deployed.VCA)
3479 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3480
3481 # for each relation, find the related VCAs
3482 for relation in relations.copy():
3483 added = await self._add_relation(
3484 relation,
3485 vca_type,
3486 db_nsr,
3487 cached_vnfds,
3488 cached_vnfrs,
3489 )
3490 if added:
3491 relations.remove(relation)
3492
3493 if not relations:
3494 self.logger.debug("Relations added")
3495 break
3496 await asyncio.sleep(5.0)
3497
3498 return True
3499
3500 except Exception as e:
3501 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3502 return False
3503
3504 async def _install_kdu(
3505 self,
3506 nsr_id: str,
3507 nsr_db_path: str,
3508 vnfr_data: dict,
3509 kdu_index: int,
3510 kdud: dict,
3511 vnfd: dict,
3512 k8s_instance_info: dict,
3513 k8params: dict = None,
3514 timeout: int = 600,
3515 vca_id: str = None,
3516 ):
3517 try:
3518 k8sclustertype = k8s_instance_info["k8scluster-type"]
3519 # Instantiate kdu
3520 db_dict_install = {
3521 "collection": "nsrs",
3522 "filter": {"_id": nsr_id},
3523 "path": nsr_db_path,
3524 }
3525
3526 if k8s_instance_info.get("kdu-deployment-name"):
3527 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3528 else:
3529 kdu_instance = self.k8scluster_map[
3530 k8sclustertype
3531 ].generate_kdu_instance_name(
3532 db_dict=db_dict_install,
3533 kdu_model=k8s_instance_info["kdu-model"],
3534 kdu_name=k8s_instance_info["kdu-name"],
3535 )
3536
3537 # Update the nsrs table with the kdu-instance value
3538 self.update_db_2(
3539 item="nsrs",
3540 _id=nsr_id,
3541 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3542 )
3543
3544 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3545 # `juju-bundle`. This verification is needed because there is no standard/homogeneous namespace
3546 # between Helm Chart and Juju Bundle-based KNFs. If we find a way of having a homogeneous
3547 # namespace, this first verification could be removed, and the next step would be done for any kind
3548 # of KNF.
3549 # TODO -> find a way to have a homogeneous namespace between Helm Chart and Juju Bundle-based
3550 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3551 if k8sclustertype in ("juju", "juju-bundle"):
3552 # First, verify whether the current namespace is present in `_admin.projects_read` (if not, it means
3553 # that the user passed a custom namespace in which the KDU should be deployed)
3554 if (
3555 self.db.count(
3556 table="nsrs",
3557 q_filter={
3558 "_id": nsr_id,
3559 "_admin.projects_write": k8s_instance_info["namespace"],
3560 "_admin.projects_read": k8s_instance_info["namespace"],
3561 },
3562 )
3563 > 0
3564 ):
3565 self.logger.debug(
3566 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3567 )
3568 self.update_db_2(
3569 item="nsrs",
3570 _id=nsr_id,
3571 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3572 )
3573 k8s_instance_info["namespace"] = kdu_instance
3574
3575 await self.k8scluster_map[k8sclustertype].install(
3576 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3577 kdu_model=k8s_instance_info["kdu-model"],
3578 atomic=True,
3579 params=k8params,
3580 db_dict=db_dict_install,
3581 timeout=timeout,
3582 kdu_name=k8s_instance_info["kdu-name"],
3583 namespace=k8s_instance_info["namespace"],
3584 kdu_instance=kdu_instance,
3585 vca_id=vca_id,
3586 )
3587
3588 # Obtain the deployed services in order to find the management service IP
3589 services = await self.k8scluster_map[k8sclustertype].get_services(
3590 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3591 kdu_instance=kdu_instance,
3592 namespace=k8s_instance_info["namespace"],
3593 )
3594
3595 # Obtain management service info (if it exists)
3596 vnfr_update_dict = {}
3597 kdu_config = get_configuration(vnfd, kdud["name"])
3598 if kdu_config:
3599 target_ee_list = kdu_config.get("execution-environment-list", [])
3600 else:
3601 target_ee_list = []
3602
3603 if services:
3604 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3605 mgmt_services = [
3606 service
3607 for service in kdud.get("service", [])
3608 if service.get("mgmt-service")
3609 ]
3610 for mgmt_service in mgmt_services:
3611 for service in services:
3612 if service["name"].startswith(mgmt_service["name"]):
3613 # Mgmt service found; obtain its IP
3614 ip = service.get("external_ip", service.get("cluster_ip"))
3615 if isinstance(ip, list) and len(ip) == 1:
3616 ip = ip[0]
3617
3618 vnfr_update_dict[
3619 "kdur.{}.ip-address".format(kdu_index)
3620 ] = ip
3621
3622 # Check whether the mgmt IP must also be updated at the VNF level
3623 service_external_cp = mgmt_service.get(
3624 "external-connection-point-ref"
3625 )
3626 if service_external_cp:
3627 if (
3628 deep_get(vnfd, ("mgmt-interface", "cp"))
3629 == service_external_cp
3630 ):
3631 vnfr_update_dict["ip-address"] = ip
3632
3633 if find_in_list(
3634 target_ee_list,
3635 lambda ee: ee.get(
3636 "external-connection-point-ref", ""
3637 )
3638 == service_external_cp,
3639 ):
3640 vnfr_update_dict[
3641 "kdur.{}.ip-address".format(kdu_index)
3642 ] = ip
3643 break
3644 else:
3645 self.logger.warn(
3646 "Mgmt service name: {} not found".format(
3647 mgmt_service["name"]
3648 )
3649 )
3650
3651 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3652 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3653
3654 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3655 if (
3656 kdu_config
3657 and kdu_config.get("initial-config-primitive")
3658 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3659 ):
3660 initial_config_primitive_list = kdu_config.get(
3661 "initial-config-primitive"
3662 )
3663 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3664
3665 for initial_config_primitive in initial_config_primitive_list:
3666 primitive_params_ = self._map_primitive_params(
3667 initial_config_primitive, {}, {}
3668 )
3669
3670 await asyncio.wait_for(
3671 self.k8scluster_map[k8sclustertype].exec_primitive(
3672 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3673 kdu_instance=kdu_instance,
3674 primitive_name=initial_config_primitive["name"],
3675 params=primitive_params_,
3676 db_dict=db_dict_install,
3677 vca_id=vca_id,
3678 ),
3679 timeout=timeout,
3680 )
3681
3682 except Exception as e:
3683 # Prepare update db with error and raise exception
3684 try:
3685 self.update_db_2(
3686 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3687 )
3688 self.update_db_2(
3689 "vnfrs",
3690 vnfr_data.get("_id"),
3691 {"kdur.{}.status".format(kdu_index): "ERROR"},
3692 )
3693 except Exception:
3694 # ignore to keep original exception
3695 pass
3696 # reraise original error
3697 raise
3698
3699 return kdu_instance
3700
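# Illustrative sketch of the management-service matching performed in
# _install_kdu above (commented out; data values are hypothetical). Each
# descriptor service flagged "mgmt-service" is paired with the first deployed
# service whose name starts with the descriptor name, preferring external_ip
# over cluster_ip:
#
#   kdud_services = [{"name": "mgmt", "mgmt-service": True}]
#   deployed = [{"name": "mgmt-svc", "cluster_ip": "10.0.0.5",
#                "external_ip": ["172.16.0.9"]}]
#   for mgmt_service in (s for s in kdud_services if s.get("mgmt-service")):
#       for service in deployed:
#           if service["name"].startswith(mgmt_service["name"]):
#               ip = service.get("external_ip", service.get("cluster_ip"))
#               if isinstance(ip, list) and len(ip) == 1:
#                   ip = ip[0]  # -> "172.16.0.9"
#               break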
3701 async def deploy_kdus(
3702 self,
3703 logging_text,
3704 nsr_id,
3705 nslcmop_id,
3706 db_vnfrs,
3707 db_vnfds,
3708 task_instantiation_info,
3709 ):
3710 # Launch kdus if present in the descriptor
3711
3712 k8scluster_id_2_uuic = {
3713 "helm-chart-v3": {},
3714 "helm-chart": {},
3715 "juju-bundle": {},
3716 }
3717
3718 async def _get_cluster_id(cluster_id, cluster_type):
3719 nonlocal k8scluster_id_2_uuic
3720 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3721 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3722
3723 # check if the K8s cluster is still being created; if so, wait for previous tasks in process to finish
3724 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3725 "k8scluster", cluster_id
3726 )
3727 if task_dependency:
3728 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3729 task_name, cluster_id
3730 )
3731 self.logger.debug(logging_text + text)
3732 await asyncio.wait(task_dependency, timeout=3600)
3733
3734 db_k8scluster = self.db.get_one(
3735 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3736 )
3737 if not db_k8scluster:
3738 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3739
3740 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3741 if not k8s_id:
3742 if cluster_type == "helm-chart-v3":
3743 try:
3744 # backward compatibility for existing clusters that have not been initialized for helm v3
3745 k8s_credentials = yaml.safe_dump(
3746 db_k8scluster.get("credentials")
3747 )
3748 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3749 k8s_credentials, reuse_cluster_uuid=cluster_id
3750 )
3751 db_k8scluster_update = {}
3752 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3753 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3754 db_k8scluster_update[
3755 "_admin.helm-chart-v3.created"
3756 ] = uninstall_sw
3757 db_k8scluster_update[
3758 "_admin.helm-chart-v3.operationalState"
3759 ] = "ENABLED"
3760 self.update_db_2(
3761 "k8sclusters", cluster_id, db_k8scluster_update
3762 )
3763 except Exception as e:
3764 self.logger.error(
3765 logging_text
3766 + "error initializing helm-v3 cluster: {}".format(str(e))
3767 )
3768 raise LcmException(
3769 "K8s cluster '{}' has not been initialized for '{}'".format(
3770 cluster_id, cluster_type
3771 )
3772 )
3773 else:
3774 raise LcmException(
3775 "K8s cluster '{}' has not been initialized for '{}'".format(
3776 cluster_id, cluster_type
3777 )
3778 )
3779 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3780 return k8s_id
3781
3782 logging_text += "Deploy kdus: "
3783 step = ""
3784 try:
3785 db_nsr_update = {"_admin.deployed.K8s": []}
3786 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3787
3788 index = 0
3789 updated_cluster_list = []
3790 updated_v3_cluster_list = []
3791
3792 for vnfr_data in db_vnfrs.values():
3793 vca_id = self.get_vca_id(vnfr_data, {})
3794 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3795 # Step 0: Prepare and set parameters
3796 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3797 vnfd_id = vnfr_data.get("vnfd-id")
3798 vnfd_with_id = find_in_list(
3799 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3800 )
3801 kdud = next(
3802 kdud
3803 for kdud in vnfd_with_id["kdu"]
3804 if kdud["name"] == kdur["kdu-name"]
3805 )
3806 namespace = kdur.get("k8s-namespace")
3807 kdu_deployment_name = kdur.get("kdu-deployment-name")
3808 if kdur.get("helm-chart"):
3809 kdumodel = kdur["helm-chart"]
3810 # Default version is helm3; if helm-version is v2, use v2
3811 k8sclustertype = "helm-chart-v3"
3812 self.logger.debug("kdur: {}".format(kdur))
3813 if (
3814 kdur.get("helm-version")
3815 and kdur.get("helm-version") == "v2"
3816 ):
3817 k8sclustertype = "helm-chart"
3818 elif kdur.get("juju-bundle"):
3819 kdumodel = kdur["juju-bundle"]
3820 k8sclustertype = "juju-bundle"
3821 else:
3822 raise LcmException(
3823 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3824 "juju-bundle. Maybe an old NBI version is running".format(
3825 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3826 )
3827 )
3828 # check if kdumodel is a file and exists
3829 try:
3830 vnfd_with_id = find_in_list(
3831 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3832 )
3833 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3834 if storage: # may not be present if the vnfd has no artifacts
3835 # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
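# Illustrative example (hypothetical values): given storage
# {"folder": "<vnfd-id>", "pkg-dir": "my_vnf_pkg"}, k8sclustertype
# "helm-chart" and kdumodel "mychart", the resulting path would be
# "<vnfd-id>/my_vnf_pkg/helm-charts/mychart" (note the trailing "s"
# that the format string below appends to the cluster type).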
3836 if storage["pkg-dir"]:
3837 filename = "{}/{}/{}s/{}".format(
3838 storage["folder"],
3839 storage["pkg-dir"],
3840 k8sclustertype,
3841 kdumodel,
3842 )
3843 else:
3844 filename = "{}/Scripts/{}s/{}".format(
3845 storage["folder"],
3846 k8sclustertype,
3847 kdumodel,
3848 )
3849 if self.fs.file_exists(
3850 filename, mode="file"
3851 ) or self.fs.file_exists(filename, mode="dir"):
3852 kdumodel = self.fs.path + filename
3853 except (asyncio.TimeoutError, asyncio.CancelledError):
3854 raise
3855 except Exception: # it is not a file
3856 pass
3857
3858 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3859 step = "Synchronize repos for k8s cluster '{}'".format(
3860 k8s_cluster_id
3861 )
3862 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3863
3864 # Synchronize repos
3865 if (
3866 k8sclustertype == "helm-chart"
3867 and cluster_uuid not in updated_cluster_list
3868 ) or (
3869 k8sclustertype == "helm-chart-v3"
3870 and cluster_uuid not in updated_v3_cluster_list
3871 ):
3872 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3873 self.k8scluster_map[k8sclustertype].synchronize_repos(
3874 cluster_uuid=cluster_uuid
3875 )
3876 )
3877 if del_repo_list or added_repo_dict:
3878 if k8sclustertype == "helm-chart":
3879 unset = {
3880 "_admin.helm_charts_added." + item: None
3881 for item in del_repo_list
3882 }
3883 updated = {
3884 "_admin.helm_charts_added." + item: name
3885 for item, name in added_repo_dict.items()
3886 }
3887 updated_cluster_list.append(cluster_uuid)
3888 elif k8sclustertype == "helm-chart-v3":
3889 unset = {
3890 "_admin.helm_charts_v3_added." + item: None
3891 for item in del_repo_list
3892 }
3893 updated = {
3894 "_admin.helm_charts_v3_added." + item: name
3895 for item, name in added_repo_dict.items()
3896 }
3897 updated_v3_cluster_list.append(cluster_uuid)
3898 self.logger.debug(
3899 logging_text + "repos synchronized on k8s cluster "
3900 "'{}' to_delete: {}, to_add: {}".format(
3901 k8s_cluster_id, del_repo_list, added_repo_dict
3902 )
3903 )
3904 self.db.set_one(
3905 "k8sclusters",
3906 {"_id": k8s_cluster_id},
3907 updated,
3908 unset=unset,
3909 )
3910
3911 # Instantiate kdu
3912 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3913 vnfr_data["member-vnf-index-ref"],
3914 kdur["kdu-name"],
3915 k8s_cluster_id,
3916 )
3917 k8s_instance_info = {
3918 "kdu-instance": None,
3919 "k8scluster-uuid": cluster_uuid,
3920 "k8scluster-type": k8sclustertype,
3921 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3922 "kdu-name": kdur["kdu-name"],
3923 "kdu-model": kdumodel,
3924 "namespace": namespace,
3925 "kdu-deployment-name": kdu_deployment_name,
3926 }
3927 db_path = "_admin.deployed.K8s.{}".format(index)
3928 db_nsr_update[db_path] = k8s_instance_info
3929 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3930 vnfd_with_id = find_in_list(
3931 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3932 )
3933 task = asyncio.ensure_future(
3934 self._install_kdu(
3935 nsr_id,
3936 db_path,
3937 vnfr_data,
3938 kdu_index,
3939 kdud,
3940 vnfd_with_id,
3941 k8s_instance_info,
3942 k8params=desc_params,
3943 timeout=1800,
3944 vca_id=vca_id,
3945 )
3946 )
3947 self.lcm_tasks.register(
3948 "ns",
3949 nsr_id,
3950 nslcmop_id,
3951 "instantiate_KDU-{}".format(index),
3952 task,
3953 )
3954 task_instantiation_info[task] = "Deploying KDU {}".format(
3955 kdur["kdu-name"]
3956 )
3957
3958 index += 1
3959
3960 except (LcmException, asyncio.CancelledError):
3961 raise
3962 except Exception as e:
3963 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3964 if isinstance(e, (N2VCException, DbException)):
3965 self.logger.error(logging_text + msg)
3966 else:
3967 self.logger.critical(logging_text + msg, exc_info=True)
3968 raise LcmException(msg)
3969 finally:
3970 if db_nsr_update:
3971 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3972
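# Illustrative sketch (commented out; repo names are hypothetical) of how the
# repo synchronization result in deploy_kdus above is turned into a
# Mongo-style update for the k8sclusters record:
#
#   del_repo_list, added_repo_dict = ["old"], {"r1": "https://charts.example"}
#   unset = {"_admin.helm_charts_added." + item: None for item in del_repo_list}
#   updated = {
#       "_admin.helm_charts_added." + item: name
#       for item, name in added_repo_dict.items()
#   }
#   # unset   -> {"_admin.helm_charts_added.old": None}
#   # updated -> {"_admin.helm_charts_added.r1": "https://charts.example"}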
3973 def _deploy_n2vc(
3974 self,
3975 logging_text,
3976 db_nsr,
3977 db_vnfr,
3978 nslcmop_id,
3979 nsr_id,
3980 nsi_id,
3981 vnfd_id,
3982 vdu_id,
3983 kdu_name,
3984 member_vnf_index,
3985 vdu_index,
3986 kdu_index,
3987 vdu_name,
3988 deploy_params,
3989 descriptor_config,
3990 base_folder,
3991 task_instantiation_info,
3992 stage,
3993 ):
3994 # launch instantiate_N2VC in an asyncio task and register the task object
3995 # Look up this charm's information in the database at <nsrs>._admin.deployed.VCA;
3996 # if not found, create one entry and update the database
3997 # fill db_nsr._admin.deployed.VCA.<index>
3998
3999 self.logger.debug(
4000 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
4001 )
4002
4003 charm_name = ""
4004 get_charm_name = False
4005 if "execution-environment-list" in descriptor_config:
4006 ee_list = descriptor_config.get("execution-environment-list", [])
4007 elif "juju" in descriptor_config:
4008 ee_list = [descriptor_config] # ns charms
4009 if "execution-environment-list" not in descriptor_config:
4010 # charm name is only required for ns charms
4011 get_charm_name = True
4012 else: # other types, such as script, are not supported
4013 ee_list = []
4014
4015 for ee_item in ee_list:
4016 self.logger.debug(
4017 logging_text
4018 + "_deploy_n2vc ee_item juju={}, helm={}".format(
4019 ee_item.get("juju"), ee_item.get("helm-chart")
4020 )
4021 )
4022 ee_descriptor_id = ee_item.get("id")
4023 if ee_item.get("juju"):
4024 vca_name = ee_item["juju"].get("charm")
4025 if get_charm_name:
4026 charm_name = self.find_charm_name(db_nsr, str(vca_name))
4027 vca_type = (
4028 "lxc_proxy_charm"
4029 if ee_item["juju"].get("charm") is not None
4030 else "native_charm"
4031 )
4032 if ee_item["juju"].get("cloud") == "k8s":
4033 vca_type = "k8s_proxy_charm"
4034 elif ee_item["juju"].get("proxy") is False:
4035 vca_type = "native_charm"
4036 elif ee_item.get("helm-chart"):
4037 vca_name = ee_item["helm-chart"]
4038 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
4039 vca_type = "helm"
4040 else:
4041 vca_type = "helm-v3"
4042 else:
4043 self.logger.debug(
4044 logging_text + "skipping execution environment that is neither juju nor helm"
4045 )
4046 continue
4047
4048 vca_index = -1
4049 for vca_index, vca_deployed in enumerate(
4050 db_nsr["_admin"]["deployed"]["VCA"]
4051 ):
4052 if not vca_deployed:
4053 continue
4054 if (
4055 vca_deployed.get("member-vnf-index") == member_vnf_index
4056 and vca_deployed.get("vdu_id") == vdu_id
4057 and vca_deployed.get("kdu_name") == kdu_name
4058 and vca_deployed.get("vdu_count_index", 0) == vdu_index
4059 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
4060 ):
4061 break
4062 else:
4063 # not found, create one.
4064 target = (
4065 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
4066 )
4067 if vdu_id:
4068 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
4069 elif kdu_name:
4070 target += "/kdu/{}".format(kdu_name)
4071 vca_deployed = {
4072 "target_element": target,
4073 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4074 "member-vnf-index": member_vnf_index,
4075 "vdu_id": vdu_id,
4076 "kdu_name": kdu_name,
4077 "vdu_count_index": vdu_index,
4078 "operational-status": "init", # TODO revise
4079 "detailed-status": "", # TODO revise
4080 "step": "initial-deploy", # TODO revise
4081 "vnfd_id": vnfd_id,
4082 "vdu_name": vdu_name,
4083 "type": vca_type,
4084 "ee_descriptor_id": ee_descriptor_id,
4085 "charm_name": charm_name,
4086 }
4087 vca_index += 1
4088
4089 # create VCA and configurationStatus in db
4090 db_dict = {
4091 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
4092 "configurationStatus.{}".format(vca_index): dict(),
4093 }
4094 self.update_db_2("nsrs", nsr_id, db_dict)
4095
4096 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
4097
4098 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
4099 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
4100 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
4101
4102 # Launch task
4103 task_n2vc = asyncio.ensure_future(
4104 self.instantiate_N2VC(
4105 logging_text=logging_text,
4106 vca_index=vca_index,
4107 nsi_id=nsi_id,
4108 db_nsr=db_nsr,
4109 db_vnfr=db_vnfr,
4110 vdu_id=vdu_id,
4111 kdu_name=kdu_name,
4112 vdu_index=vdu_index,
4113 kdu_index=kdu_index,
4114 deploy_params=deploy_params,
4115 config_descriptor=descriptor_config,
4116 base_folder=base_folder,
4117 nslcmop_id=nslcmop_id,
4118 stage=stage,
4119 vca_type=vca_type,
4120 vca_name=vca_name,
4121 ee_config_descriptor=ee_item,
4122 )
4123 )
4124 self.lcm_tasks.register(
4125 "ns",
4126 nsr_id,
4127 nslcmop_id,
4128 "instantiate_N2VC-{}".format(vca_index),
4129 task_n2vc,
4130 )
4131 task_instantiation_info[
4132 task_n2vc
4133 ] = self.task_name_deploy_vca + " {}.{}".format(
4134 member_vnf_index or "", vdu_id or ""
4135 )
4136
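# Illustrative sketch (commented out) of the for/else lookup idiom used in
# _deploy_n2vc above: the else branch runs only when the loop completes
# without break, i.e. when no matching record was found:
#
#   records = [{"vdu_id": "a"}, {"vdu_id": "b"}]
#   for vca_index, record in enumerate(records):
#       if record.get("vdu_id") == "c":
#           break  # reuse the record found at vca_index
#   else:
#       records.append({"vdu_id": "c"})  # not found: create a new entry
#       vca_index = len(records) - 1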
4137 @staticmethod
4138 def _create_nslcmop(nsr_id, operation, params):
4139 """
4140 Creates an ns-lcm-op occurrence (nslcmop) record to be stored in the database.
4141 :param nsr_id: internal id of the instance
4142 :param operation: instantiate, terminate, scale, action, ...
4143 :param params: user parameters for the operation
4144 :return: dictionary following SOL005 format
4145 """
4146 # Raise exception if invalid arguments
4147 if not (nsr_id and operation and params):
4148 raise LcmException(
4149 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4150 )
4151 now = time()
4152 _id = str(uuid4())
4153 nslcmop = {
4154 "id": _id,
4155 "_id": _id,
4156 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4157 "operationState": "PROCESSING",
4158 "statusEnteredTime": now,
4159 "nsInstanceId": nsr_id,
4160 "lcmOperationType": operation,
4161 "startTime": now,
4162 "isAutomaticInvocation": False,
4163 "operationParams": params,
4164 "isCancelPending": False,
4165 "links": {
4166 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
4167 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
4168 },
4169 }
4170 return nslcmop
4171
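# Illustrative usage (commented out; ids are hypothetical, and the enclosing
# class is assumed to be NsLcm): the returned record can be inserted directly
# into the "nslcmops" collection.
#
#   op = NsLcm._create_nslcmop(
#       nsr_id="f2a1...", operation="action", params={"primitive": "touch"}
#   )
#   # op["operationState"] == "PROCESSING"
#   # op["links"]["self"] == "/osm/nslcm/v1/ns_lcm_op_occs/" + op["_id"]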
4172 def _format_additional_params(self, params):
4173 params = params or {}
4174 for key, value in params.items():
4175 if str(value).startswith("!!yaml "):
4176 params[key] = yaml.safe_load(value[7:])
4177 return params
4178
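# Illustrative example (commented out): only values prefixed with "!!yaml "
# are parsed; everything else passes through unchanged.
#
#   self._format_additional_params({"a": "!!yaml [1, 2]", "b": "plain"})
#   # -> {"a": [1, 2], "b": "plain"}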
4179 def _get_terminate_primitive_params(self, seq, vnf_index):
4180 primitive = seq.get("name")
4181 primitive_params = {}
4182 params = {
4183 "member_vnf_index": vnf_index,
4184 "primitive": primitive,
4185 "primitive_params": primitive_params,
4186 }
4187 desc_params = {}
4188 return self._map_primitive_params(seq, params, desc_params)
4189
4190 # sub-operations
4191
4192 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4193 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4194 if op.get("operationState") == "COMPLETED":
4195 # b. Skip sub-operation
4196 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4197 return self.SUBOPERATION_STATUS_SKIP
4198 else:
4199 # c. retry executing sub-operation
4200 # The sub-operation exists, and operationState != 'COMPLETED'
4201 # Update operationState = 'PROCESSING' to indicate a retry.
4202 operationState = "PROCESSING"
4203 detailed_status = "In progress"
4204 self._update_suboperation_status(
4205 db_nslcmop, op_index, operationState, detailed_status
4206 )
4207 # Return the sub-operation index
4208 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4209 # with arguments extracted from the sub-operation
4210 return op_index
4211
4212 # Find a sub-operation where all keys in a matching dictionary must match
4213 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4214 def _find_suboperation(self, db_nslcmop, match):
4215 if db_nslcmop and match:
4216 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4217 for i, op in enumerate(op_list):
4218 if all(op.get(k) == match[k] for k in match):
4219 return i
4220 return self.SUBOPERATION_STATUS_NOT_FOUND
4221
4222 # Update status for a sub-operation given its index
4223 def _update_suboperation_status(
4224 self, db_nslcmop, op_index, operationState, detailed_status
4225 ):
4226 # Update DB for HA tasks
4227 q_filter = {"_id": db_nslcmop["_id"]}
4228 update_dict = {
4229 "_admin.operations.{}.operationState".format(op_index): operationState,
4230 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4231 }
4232 self.db.set_one(
4233 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4234 )
4235
4236 # Add sub-operation, return the index of the added sub-operation
4237 # Optionally, set operationState, detailed-status, and operationType
4238 # Status and type are currently set for 'scale' sub-operations:
4239 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4240 # 'detailed-status' : status message
4241 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4242 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4243 def _add_suboperation(
4244 self,
4245 db_nslcmop,
4246 vnf_index,
4247 vdu_id,
4248 vdu_count_index,
4249 vdu_name,
4250 primitive,
4251 mapped_primitive_params,
4252 operationState=None,
4253 detailed_status=None,
4254 operationType=None,
4255 RO_nsr_id=None,
4256 RO_scaling_info=None,
4257 ):
4258 if not db_nslcmop:
4259 return self.SUBOPERATION_STATUS_NOT_FOUND
4260 # Get the "_admin.operations" list, if it exists
4261 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4262 op_list = db_nslcmop_admin.get("operations")
4263 # Create or append to the "_admin.operations" list
4264 new_op = {
4265 "member_vnf_index": vnf_index,
4266 "vdu_id": vdu_id,
4267 "vdu_count_index": vdu_count_index,
4268 "primitive": primitive,
4269 "primitive_params": mapped_primitive_params,
4270 }
4271 if operationState:
4272 new_op["operationState"] = operationState
4273 if detailed_status:
4274 new_op["detailed-status"] = detailed_status
4275 if operationType:
4276 new_op["lcmOperationType"] = operationType
4277 if RO_nsr_id:
4278 new_op["RO_nsr_id"] = RO_nsr_id
4279 if RO_scaling_info:
4280 new_op["RO_scaling_info"] = RO_scaling_info
4281 if not op_list:
4282 # No existing operations, create key 'operations' with current operation as first list element
4283 db_nslcmop_admin.update({"operations": [new_op]})
4284 op_list = db_nslcmop_admin.get("operations")
4285 else:
4286 # Existing operations, append operation to list
4287 op_list.append(new_op)
4288
4289 db_nslcmop_update = {"_admin.operations": op_list}
4290 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4291 op_index = len(op_list) - 1
4292 return op_index
4293
4294 # Helper methods for scale() sub-operations
4295
4296 # pre-scale/post-scale:
4297 # Check for 3 different cases:
4298 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4299 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4300 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4301 def _check_or_add_scale_suboperation(
4302 self,
4303 db_nslcmop,
4304 vnf_index,
4305 vnf_config_primitive,
4306 primitive_params,
4307 operationType,
4308 RO_nsr_id=None,
4309 RO_scaling_info=None,
4310 ):
4311 # Find this sub-operation
4312 if RO_nsr_id and RO_scaling_info:
4313 operationType = "SCALE-RO"
4314 match = {
4315 "member_vnf_index": vnf_index,
4316 "RO_nsr_id": RO_nsr_id,
4317 "RO_scaling_info": RO_scaling_info,
4318 }
4319 else:
4320 match = {
4321 "member_vnf_index": vnf_index,
4322 "primitive": vnf_config_primitive,
4323 "primitive_params": primitive_params,
4324 "lcmOperationType": operationType,
4325 }
4326 op_index = self._find_suboperation(db_nslcmop, match)
4327 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4328 # a. New sub-operation
4329 # The sub-operation does not exist, add it.
4330 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4331 # The following parameters are set to None for all kinds of scaling:
4332 vdu_id = None
4333 vdu_count_index = None
4334 vdu_name = None
4335 if RO_nsr_id and RO_scaling_info:
4336 vnf_config_primitive = None
4337 primitive_params = None
4338 else:
4339 RO_nsr_id = None
4340 RO_scaling_info = None
4341 # Initial status for sub-operation
4342 operationState = "PROCESSING"
4343 detailed_status = "In progress"
4344 # Add sub-operation for pre/post-scaling (zero or more operations)
4345 self._add_suboperation(
4346 db_nslcmop,
4347 vnf_index,
4348 vdu_id,
4349 vdu_count_index,
4350 vdu_name,
4351 vnf_config_primitive,
4352 primitive_params,
4353 operationState,
4354 detailed_status,
4355 operationType,
4356 RO_nsr_id,
4357 RO_scaling_info,
4358 )
4359 return self.SUBOPERATION_STATUS_NEW
4360 else:
4361 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4362 # or op_index (operationState != 'COMPLETED')
4363 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4364
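# Illustrative sketch (commented out) of how a caller can branch on the three
# outcomes documented above:
#
#   op_index = self._check_or_add_scale_suboperation(
#       db_nslcmop, vnf_index, primitive, params, "PRE-SCALE"
#   )
#   if op_index == self.SUBOPERATION_STATUS_NEW:
#       pass  # first execution: run the primitive as usual
#   elif op_index == self.SUBOPERATION_STATUS_SKIP:
#       pass  # already COMPLETED: do not re-execute
#   else:
#       pass  # retry: re-execute using the stored sub-operation at op_index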
4365 # Function to return execution_environment id
4366
4367 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4368 # TODO vdu_index_count
4369 for vca in vca_deployed_list:
4370 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4371 return vca.get("ee_id")
4372
4373 async def destroy_N2VC(
4374 self,
4375 logging_text,
4376 db_nslcmop,
4377 vca_deployed,
4378 config_descriptor,
4379 vca_index,
4380 destroy_ee=True,
4381 exec_primitives=True,
4382 scaling_in=False,
4383 vca_id: str = None,
4384 ):
4385 """
4386 Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
4387 :param logging_text:
4388 :param db_nslcmop:
4389 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
4390 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4391 :param vca_index: index in the database _admin.deployed.VCA
4392 :param destroy_ee: False to skip destroying the EE, because all of them will be destroyed at once later
4393 :param exec_primitives: False to skip executing terminate primitives, because the config did not complete or did
4394 not execute properly
4395 :param scaling_in: True destroys the application, False destroys the model
4396 :return: None or exception
4397 """
4398
4399 self.logger.debug(
4400 logging_text
4401 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4402 vca_index, vca_deployed, config_descriptor, destroy_ee
4403 )
4404 )
4405
4406 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4407
4408 # execute terminate_primitives
4409 if exec_primitives:
4410 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
4411 config_descriptor.get("terminate-config-primitive"),
4412 vca_deployed.get("ee_descriptor_id"),
4413 )
4414 vdu_id = vca_deployed.get("vdu_id")
4415 vdu_count_index = vca_deployed.get("vdu_count_index")
4416 vdu_name = vca_deployed.get("vdu_name")
4417 vnf_index = vca_deployed.get("member-vnf-index")
4418 if terminate_primitives and vca_deployed.get("needed_terminate"):
4419 for seq in terminate_primitives:
4420 # For each sequence in list, get primitive and call _ns_execute_primitive()
4421 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
4422 vnf_index, seq.get("name")
4423 )
4424 self.logger.debug(logging_text + step)
4425 # Create the primitive for each sequence, e.g. "primitive": "touch"
4426 primitive = seq.get("name")
4427 mapped_primitive_params = self._get_terminate_primitive_params(
4428 seq, vnf_index
4429 )
4430
4431 # Add sub-operation
4432 self._add_suboperation(
4433 db_nslcmop,
4434 vnf_index,
4435 vdu_id,
4436 vdu_count_index,
4437 vdu_name,
4438 primitive,
4439 mapped_primitive_params,
4440 )
4441 # Sub-operations: Call _ns_execute_primitive() instead of action()
4442 try:
4443 result, result_detail = await self._ns_execute_primitive(
4444 vca_deployed["ee_id"],
4445 primitive,
4446 mapped_primitive_params,
4447 vca_type=vca_type,
4448 vca_id=vca_id,
4449 )
4450 except LcmException:
4451 # this happens when the VCA is not deployed. In this case there is nothing to terminate
4452 continue
4453 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
4454 if result not in result_ok:
4455 raise LcmException(
4456 "terminate_primitive {} for vnf_member_index={} fails with "
4457 "error {}".format(seq.get("name"), vnf_index, result_detail)
4458 )
4459 # mark that this VCA no longer needs to be terminated
4460 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4461 vca_index
4462 )
4463 self.update_db_2(
4464 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4465 )
4466
4467 # Delete Prometheus Jobs if any
4468 # This uses NSR_ID, so it will destroy any jobs under this index
4469 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
4470
4471 if destroy_ee:
4472 await self.vca_map[vca_type].delete_execution_environment(
4473 vca_deployed["ee_id"],
4474 scaling_in=scaling_in,
4475 vca_type=vca_type,
4476 vca_id=vca_id,
4477 )
4478
4479 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4480 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4481 namespace = "." + db_nsr["_id"]
4482 try:
4483 await self.n2vc.delete_namespace(
4484 namespace=namespace,
4485 total_timeout=self.timeout.charm_delete,
4486 vca_id=vca_id,
4487 )
4488 except N2VCNotFound: # already deleted. Skip
4489 pass
4490 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4491
4492 async def terminate(self, nsr_id, nslcmop_id):
4493 # Try to lock HA task here
4494 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4495 if not task_is_locked_by_me:
4496 return
4497
4498 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4499 self.logger.debug(logging_text + "Enter")
4500 timeout_ns_terminate = self.timeout.ns_terminate
4501 db_nsr = None
4502 db_nslcmop = None
4503 operation_params = None
4504 exc = None
4505 error_list = [] # collects all error messages from failed steps
4506 db_nslcmop_update = {}
4507 autoremove = False # autoremove after terminated
4508 tasks_dict_info = {}
4509 db_nsr_update = {}
4510 stage = [
4511 "Stage 1/3: Preparing task.",
4512 "Waiting for previous operations to terminate.",
4513 "",
4514 ]
4515 # ^ contains [stage, step, VIM-status]
4516 try:
4517 # wait for any previous tasks in process
4518 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4519
4520 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4521 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4522 operation_params = db_nslcmop.get("operationParams") or {}
4523 if operation_params.get("timeout_ns_terminate"):
4524 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4525 stage[1] = "Getting nsr={} from db.".format(nsr_id)
4526 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4527
4528 db_nsr_update["operational-status"] = "terminating"
4529 db_nsr_update["config-status"] = "terminating"
4530 self._write_ns_status(
4531 nsr_id=nsr_id,
4532 ns_state="TERMINATING",
4533 current_operation="TERMINATING",
4534 current_operation_id=nslcmop_id,
4535 other_update=db_nsr_update,
4536 )
4537 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4538 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4539 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4540 return
4541
4542 stage[1] = "Getting vnf descriptors from db."
4543 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4544 db_vnfrs_dict = {
4545 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4546 }
4547 db_vnfds_from_id = {}
4548 db_vnfds_from_member_index = {}
4549 # Loop over VNFRs
4550 for vnfr in db_vnfrs_list:
4551 vnfd_id = vnfr["vnfd-id"]
4552 if vnfd_id not in db_vnfds_from_id:
4553 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4554 db_vnfds_from_id[vnfd_id] = vnfd
4555 db_vnfds_from_member_index[
4556 vnfr["member-vnf-index-ref"]
4557 ] = db_vnfds_from_id[vnfd_id]
4558
4559 # Destroy individual execution environments when there are terminating primitives.
4560 # The rest of the EEs will be deleted at once
4561 # TODO - check before calling _destroy_N2VC
4562 # if not operation_params.get("skip_terminate_primitives"):#
4563 # or not vca.get("needed_terminate"):
4564 stage[0] = "Stage 2/3 execute terminating primitives."
4565 self.logger.debug(logging_text + stage[0])
4566 stage[1] = "Looking for execution environments that need to be terminated."
4567 self.logger.debug(logging_text + stage[1])
4568
4569 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4570 config_descriptor = None
4571 vca_member_vnf_index = vca.get("member-vnf-index")
4572 vca_id = self.get_vca_id(
4573 db_vnfrs_dict.get(vca_member_vnf_index)
4574 if vca_member_vnf_index
4575 else None,
4576 db_nsr,
4577 )
4578 if not vca or not vca.get("ee_id"):
4579 continue
4580 if not vca.get("member-vnf-index"):
4581 # ns
4582 config_descriptor = db_nsr.get("ns-configuration")
4583 elif vca.get("vdu_id"):
4584 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4585 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4586 elif vca.get("kdu_name"):
4587 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4588 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4589 else:
4590 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4591 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4592 vca_type = vca.get("type")
4593 exec_terminate_primitives = not operation_params.get(
4594 "skip_terminate_primitives"
4595 ) and vca.get("needed_terminate")
4596 # For helm we must destroy the EE. The same goes for native_charm, as the juju model cannot be deleted if there are
4597 # pending native charms
4598 destroy_ee = (
4599 True if vca_type in ("helm", "helm-v3", "native_charm") else False
4600 )
4601 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4602 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4603 task = asyncio.ensure_future(
4604 self.destroy_N2VC(
4605 logging_text,
4606 db_nslcmop,
4607 vca,
4608 config_descriptor,
4609 vca_index,
4610 destroy_ee,
4611 exec_terminate_primitives,
4612 vca_id=vca_id,
4613 )
4614 )
4615 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4616
4617 # wait for pending tasks of terminate primitives
4618 if tasks_dict_info:
4619 self.logger.debug(
4620 logging_text
4621 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4622 )
4623 error_list = await self._wait_for_tasks(
4624 logging_text,
4625 tasks_dict_info,
4626 min(self.timeout.charm_delete, timeout_ns_terminate),
4627 stage,
4628 nslcmop_id,
4629 )
4630 tasks_dict_info.clear()
4631 if error_list:
4632 return # raise LcmException("; ".join(error_list))
4633
4634 # remove All execution environments at once
4635 stage[0] = "Stage 3/3 delete all."
4636
4637 if nsr_deployed.get("VCA"):
4638 stage[1] = "Deleting all execution environments."
4639 self.logger.debug(logging_text + stage[1])
4640 vca_id = self.get_vca_id({}, db_nsr)
4641 task_delete_ee = asyncio.ensure_future(
4642 asyncio.wait_for(
4643 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4644 timeout=self.timeout.charm_delete,
4645 )
4646 )
4647 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4648 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4649
4650 # Delete Namespace and Certificates if necessary
4651 if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
4652 await self.vca_map["helm-v3"].delete_tls_certificate(
4653 namespace=db_nslcmop["nsInstanceId"],
4654 certificate_name=self.EE_TLS_NAME,
4655 )
4656 await self.vca_map["helm-v3"].delete_namespace(
4657 namespace=db_nslcmop["nsInstanceId"],
4658 )
4659
4660 # Delete from k8scluster
4661 stage[1] = "Deleting KDUs."
4662 self.logger.debug(logging_text + stage[1])
4663 # print(nsr_deployed)
4664 for kdu in get_iterable(nsr_deployed, "K8s"):
4665 if not kdu or not kdu.get("kdu-instance"):
4666 continue
4667 kdu_instance = kdu.get("kdu-instance")
4668 if kdu.get("k8scluster-type") in self.k8scluster_map:
4669 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4670 vca_id = self.get_vca_id({}, db_nsr)
4671 task_delete_kdu_instance = asyncio.ensure_future(
4672 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4673 cluster_uuid=kdu.get("k8scluster-uuid"),
4674 kdu_instance=kdu_instance,
4675 vca_id=vca_id,
4676 namespace=kdu.get("namespace"),
4677 )
4678 )
4679 else:
4680 self.logger.error(
4681 logging_text
4682 + "Unknown k8s deployment type {}".format(
4683 kdu.get("k8scluster-type")
4684 )
4685 )
4686 continue
4687 tasks_dict_info[
4688 task_delete_kdu_instance
4689 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4690
4691 # remove from RO
4692 stage[1] = "Deleting ns from VIM."
4693 if self.ro_config.ng:
4694 task_delete_ro = asyncio.ensure_future(
4695 self._terminate_ng_ro(
4696 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4697 )
4698 )
4699 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4700
4701 # the rest of the work is done in the finally block
4702
4703 except (
4704 ROclient.ROClientException,
4705 DbException,
4706 LcmException,
4707 N2VCException,
4708 ) as e:
4709 self.logger.error(logging_text + "Exit Exception {}".format(e))
4710 exc = e
4711 except asyncio.CancelledError:
4712 self.logger.error(
4713 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4714 )
4715 exc = "Operation was cancelled"
4716 except Exception as e:
4717 exc = traceback.format_exc()
4718 self.logger.critical(
4719 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4720 exc_info=True,
4721 )
4722 finally:
4723 if exc:
4724 error_list.append(str(exc))
4725 try:
4726 # wait for pending tasks
4727 if tasks_dict_info:
4728 stage[1] = "Waiting for terminate pending tasks."
4729 self.logger.debug(logging_text + stage[1])
4730 error_list += await self._wait_for_tasks(
4731 logging_text,
4732 tasks_dict_info,
4733 timeout_ns_terminate,
4734 stage,
4735 nslcmop_id,
4736 )
4737 stage[1] = stage[2] = ""
4738 except asyncio.CancelledError:
4739 error_list.append("Cancelled")
4740 # TODO cancel all tasks
4741 except Exception as exc:
4742 error_list.append(str(exc))
4743 # update status at database
4744 if error_list:
4745 error_detail = "; ".join(error_list)
4746 # self.logger.error(logging_text + error_detail)
4747 error_description_nslcmop = "{} Detail: {}".format(
4748 stage[0], error_detail
4749 )
4750 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4751 nslcmop_id, stage[0]
4752 )
4753
4754 db_nsr_update["operational-status"] = "failed"
4755 db_nsr_update["detailed-status"] = (
4756 error_description_nsr + " Detail: " + error_detail
4757 )
4758 db_nslcmop_update["detailed-status"] = error_detail
4759 nslcmop_operation_state = "FAILED"
4760 ns_state = "BROKEN"
4761 else:
4762 error_detail = None
4763 error_description_nsr = error_description_nslcmop = None
4764 ns_state = "NOT_INSTANTIATED"
4765 db_nsr_update["operational-status"] = "terminated"
4766 db_nsr_update["detailed-status"] = "Done"
4767 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4768 db_nslcmop_update["detailed-status"] = "Done"
4769 nslcmop_operation_state = "COMPLETED"
4770
4771 if db_nsr:
4772 self._write_ns_status(
4773 nsr_id=nsr_id,
4774 ns_state=ns_state,
4775 current_operation="IDLE",
4776 current_operation_id=None,
4777 error_description=error_description_nsr,
4778 error_detail=error_detail,
4779 other_update=db_nsr_update,
4780 )
4781 self._write_op_status(
4782 op_id=nslcmop_id,
4783 stage="",
4784 error_message=error_description_nslcmop,
4785 operation_state=nslcmop_operation_state,
4786 other_update=db_nslcmop_update,
4787 )
4788 if ns_state == "NOT_INSTANTIATED":
4789 try:
4790 self.db.set_list(
4791 "vnfrs",
4792 {"nsr-id-ref": nsr_id},
4793 {"_admin.nsState": "NOT_INSTANTIATED"},
4794 )
4795 except DbException as e:
4796 self.logger.warn(
4797 logging_text
4798 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4799 nsr_id, e
4800 )
4801 )
4802 if operation_params:
4803 autoremove = operation_params.get("autoremove", False)
4804 if nslcmop_operation_state:
4805 try:
4806 await self.msg.aiowrite(
4807 "ns",
4808 "terminated",
4809 {
4810 "nsr_id": nsr_id,
4811 "nslcmop_id": nslcmop_id,
4812 "operationState": nslcmop_operation_state,
4813 "autoremove": autoremove,
4814 },
4815 )
4816 except Exception as e:
4817 self.logger.error(
4818 logging_text + "kafka_write notification Exception {}".format(e)
4819 )
4820 self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
4821 self.db.del_list("alerts", {"tags.ns_id": nsr_id})
4822
4823 self.logger.debug(logging_text + "Exit")
4824 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4825
4826 async def _wait_for_tasks(
4827 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4828 ):
4829 time_start = time()
4830 error_detail_list = []
4831 error_list = []
4832 pending_tasks = list(created_tasks_info.keys())
4833 num_tasks = len(pending_tasks)
4834 num_done = 0
4835 stage[1] = "{}/{}.".format(num_done, num_tasks)
4836 self._write_op_status(nslcmop_id, stage)
4837 while pending_tasks:
4838 new_error = None
4839 _timeout = timeout + time_start - time()
4840 done, pending_tasks = await asyncio.wait(
4841 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4842 )
4843 num_done += len(done)
4844 if not done: # Timeout
4845 for task in pending_tasks:
4846 new_error = created_tasks_info[task] + ": Timeout"
4847 error_detail_list.append(new_error)
4848 error_list.append(new_error)
4849 break
4850 for task in done:
4851 if task.cancelled():
4852 exc = "Cancelled"
4853 else:
4854 exc = task.exception()
4855 if exc:
4856 if isinstance(exc, asyncio.TimeoutError):
4857 exc = "Timeout"
4858 new_error = created_tasks_info[task] + ": {}".format(exc)
4859 error_list.append(created_tasks_info[task])
4860 error_detail_list.append(new_error)
4861 if isinstance(
4862 exc,
4863 (
4864 str,
4865 DbException,
4866 N2VCException,
4867 ROclient.ROClientException,
4868 LcmException,
4869 K8sException,
4870 NgRoException,
4871 ),
4872 ):
4873 self.logger.error(logging_text + new_error)
4874 else:
4875 exc_traceback = "".join(
4876 traceback.format_exception(None, exc, exc.__traceback__)
4877 )
4878 self.logger.error(
4879 logging_text
4880 + created_tasks_info[task]
4881 + " "
4882 + exc_traceback
4883 )
4884 else:
4885 self.logger.debug(
4886 logging_text + created_tasks_info[task] + ": Done"
4887 )
4888 stage[1] = "{}/{}.".format(num_done, num_tasks)
4889 if new_error:
4890 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4891 if nsr_id: # update also nsr
4892 self.update_db_2(
4893 "nsrs",
4894 nsr_id,
4895 {
4896 "errorDescription": "Error at: " + ", ".join(error_list),
4897 "errorDetail": ". ".join(error_detail_list),
4898 },
4899 )
4900 self._write_op_status(nslcmop_id, stage)
4901 return error_detail_list
4902
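# A distilled sketch of the task-draining pattern used by _wait_for_tasks
# above (commented out, illustrative only): asyncio.wait with
# FIRST_COMPLETED is called in a loop, shrinking the remaining timeout on
# every pass.
#
#   from time import time
#
#   async def drain(tasks, timeout):
#       start, pending = time(), list(tasks)
#       while pending:
#           done, pending = await asyncio.wait(
#               pending,
#               timeout=timeout + start - time(),
#               return_when=asyncio.FIRST_COMPLETED,
#           )
#           if not done:
#               break  # timeout: the remaining tasks are reported as errors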
4903 @staticmethod
4904 def _map_primitive_params(primitive_desc, params, instantiation_params):
4905 """
4906 Generates the params to be provided to the charm before executing a primitive. If the user does not provide a
4907 parameter, the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
4908 :param primitive_desc: portion of VNFD/NSD that describes primitive
4909 :param params: Params provided by user
4910 :param instantiation_params: Instantiation params provided by user
4911 :return: a dictionary with the calculated params
4912 """
4913 calculated_params = {}
4914 for parameter in primitive_desc.get("parameter", ()):
4915 param_name = parameter["name"]
4916 if param_name in params:
4917 calculated_params[param_name] = params[param_name]
4918 elif "default-value" in parameter or "value" in parameter:
4919 if "value" in parameter:
4920 calculated_params[param_name] = parameter["value"]
4921 else:
4922 calculated_params[param_name] = parameter["default-value"]
4923 if (
4924 isinstance(calculated_params[param_name], str)
4925 and calculated_params[param_name].startswith("<")
4926 and calculated_params[param_name].endswith(">")
4927 ):
4928 if calculated_params[param_name][1:-1] in instantiation_params:
4929 calculated_params[param_name] = instantiation_params[
4930 calculated_params[param_name][1:-1]
4931 ]
4932 else:
4933 raise LcmException(
4934 "Parameter {} needed to execute primitive {} not provided".format(
4935 calculated_params[param_name], primitive_desc["name"]
4936 )
4937 )
4938 else:
4939 raise LcmException(
4940 "Parameter {} needed to execute primitive {} not provided".format(
4941 param_name, primitive_desc["name"]
4942 )
4943 )
4944
4945 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4946 calculated_params[param_name] = yaml.safe_dump(
4947 calculated_params[param_name], default_flow_style=True, width=256
4948 )
4949 elif isinstance(calculated_params[param_name], str) and calculated_params[
4950 param_name
4951 ].startswith("!!yaml "):
4952 calculated_params[param_name] = calculated_params[param_name][7:]
4953 if parameter.get("data-type") == "INTEGER":
4954 try:
4955 calculated_params[param_name] = int(calculated_params[param_name])
4956 except ValueError: # error converting string to int
4957 raise LcmException(
4958 "Parameter {} of primitive {} must be integer".format(
4959 param_name, primitive_desc["name"]
4960 )
4961 )
4962 elif parameter.get("data-type") == "BOOLEAN":
4963 calculated_params[param_name] = not (
4964 (str(calculated_params[param_name])).lower() == "false"
4965 )
4966
4967 # always add ns_config_info if the primitive name is config
4968 if primitive_desc["name"] == "config":
4969 if "ns_config_info" in instantiation_params:
4970 calculated_params["ns_config_info"] = instantiation_params[
4971 "ns_config_info"
4972 ]
4973 return calculated_params
4974
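# Illustrative example (commented out; descriptor values are hypothetical,
# and the enclosing class is assumed to be NsLcm): "<...>" placeholders in
# default values are resolved against instantiation_params.
#
#   desc = {"name": "touch", "parameter": [
#       {"name": "filename", "default-value": "<touch_filename>"},
#   ]}
#   NsLcm._map_primitive_params(desc, {}, {"touch_filename": "/tmp/f"})
#   # -> {"filename": "/tmp/f"}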
4975 def _look_for_deployed_vca(
4976 self,
4977 deployed_vca,
4978 member_vnf_index,
4979 vdu_id,
4980 vdu_count_index,
4981 kdu_name=None,
4982 ee_descriptor_id=None,
4983 ):
4984 # find the vca_deployed record for this action. Raise LcmException if not found or it has no ee_id.
4985 for vca in deployed_vca:
4986 if not vca:
4987 continue
4988 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4989 continue
4990 if (
4991 vdu_count_index is not None
4992 and vdu_count_index != vca["vdu_count_index"]
4993 ):
4994 continue
4995 if kdu_name and kdu_name != vca["kdu_name"]:
4996 continue
4997 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4998 continue
4999 break
5000 else:
5001 # vca_deployed not found
5002 raise LcmException(
5003 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
5004 " is not deployed".format(
5005 member_vnf_index,
5006 vdu_id,
5007 vdu_count_index,
5008 kdu_name,
5009 ee_descriptor_id,
5010 )
5011 )
5012 # get ee_id
5013 ee_id = vca.get("ee_id")
5014 vca_type = vca.get(
5015 "type", "lxc_proxy_charm"
5016 ) # default value for backward compatibility - proxy charm
5017 if not ee_id:
5018 raise LcmException(
5019 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5020 "execution environment".format(
5021 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5022 )
5023 )
5024 return ee_id, vca_type
5025
5026 async def _ns_execute_primitive(
5027 self,
5028 ee_id,
5029 primitive,
5030 primitive_params,
5031 retries=0,
5032 retries_interval=30,
5033 timeout=None,
5034 vca_type=None,
5035 db_dict=None,
5036 vca_id: str = None,
5037 ) -> (str, str):
5038 try:
5039 if primitive == "config":
5040 primitive_params = {"params": primitive_params}
5041
5042 vca_type = vca_type or "lxc_proxy_charm"
5043
5044 while retries >= 0:
5045 try:
5046 output = await asyncio.wait_for(
5047 self.vca_map[vca_type].exec_primitive(
5048 ee_id=ee_id,
5049 primitive_name=primitive,
5050 params_dict=primitive_params,
5051 progress_timeout=self.timeout.progress_primitive,
5052 total_timeout=self.timeout.primitive,
5053 db_dict=db_dict,
5054 vca_id=vca_id,
5055 vca_type=vca_type,
5056 ),
5057 timeout=timeout or self.timeout.primitive,
5058 )
5059 # execution was OK
5060 break
5061 except asyncio.CancelledError:
5062 raise
5063 except Exception as e:
5064 retries -= 1
5065 if retries >= 0:
5066 self.logger.debug(
5067 "Error executing action {} on {} -> {}".format(
5068 primitive, ee_id, e
5069 )
5070 )
5071 # wait and retry
5072 await asyncio.sleep(retries_interval)
5073 else:
5074 if isinstance(e, asyncio.TimeoutError):
5075 e = N2VCException(
5076 message="Timed out waiting for action to complete"
5077 )
5078 return "FAILED", getattr(e, "message", repr(e))
5079
5080 return "COMPLETED", output
5081
5082 except (LcmException, asyncio.CancelledError):
5083 raise
5084 except Exception as e:
5085 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5086
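# A distilled sketch of the retry loop in _ns_execute_primitive above
# (commented out, illustrative only): retries=0 means a single attempt, and
# the interval is awaited between attempts.
#
#   async def run_with_retries(coro_factory, retries=0, interval=30):
#       while retries >= 0:
#           try:
#               return "COMPLETED", await coro_factory()
#           except asyncio.CancelledError:
#               raise
#           except Exception as e:
#               retries -= 1
#               if retries < 0:
#                   return "FAILED", getattr(e, "message", repr(e))
#               await asyncio.sleep(interval)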
5087 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5088 """
5089 Update the vca_status in the nsrs record with the latest juju information
5090 :param nsr_id: Id of the nsr
5091 :param nslcmop_id: Id of the nslcmop
5092 :return: None
5093 """
5094
5095 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5096 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5097 vca_id = self.get_vca_id({}, db_nsr)
5098 if db_nsr["_admin"]["deployed"]["K8s"]:
5099 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5100 cluster_uuid, kdu_instance, cluster_type = (
5101 k8s["k8scluster-uuid"],
5102 k8s["kdu-instance"],
5103 k8s["k8scluster-type"],
5104 )
5105 await self._on_update_k8s_db(
5106 cluster_uuid=cluster_uuid,
5107 kdu_instance=kdu_instance,
5108 filter={"_id": nsr_id},
5109 vca_id=vca_id,
5110 cluster_type=cluster_type,
5111 )
5112 else:
5113 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5114 table, filter = "nsrs", {"_id": nsr_id}
5115 path = "_admin.deployed.VCA.{}.".format(vca_index)
5116 await self._on_update_n2vc_db(table, filter, path, {})
5117
5118 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5119 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5120
5121 async def action(self, nsr_id, nslcmop_id):
5122 # Try to lock HA task here
5123 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5124 if not task_is_locked_by_me:
5125 return
5126
5127 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5128 self.logger.debug(logging_text + "Enter")
5129 # get all needed from database
5130 db_nsr = None
5131 db_nslcmop = None
5132 db_nsr_update = {}
5133 db_nslcmop_update = {}
5134 nslcmop_operation_state = None
5135 error_description_nslcmop = None
5136 exc = None
5137 step = ""
5138 try:
5139 # wait for any previous tasks in process
5140 step = "Waiting for previous operations to terminate"
5141 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5142
5143 self._write_ns_status(
5144 nsr_id=nsr_id,
5145 ns_state=None,
5146 current_operation="RUNNING ACTION",
5147 current_operation_id=nslcmop_id,
5148 )
5149
5150 step = "Getting information from database"
5151 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5152 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5153 if db_nslcmop["operationParams"].get("primitive_params"):
5154 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5155 db_nslcmop["operationParams"]["primitive_params"]
5156 )
5157
5158 nsr_deployed = db_nsr["_admin"].get("deployed")
5159 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
5160 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
5161 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
5162 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
5163 primitive = db_nslcmop["operationParams"]["primitive"]
5164 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
5165 timeout_ns_action = db_nslcmop["operationParams"].get(
5166 "timeout_ns_action", self.timeout.primitive
5167 )
5168
5169 if vnf_index:
5170 step = "Getting vnfr from database"
5171 db_vnfr = self.db.get_one(
5172 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5173 )
5174 if db_vnfr.get("kdur"):
5175 kdur_list = []
5176 for kdur in db_vnfr["kdur"]:
5177 if kdur.get("additionalParams"):
5178 kdur["additionalParams"] = json.loads(
5179 kdur["additionalParams"]
5180 )
5181 kdur_list.append(kdur)
5182 db_vnfr["kdur"] = kdur_list
5183 step = "Getting vnfd from database"
5184 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5185
5186 # Sync filesystem before running a primitive
5187 self.fs.sync(db_vnfr["vnfd-id"])
5188 else:
5189 step = "Getting nsd from database"
5190 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
5191
5192 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5193 # for backward compatibility
5194 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5195 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5196 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5197 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5198
5199 # look for primitive
5200 config_primitive_desc = descriptor_configuration = None
5201 if vdu_id:
5202 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
5203 elif kdu_name:
5204 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
5205 elif vnf_index:
5206 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
5207 else:
5208 descriptor_configuration = db_nsd.get("ns-configuration")
5209
5210 if descriptor_configuration and descriptor_configuration.get(
5211 "config-primitive"
5212 ):
5213 for config_primitive in descriptor_configuration["config-primitive"]:
5214 if config_primitive["name"] == primitive:
5215 config_primitive_desc = config_primitive
5216 break
5217
5218 if not config_primitive_desc:
5219 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
5220 raise LcmException(
5221 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5222 primitive
5223 )
5224 )
5225 primitive_name = primitive
5226 ee_descriptor_id = None
5227 else:
5228 primitive_name = config_primitive_desc.get(
5229 "execution-environment-primitive", primitive
5230 )
5231 ee_descriptor_id = config_primitive_desc.get(
5232 "execution-environment-ref"
5233 )
5234
5235 if vnf_index:
5236 if vdu_id:
5237 vdur = next(
5238 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5239 )
5240 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5241 elif kdu_name:
5242 kdur = next(
5243 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5244 )
5245 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5246 else:
5247 desc_params = parse_yaml_strings(
5248 db_vnfr.get("additionalParamsForVnf")
5249 )
5250 else:
5251 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5252 if kdu_name and get_configuration(db_vnfd, kdu_name):
5253 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5254 actions = set()
5255 for primitive in kdu_configuration.get("initial-config-primitive", []):
5256 actions.add(primitive["name"])
5257 for primitive in kdu_configuration.get("config-primitive", []):
5258 actions.add(primitive["name"])
5259 kdu = find_in_list(
5260 nsr_deployed["K8s"],
5261 lambda kdu: kdu_name == kdu["kdu-name"]
5262 and kdu["member-vnf-index"] == vnf_index,
5263 )
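                # kdu_action is True only when the requested primitive is declared
                # in the KDU configuration and the KDU does not run on a helm
                # cluster; helm-based KDU primitives are expected to go through
                # the execution-environment path further below instead.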
                kdu_action = (
                    primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                )
5270
5271 # TODO check if ns is in a proper status
5272 if kdu_name and (
5273 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5274 ):
5275 # kdur and desc_params already set from before
5276 if primitive_params:
5277 desc_params.update(primitive_params)
5278 # TODO Check if we will need something at vnf level
5279 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5280 if (
5281 kdu_name == kdu["kdu-name"]
5282 and kdu["member-vnf-index"] == vnf_index
5283 ):
5284 break
5285 else:
5286 raise LcmException(
5287 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5288 )
5289
5290 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5291 msg = "unknown k8scluster-type '{}'".format(
5292 kdu.get("k8scluster-type")
5293 )
5294 raise LcmException(msg)
5295
5296 db_dict = {
5297 "collection": "nsrs",
5298 "filter": {"_id": nsr_id},
5299 "path": "_admin.deployed.K8s.{}".format(index),
5300 }
5301 self.logger.debug(
5302 logging_text
5303 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5304 )
5305 step = "Executing kdu {}".format(primitive_name)
5306 if primitive_name == "upgrade":
5307 if desc_params.get("kdu_model"):
5308 kdu_model = desc_params.get("kdu_model")
5309 del desc_params["kdu_model"]
5310 else:
5311 kdu_model = kdu.get("kdu-model")
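                        # A kdu_model with fewer than two "/" is a repository
                        # reference rather than an embedded chart; it may carry a
                        # version tag, e.g. "stable/openldap:1.2.3" (illustrative),
                        # which is stripped so only the chart reference is kept.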
5312 if kdu_model.count("/") < 2: # helm chart is not embedded
5313 parts = kdu_model.split(sep=":")
5314 if len(parts) == 2:
5315 kdu_model = parts[0]
5316 if desc_params.get("kdu_atomic_upgrade"):
5317 atomic_upgrade = desc_params.get(
5318 "kdu_atomic_upgrade"
5319 ).lower() in ("yes", "true", "1")
5320 del desc_params["kdu_atomic_upgrade"]
5321 else:
5322 atomic_upgrade = True
5323
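                    # The outer asyncio.wait_for allows 10 s more than the
                    # connector's own timeout, so the connector can time out
                    # and report on its own first.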
5324 detailed_status = await asyncio.wait_for(
5325 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5326 cluster_uuid=kdu.get("k8scluster-uuid"),
5327 kdu_instance=kdu.get("kdu-instance"),
5328 atomic=atomic_upgrade,
5329 kdu_model=kdu_model,
5330 params=desc_params,
5331 db_dict=db_dict,
5332 timeout=timeout_ns_action,
5333 ),
5334 timeout=timeout_ns_action + 10,
5335 )
5336 self.logger.debug(
5337 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5338 )
5339 elif primitive_name == "rollback":
5340 detailed_status = await asyncio.wait_for(
5341 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5342 cluster_uuid=kdu.get("k8scluster-uuid"),
5343 kdu_instance=kdu.get("kdu-instance"),
5344 db_dict=db_dict,
5345 ),
5346 timeout=timeout_ns_action,
5347 )
5348 elif primitive_name == "status":
5349 detailed_status = await asyncio.wait_for(
5350 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5351 cluster_uuid=kdu.get("k8scluster-uuid"),
5352 kdu_instance=kdu.get("kdu-instance"),
5353 vca_id=vca_id,
5354 ),
5355 timeout=timeout_ns_action,
5356 )
5357 else:
5358 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5359 kdu["kdu-name"], nsr_id
5360 )
5361 params = self._map_primitive_params(
5362 config_primitive_desc, primitive_params, desc_params
5363 )
5364
5365 detailed_status = await asyncio.wait_for(
5366 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5367 cluster_uuid=kdu.get("k8scluster-uuid"),
5368 kdu_instance=kdu_instance,
5369 primitive_name=primitive_name,
5370 params=params,
5371 db_dict=db_dict,
5372 timeout=timeout_ns_action,
5373 vca_id=vca_id,
5374 ),
5375 timeout=timeout_ns_action,
5376 )
5377
5378 if detailed_status:
5379 nslcmop_operation_state = "COMPLETED"
5380 else:
5381 detailed_status = ""
5382 nslcmop_operation_state = "FAILED"
5383 else:
5384 ee_id, vca_type = self._look_for_deployed_vca(
5385 nsr_deployed["VCA"],
5386 member_vnf_index=vnf_index,
5387 vdu_id=vdu_id,
5388 vdu_count_index=vdu_count_index,
5389 ee_descriptor_id=ee_descriptor_id,
5390 )
5391 for vca_index, vca_deployed in enumerate(
5392 db_nsr["_admin"]["deployed"]["VCA"]
5393 ):
5394 if vca_deployed.get("member-vnf-index") == vnf_index:
5395 db_dict = {
5396 "collection": "nsrs",
5397 "filter": {"_id": nsr_id},
5398 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5399 }
5400 break
5401 (
5402 nslcmop_operation_state,
5403 detailed_status,
5404 ) = await self._ns_execute_primitive(
5405 ee_id,
5406 primitive=primitive_name,
5407 primitive_params=self._map_primitive_params(
5408 config_primitive_desc, primitive_params, desc_params
5409 ),
5410 timeout=timeout_ns_action,
5411 vca_type=vca_type,
5412 db_dict=db_dict,
5413 vca_id=vca_id,
5414 )
5415
5416 db_nslcmop_update["detailed-status"] = detailed_status
5417 error_description_nslcmop = (
5418 detailed_status if nslcmop_operation_state == "FAILED" else ""
5419 )
5420 self.logger.debug(
5421 logging_text
5422 + "Done with result {} {}".format(
5423 nslcmop_operation_state, detailed_status
5424 )
5425 )
5426 return # database update is called inside finally
5427
5428 except (DbException, LcmException, N2VCException, K8sException) as e:
5429 self.logger.error(logging_text + "Exit Exception {}".format(e))
5430 exc = e
5431 except asyncio.CancelledError:
5432 self.logger.error(
5433 logging_text + "Cancelled Exception while '{}'".format(step)
5434 )
5435 exc = "Operation was cancelled"
5436 except asyncio.TimeoutError:
5437 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5438 exc = "Timeout"
5439 except Exception as e:
5440 exc = traceback.format_exc()
5441 self.logger.critical(
5442 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5443 exc_info=True,
5444 )
5445 finally:
5446 if exc:
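                # The chained assignment keeps the operation record, the local
                # detailed_status and the reported error description in sync.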
5447 db_nslcmop_update[
5448 "detailed-status"
5449 ] = (
5450 detailed_status
5451 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5452 nslcmop_operation_state = "FAILED"
5453 if db_nsr:
5454 self._write_ns_status(
5455 nsr_id=nsr_id,
5456 ns_state=db_nsr[
5457 "nsState"
5458 ], # TODO check if degraded. For the moment use previous status
5459 current_operation="IDLE",
5460 current_operation_id=None,
5461 # error_description=error_description_nsr,
5462 # error_detail=error_detail,
5463 other_update=db_nsr_update,
5464 )
5465
5466 self._write_op_status(
5467 op_id=nslcmop_id,
5468 stage="",
5469 error_message=error_description_nslcmop,
5470 operation_state=nslcmop_operation_state,
5471 other_update=db_nslcmop_update,
5472 )
5473
5474 if nslcmop_operation_state:
5475 try:
5476 await self.msg.aiowrite(
5477 "ns",
5478 "actioned",
5479 {
5480 "nsr_id": nsr_id,
5481 "nslcmop_id": nslcmop_id,
5482 "operationState": nslcmop_operation_state,
5483 },
5484 )
5485 except Exception as e:
5486 self.logger.error(
5487 logging_text + "kafka_write notification Exception {}".format(e)
5488 )
5489 self.logger.debug(logging_text + "Exit")
5490 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5491 return nslcmop_operation_state, detailed_status
5492
5493 async def terminate_vdus(
5494 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5495 ):
        """This method terminates the VDUs of a VNF by triggering a scale-in over all of them

        Args:
            db_vnfr: VNF instance record
            member_vnf_index: VNF index to identify the VDUs to be removed
            db_nsr: NS instance record
            update_db_nslcmops: Nslcmop update record
            stage: three-element list of progress strings, updated in place
            logging_text: prefix for log messages
        """
5504 vca_scaling_info = []
5505 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5506 scaling_info["scaling_direction"] = "IN"
5507 scaling_info["vdu-delete"] = {}
5508 scaling_info["kdu-delete"] = {}
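        # scaling_info is assembled in the shape _scale_ng_ro expects for a
        # scale-in; illustrative (hypothetical) content:
        #   {"scaling_direction": "IN", "vdu-delete": {"mgmt-vdu": 0}, "vdu": [...]}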
5509 db_vdur = db_vnfr.get("vdur")
5510 vdur_list = copy(db_vdur)
5511 count_index = 0
5512 for index, vdu in enumerate(vdur_list):
5513 vca_scaling_info.append(
5514 {
5515 "osm_vdu_id": vdu["vdu-id-ref"],
5516 "member-vnf-index": member_vnf_index,
5517 "type": "delete",
5518 "vdu_index": count_index,
5519 }
5520 )
5521 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5522 scaling_info["vdu"].append(
5523 {
5524 "name": vdu.get("name") or vdu.get("vdu-name"),
5525 "vdu_id": vdu["vdu-id-ref"],
5526 "interface": [],
5527 }
5528 )
5529 for interface in vdu["interfaces"]:
5530 scaling_info["vdu"][index]["interface"].append(
5531 {
5532 "name": interface["name"],
5533 "ip_address": interface["ip-address"],
5534 "mac_address": interface.get("mac-address"),
5535 }
5536 )
        self.logger.info("NS update scaling info {}".format(scaling_info))
5538 stage[2] = "Terminating VDUs"
5539 if scaling_info.get("vdu-delete"):
5540 # scale_process = "RO"
5541 if self.ro_config.ng:
5542 await self._scale_ng_ro(
5543 logging_text,
5544 db_nsr,
5545 update_db_nslcmops,
5546 db_vnfr,
5547 scaling_info,
5548 stage,
5549 )
5550
5551 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
        """This method removes a VNF instance from the NS.
5553
5554 Args:
5555 nsr_id: NS instance id
5556 nslcmop_id: nslcmop id of update
5557 vnf_instance_id: id of the VNF instance to be removed
5558
5559 Returns:
5560 result: (str, str) COMPLETED/FAILED, details
5561 """
5562 try:
5563 db_nsr_update = {}
5564 logging_text = "Task ns={} update ".format(nsr_id)
5565 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5566 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
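            # Removal proceeds only while other VNFs remain in the NS;
            # terminating the last VNF is rejected below.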
5567 if check_vnfr_count > 1:
5568 stage = ["", "", ""]
5569 step = "Getting nslcmop from database"
5570 self.logger.debug(
5571 step + " after having waited for previous tasks to be completed"
5572 )
5573 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5574 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5575 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5576 member_vnf_index = db_vnfr["member-vnf-index-ref"]
                # db_vnfr = self.db.get_one(
                #     "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id})
5579
5580 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5581 await self.terminate_vdus(
5582 db_vnfr,
5583 member_vnf_index,
5584 db_nsr,
5585 update_db_nslcmops,
5586 stage,
5587 logging_text,
5588 )
5589
5590 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5591 constituent_vnfr.remove(db_vnfr.get("_id"))
5592 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5593 "constituent-vnfr-ref"
5594 )
5595 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5596 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5598 return "COMPLETED", "Done"
5599 else:
                step = "Terminating VNF"
                raise LcmException(
                    "Cannot terminate VNF {}: it is the last VNF in this NS.".format(
                        vnf_instance_id
                    )
                )
5606 except (LcmException, asyncio.CancelledError):
5607 raise
5608 except Exception as e:
5609 self.logger.debug("Error removing VNF {}".format(e))
5610 return "FAILED", "Error removing VNF {}".format(e)
5611
5612 async def _ns_redeploy_vnf(
5613 self,
5614 nsr_id,
5615 nslcmop_id,
5616 db_vnfd,
5617 db_vnfr,
5618 db_nsr,
5619 ):
5620 """This method updates and redeploys VNF instances
5621
5622 Args:
5623 nsr_id: NS instance id
5624 nslcmop_id: nslcmop id
5625 db_vnfd: VNF descriptor
5626 db_vnfr: VNF instance record
5627 db_nsr: NS instance record
5628
5629 Returns:
5630 result: (str, str) COMPLETED/FAILED, details
5631 """
5632 try:
5633 count_index = 0
5634 stage = ["", "", ""]
5635 logging_text = "Task ns={} update ".format(nsr_id)
5636 latest_vnfd_revision = db_vnfd["_admin"].get("revision")
5637 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5638
5639 # Terminate old VNF resources
5640 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5641 await self.terminate_vdus(
5642 db_vnfr,
5643 member_vnf_index,
5644 db_nsr,
5645 update_db_nslcmops,
5646 stage,
5647 logging_text,
5648 )
5649
5650 # old_vnfd_id = db_vnfr["vnfd-id"]
5651 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5652 new_db_vnfd = db_vnfd
5653 # new_vnfd_ref = new_db_vnfd["id"]
5654 # new_vnfd_id = vnfd_id
5655
5656 # Create VDUR
5657 new_vnfr_cp = []
5658 for cp in new_db_vnfd.get("ext-cpd", ()):
5659 vnf_cp = {
5660 "name": cp.get("id"),
5661 "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
5662 "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
5663 "id": cp.get("id"),
5664 }
5665 new_vnfr_cp.append(vnf_cp)
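            # newVdur is assumed to be precomputed upstream and passed in the
            # operation params; ip-address is cleared, presumably to be
            # repopulated once the VNF is redeployed.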
5666 new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
5667 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5668 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5669 new_vnfr_update = {
5670 "revision": latest_vnfd_revision,
5671 "connection-point": new_vnfr_cp,
5672 "vdur": new_vdur,
5673 "ip-address": "",
5674 }
5675 self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
5676 updated_db_vnfr = self.db.get_one(
5677 "vnfrs",
5678 {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
5679 )
5680
5681 # Instantiate new VNF resources
5682 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5683 vca_scaling_info = []
5684 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5685 scaling_info["scaling_direction"] = "OUT"
5686 scaling_info["vdu-create"] = {}
5687 scaling_info["kdu-create"] = {}
5688 vdud_instantiate_list = db_vnfd["vdu"]
5689 for index, vdud in enumerate(vdud_instantiate_list):
5690 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
5691 if cloud_init_text:
5692 additional_params = (
5693 self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
5694 or {}
5695 )
5696 cloud_init_list = []
5697 if cloud_init_text:
                    # TODO: the VDU's own IP is not available because db_vnfr has not been updated yet.
5699 additional_params["OSM"] = get_osm_params(
5700 updated_db_vnfr, vdud["id"], 1
5701 )
5702 cloud_init_list.append(
5703 self._parse_cloud_init(
5704 cloud_init_text,
5705 additional_params,
5706 db_vnfd["id"],
5707 vdud["id"],
5708 )
5709 )
5710 vca_scaling_info.append(
5711 {
5712 "osm_vdu_id": vdud["id"],
5713 "member-vnf-index": member_vnf_index,
5714 "type": "create",
5715 "vdu_index": count_index,
5716 }
5717 )
5718 scaling_info["vdu-create"][vdud["id"]] = count_index
5719 if self.ro_config.ng:
5720 self.logger.debug(
5721 "New Resources to be deployed: {}".format(scaling_info)
5722 )
5723 await self._scale_ng_ro(
5724 logging_text,
5725 db_nsr,
5726 update_db_nslcmops,
5727 updated_db_vnfr,
5728 scaling_info,
5729 stage,
5730 )
5731 return "COMPLETED", "Done"
5732 except (LcmException, asyncio.CancelledError):
5733 raise
5734 except Exception as e:
5735 self.logger.debug("Error updating VNF {}".format(e))
5736 return "FAILED", "Error updating VNF {}".format(e)
5737
5738 async def _ns_charm_upgrade(
5739 self,
5740 ee_id,
5741 charm_id,
5742 charm_type,
5743 path,
5744 timeout: float = None,
5745 ) -> (str, str):
        """This method upgrades charms in VNF instances

        Args:
            ee_id: Execution environment id
            charm_id: charm-id
            charm_type: Charm type; can be lxc-proxy-charm, native-charm or k8s-proxy-charm
            path: Local path to the charm
            timeout: (Float) Timeout for the ns update operation
5754
5755 Returns:
5756 result: (str, str) COMPLETED/FAILED, details
5757 """
5758 try:
5759 charm_type = charm_type or "lxc_proxy_charm"
5760 output = await self.vca_map[charm_type].upgrade_charm(
5761 ee_id=ee_id,
5762 path=path,
5763 charm_id=charm_id,
5764 charm_type=charm_type,
5765 timeout=timeout or self.timeout.ns_update,
5766 )
5767
            if output:
                return "COMPLETED", output

            return "FAILED", "upgrade_charm returned no output"

5771 except (LcmException, asyncio.CancelledError):
5772 raise
5773
5774 except Exception as e:
5775 self.logger.debug("Error upgrading charm {}".format(path))
5776
5777 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5778
5779 async def update(self, nsr_id, nslcmop_id):
5780 """Update NS according to different update types
5781
5782 This method performs upgrade of VNF instances then updates the revision
5783 number in VNF record
5784
5785 Args:
5786 nsr_id: Network service will be updated
5787 nslcmop_id: ns lcm operation id
5788
        Returns:
            nslcmop_operation_state, detailed_status (exceptions are handled
            internally and reflected in the operation state)

        """
5793 # Try to lock HA task here
5794 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5795 if not task_is_locked_by_me:
5796 return
5797
5798 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5799 self.logger.debug(logging_text + "Enter")
5800
5801 # Set the required variables to be filled up later
5802 db_nsr = None
5803 db_nslcmop_update = {}
5804 vnfr_update = {}
5805 nslcmop_operation_state = None
5806 db_nsr_update = {}
5807 error_description_nslcmop = ""
5808 exc = None
5809 change_type = "updated"
5810 detailed_status = ""
5811 member_vnf_index = None
5812
5813 try:
5814 # wait for any previous tasks in process
5815 step = "Waiting for previous operations to terminate"
5816 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5817 self._write_ns_status(
5818 nsr_id=nsr_id,
5819 ns_state=None,
5820 current_operation="UPDATING",
5821 current_operation_id=nslcmop_id,
5822 )
5823
5824 step = "Getting nslcmop from database"
5825 db_nslcmop = self.db.get_one(
5826 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5827 )
5828 update_type = db_nslcmop["operationParams"]["updateType"]
5829
5830 step = "Getting nsr from database"
5831 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5832 old_operational_status = db_nsr["operational-status"]
5833 db_nsr_update["operational-status"] = "updating"
5834 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5835 nsr_deployed = db_nsr["_admin"].get("deployed")
5836
5837 if update_type == "CHANGE_VNFPKG":
5838 # Get the input parameters given through update request
5839 vnf_instance_id = db_nslcmop["operationParams"][
5840 "changeVnfPackageData"
5841 ].get("vnfInstanceId")
5842
5843 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5844 "vnfdId"
5845 )
5846 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5847
5848 step = "Getting vnfr from database"
5849 db_vnfr = self.db.get_one(
5850 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5851 )
5852
5853 step = "Getting vnfds from database"
5854 # Latest VNFD
5855 latest_vnfd = self.db.get_one(
5856 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5857 )
5858 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5859
5860 # Current VNFD
5861 current_vnf_revision = db_vnfr.get("revision", 1)
5862 current_vnfd = self.db.get_one(
5863 "vnfds_revisions",
5864 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5865 fail_on_empty=False,
5866 )
5867 # Charm artifact paths will be filled up later
5868 (
5869 current_charm_artifact_path,
5870 target_charm_artifact_path,
5871 charm_artifact_paths,
5872 helm_artifacts,
5873 ) = ([], [], [], [])
5874
5875 step = "Checking if revision has changed in VNFD"
5876 if current_vnf_revision != latest_vnfd_revision:
5877 change_type = "policy_updated"
5878
5879 # There is new revision of VNFD, update operation is required
5880 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5881 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5882
5883 step = "Removing the VNFD packages if they exist in the local path"
5884 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5885 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5886
5887 step = "Get the VNFD packages from FSMongo"
5888 self.fs.sync(from_path=latest_vnfd_path)
5889 self.fs.sync(from_path=current_vnfd_path)
5890
                    step = (
                        "Get the charm-type, charm-id, ee-id if there is a deployed VCA"
                    )
5894 current_base_folder = current_vnfd["_admin"]["storage"]
5895 latest_base_folder = latest_vnfd["_admin"]["storage"]
5896
5897 for vca_index, vca_deployed in enumerate(
5898 get_iterable(nsr_deployed, "VCA")
5899 ):
5900 vnf_index = db_vnfr.get("member-vnf-index-ref")
5901
5902 # Getting charm-id and charm-type
5903 if vca_deployed.get("member-vnf-index") == vnf_index:
5904 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5905 vca_type = vca_deployed.get("type")
5906 vdu_count_index = vca_deployed.get("vdu_count_index")
5907
5908 # Getting ee-id
5909 ee_id = vca_deployed.get("ee_id")
5910
5911 step = "Getting descriptor config"
5912 if current_vnfd.get("kdu"):
5913 search_key = "kdu_name"
5914 else:
5915 search_key = "vnfd_id"
5916
5917 entity_id = vca_deployed.get(search_key)
5918
5919 descriptor_config = get_configuration(
5920 current_vnfd, entity_id
5921 )
5922
5923 if "execution-environment-list" in descriptor_config:
5924 ee_list = descriptor_config.get(
5925 "execution-environment-list", []
5926 )
5927 else:
5928 ee_list = []
5929
                            # There could be several charms used in the same VNF
5931 for ee_item in ee_list:
5932 if ee_item.get("juju"):
5933 step = "Getting charm name"
5934 charm_name = ee_item["juju"].get("charm")
5935
5936 step = "Setting Charm artifact paths"
5937 current_charm_artifact_path.append(
5938 get_charm_artifact_path(
5939 current_base_folder,
5940 charm_name,
5941 vca_type,
5942 current_vnf_revision,
5943 )
5944 )
5945 target_charm_artifact_path.append(
5946 get_charm_artifact_path(
5947 latest_base_folder,
5948 charm_name,
5949 vca_type,
5950 latest_vnfd_revision,
5951 )
5952 )
5953 elif ee_item.get("helm-chart"):
5954 # add chart to list and all parameters
5955 step = "Getting helm chart name"
5956 chart_name = ee_item.get("helm-chart")
5957 if (
5958 ee_item.get("helm-version")
5959 and ee_item.get("helm-version") == "v2"
5960 ):
5961 vca_type = "helm"
5962 else:
5963 vca_type = "helm-v3"
5964 step = "Setting Helm chart artifact paths"
5965
5966 helm_artifacts.append(
5967 {
5968 "current_artifact_path": get_charm_artifact_path(
5969 current_base_folder,
5970 chart_name,
5971 vca_type,
5972 current_vnf_revision,
5973 ),
5974 "target_artifact_path": get_charm_artifact_path(
5975 latest_base_folder,
5976 chart_name,
5977 vca_type,
5978 latest_vnfd_revision,
5979 ),
5980 "ee_id": ee_id,
5981 "vca_index": vca_index,
5982 "vdu_index": vdu_count_index,
5983 }
5984 )
5985
5986 charm_artifact_paths = zip(
5987 current_charm_artifact_path, target_charm_artifact_path
5988 )
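                    # Note: zip() yields a one-shot iterator, hence the list()
                    # calls at the points where it is consumed below.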
5989
5990 step = "Checking if software version has changed in VNFD"
5991 if find_software_version(current_vnfd) != find_software_version(
5992 latest_vnfd
5993 ):
5994 step = "Checking if existing VNF has charm"
5995 for current_charm_path, target_charm_path in list(
5996 charm_artifact_paths
5997 ):
5998 if current_charm_path:
5999 raise LcmException(
                                    "Software version change is not supported as VNF instance {} has a charm.".format(
6001 vnf_instance_id
6002 )
6003 )
6004
6005 # There is no change in the charm package, then redeploy the VNF
6006 # based on new descriptor
6007 step = "Redeploying VNF"
6008 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6009 (result, detailed_status) = await self._ns_redeploy_vnf(
6010 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
6011 )
6012 if result == "FAILED":
6013 nslcmop_operation_state = result
6014 error_description_nslcmop = detailed_status
6015 db_nslcmop_update["detailed-status"] = detailed_status
6016 self.logger.debug(
6017 logging_text
6018 + " step {} Done with result {} {}".format(
6019 step, nslcmop_operation_state, detailed_status
6020 )
6021 )
6022
6023 else:
6024 step = "Checking if any charm package has changed or not"
6025 for current_charm_path, target_charm_path in list(
6026 charm_artifact_paths
6027 ):
6028 if (
6029 current_charm_path
6030 and target_charm_path
6031 and self.check_charm_hash_changed(
6032 current_charm_path, target_charm_path
6033 )
6034 ):
6035 step = "Checking whether VNF uses juju bundle"
6036 if check_juju_bundle_existence(current_vnfd):
6037 raise LcmException(
                                        "Charm upgrade is not supported for an instance that"
                                        " uses a juju-bundle: {}".format(
6040 check_juju_bundle_existence(current_vnfd)
6041 )
6042 )
6043
6044 step = "Upgrading Charm"
6045 (
6046 result,
6047 detailed_status,
6048 ) = await self._ns_charm_upgrade(
6049 ee_id=ee_id,
6050 charm_id=vca_id,
6051 charm_type=vca_type,
6052 path=self.fs.path + target_charm_path,
6053 timeout=timeout_seconds,
6054 )
6055
6056 if result == "FAILED":
6057 nslcmop_operation_state = result
6058 error_description_nslcmop = detailed_status
6059
6060 db_nslcmop_update["detailed-status"] = detailed_status
6061 self.logger.debug(
6062 logging_text
6063 + " step {} Done with result {} {}".format(
6064 step, nslcmop_operation_state, detailed_status
6065 )
6066 )
6067
6068 step = "Updating policies"
6069 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6070 result = "COMPLETED"
6071 detailed_status = "Done"
6072 db_nslcmop_update["detailed-status"] = "Done"
6073
6074 # helm base EE
6075 for item in helm_artifacts:
6076 if not (
6077 item["current_artifact_path"]
6078 and item["target_artifact_path"]
6079 and self.check_charm_hash_changed(
6080 item["current_artifact_path"],
6081 item["target_artifact_path"],
6082 )
6083 ):
6084 continue
6085 db_update_entry = "_admin.deployed.VCA.{}.".format(
6086 item["vca_index"]
6087 )
6088 vnfr_id = db_vnfr["_id"]
6089 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6090 db_dict = {
6091 "collection": "nsrs",
6092 "filter": {"_id": nsr_id},
6093 "path": db_update_entry,
6094 }
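                        # get_ee_id_parts splits the stored ee_id into its
                        # vca_type, namespace and helm_id components (the exact
                        # encoding is defined in lcm_utils.get_ee_id_parts).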
6095 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6096 await self.vca_map[vca_type].upgrade_execution_environment(
6097 namespace=namespace,
6098 helm_id=helm_id,
6099 db_dict=db_dict,
6100 config=osm_config,
6101 artifact_path=item["target_artifact_path"],
6102 vca_type=vca_type,
6103 )
6104 vnf_id = db_vnfr.get("vnfd-ref")
6105 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6106 self.logger.debug("get ssh key block")
6107 rw_mgmt_ip = None
6108 if deep_get(
6109 config_descriptor,
6110 ("config-access", "ssh-access", "required"),
6111 ):
6112 # Needed to inject a ssh key
6113 user = deep_get(
6114 config_descriptor,
6115 ("config-access", "ssh-access", "default-user"),
6116 )
6117 step = (
                                "Installing configuration software, getting public ssh key"
6119 )
6120 pub_key = await self.vca_map[
6121 vca_type
6122 ].get_ee_ssh_public__key(
6123 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6124 )
6125
6126 step = (
6127 "Insert public key into VM user={} ssh_key={}".format(
6128 user, pub_key
6129 )
6130 )
6131 self.logger.debug(logging_text + step)
6132
6133 # wait for RO (ip-address) Insert pub_key into VM
6134 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6135 logging_text,
6136 nsr_id,
6137 vnfr_id,
6138 None,
6139 item["vdu_index"],
6140 user=user,
6141 pub_key=pub_key,
6142 )
6143
6144 initial_config_primitive_list = config_descriptor.get(
6145 "initial-config-primitive"
6146 )
6147 config_primitive = next(
6148 (
6149 p
6150 for p in initial_config_primitive_list
6151 if p["name"] == "config"
6152 ),
6153 None,
6154 )
6155 if not config_primitive:
6156 continue
6157
6158 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6159 if rw_mgmt_ip:
6160 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6161 if db_vnfr.get("additionalParamsForVnf"):
6162 deploy_params.update(
6163 parse_yaml_strings(
6164 db_vnfr["additionalParamsForVnf"].copy()
6165 )
6166 )
6167 primitive_params_ = self._map_primitive_params(
6168 config_primitive, {}, deploy_params
6169 )
6170
6171 step = "execute primitive '{}' params '{}'".format(
6172 config_primitive["name"], primitive_params_
6173 )
6174 self.logger.debug(logging_text + step)
6175 await self.vca_map[vca_type].exec_primitive(
6176 ee_id=ee_id,
6177 primitive_name=config_primitive["name"],
6178 params_dict=primitive_params_,
6179 db_dict=db_dict,
6180 vca_id=vca_id,
6181 vca_type=vca_type,
6182 )
6183
6184 step = "Updating policies"
6185 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6186 detailed_status = "Done"
6187 db_nslcmop_update["detailed-status"] = "Done"
6188
                # If nslcmop_operation_state is None, no operation has failed.
6190 if not nslcmop_operation_state:
6191 nslcmop_operation_state = "COMPLETED"
6192
                # If the CHANGE_VNFPKG operation is successful,
                # the vnf revision needs to be updated
6195 vnfr_update["revision"] = latest_vnfd_revision
6196 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6197
6198 self.logger.debug(
6199 logging_text
6200 + " task Done with result {} {}".format(
6201 nslcmop_operation_state, detailed_status
6202 )
6203 )
6204 elif update_type == "REMOVE_VNF":
6205 # This part is included in https://osm.etsi.org/gerrit/11876
6206 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6207 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6208 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6209 step = "Removing VNF"
6210 (result, detailed_status) = await self.remove_vnf(
6211 nsr_id, nslcmop_id, vnf_instance_id
6212 )
6213 if result == "FAILED":
6214 nslcmop_operation_state = result
6215 error_description_nslcmop = detailed_status
6216 db_nslcmop_update["detailed-status"] = detailed_status
6217 change_type = "vnf_terminated"
6218 if not nslcmop_operation_state:
6219 nslcmop_operation_state = "COMPLETED"
6220 self.logger.debug(
6221 logging_text
6222 + " task Done with result {} {}".format(
6223 nslcmop_operation_state, detailed_status
6224 )
6225 )
6226
6227 elif update_type == "OPERATE_VNF":
6228 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6229 "vnfInstanceId"
6230 ]
6231 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6232 "changeStateTo"
6233 ]
6234 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6235 "additionalParam"
6236 ]
6237 (result, detailed_status) = await self.rebuild_start_stop(
6238 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6239 )
6240 if result == "FAILED":
6241 nslcmop_operation_state = result
6242 error_description_nslcmop = detailed_status
6243 db_nslcmop_update["detailed-status"] = detailed_status
6244 if not nslcmop_operation_state:
6245 nslcmop_operation_state = "COMPLETED"
6246 self.logger.debug(
6247 logging_text
6248 + " task Done with result {} {}".format(
6249 nslcmop_operation_state, detailed_status
6250 )
6251 )
6252
            # If nslcmop_operation_state is None, no operation has failed
            # and all operations have completed.
6255 if not nslcmop_operation_state:
6256 nslcmop_operation_state = "COMPLETED"
6257 db_nsr_update["operational-status"] = old_operational_status
6258
6259 except (DbException, LcmException, N2VCException, K8sException) as e:
6260 self.logger.error(logging_text + "Exit Exception {}".format(e))
6261 exc = e
6262 except asyncio.CancelledError:
6263 self.logger.error(
6264 logging_text + "Cancelled Exception while '{}'".format(step)
6265 )
6266 exc = "Operation was cancelled"
6267 except asyncio.TimeoutError:
6268 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6269 exc = "Timeout"
6270 except Exception as e:
6271 exc = traceback.format_exc()
6272 self.logger.critical(
6273 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6274 exc_info=True,
6275 )
6276 finally:
6277 if exc:
6278 db_nslcmop_update[
6279 "detailed-status"
6280 ] = (
6281 detailed_status
6282 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6283 nslcmop_operation_state = "FAILED"
6284 db_nsr_update["operational-status"] = old_operational_status
6285 if db_nsr:
6286 self._write_ns_status(
6287 nsr_id=nsr_id,
6288 ns_state=db_nsr["nsState"],
6289 current_operation="IDLE",
6290 current_operation_id=None,
6291 other_update=db_nsr_update,
6292 )
6293
6294 self._write_op_status(
6295 op_id=nslcmop_id,
6296 stage="",
6297 error_message=error_description_nslcmop,
6298 operation_state=nslcmop_operation_state,
6299 other_update=db_nslcmop_update,
6300 )
6301
6302 if nslcmop_operation_state:
6303 try:
6304 msg = {
6305 "nsr_id": nsr_id,
6306 "nslcmop_id": nslcmop_id,
6307 "operationState": nslcmop_operation_state,
6308 }
6309 if (
6310 change_type in ("vnf_terminated", "policy_updated")
6311 and member_vnf_index
6312 ):
6313 msg.update({"vnf_member_index": member_vnf_index})
6314 await self.msg.aiowrite("ns", change_type, msg)
6315 except Exception as e:
6316 self.logger.error(
6317 logging_text + "kafka_write notification Exception {}".format(e)
6318 )
6319 self.logger.debug(logging_text + "Exit")
6320 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6321 return nslcmop_operation_state, detailed_status
6322
6323 async def scale(self, nsr_id, nslcmop_id):
6324 # Try to lock HA task here
6325 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6326 if not task_is_locked_by_me:
6327 return
6328
6329 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6330 stage = ["", "", ""]
6331 tasks_dict_info = {}
6332 # ^ stage, step, VIM progress
6333 self.logger.debug(logging_text + "Enter")
6334 # get all needed from database
6335 db_nsr = None
6336 db_nslcmop_update = {}
6337 db_nsr_update = {}
6338 exc = None
        # in case of error, indicates which part of the scale failed, so that the nsr can be set to error status
6340 scale_process = None
6341 old_operational_status = ""
6342 old_config_status = ""
6343 nsi_id = None
6344 try:
6345 # wait for any previous tasks in process
6346 step = "Waiting for previous operations to terminate"
6347 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6348 self._write_ns_status(
6349 nsr_id=nsr_id,
6350 ns_state=None,
6351 current_operation="SCALING",
6352 current_operation_id=nslcmop_id,
6353 )
6354
6355 step = "Getting nslcmop from database"
6356 self.logger.debug(
6357 step + " after having waited for previous tasks to be completed"
6358 )
6359 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6360
6361 step = "Getting nsr from database"
6362 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6363 old_operational_status = db_nsr["operational-status"]
6364 old_config_status = db_nsr["config-status"]
6365
6366 step = "Parsing scaling parameters"
6367 db_nsr_update["operational-status"] = "scaling"
6368 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6369 nsr_deployed = db_nsr["_admin"].get("deployed")
6370
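            # Illustrative (hypothetical) shape of the scaling request params:
            #   {"scaleVnfData": {
            #       "scaleVnfType": "SCALE_OUT",
            #       "scaleByStepData": {"member-vnf-index": "1",
            #                           "scaling-group-descriptor": "vdu_autoscale"}}}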
6371 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6372 "scaleByStepData"
6373 ]["member-vnf-index"]
6374 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6375 "scaleByStepData"
6376 ]["scaling-group-descriptor"]
6377 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6378 # for backward compatibility
6379 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6380 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6381 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6382 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6383
6384 step = "Getting vnfr from database"
6385 db_vnfr = self.db.get_one(
6386 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6387 )
6388
6389 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6390
6391 step = "Getting vnfd from database"
6392 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6393
6394 base_folder = db_vnfd["_admin"]["storage"]
6395
6396 step = "Getting scaling-group-descriptor"
6397 scaling_descriptor = find_in_list(
6398 get_scaling_aspect(db_vnfd),
6399 lambda scale_desc: scale_desc["name"] == scaling_group,
6400 )
6401 if not scaling_descriptor:
6402 raise LcmException(
6403 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6404 "at vnfd:scaling-group-descriptor".format(scaling_group)
6405 )
6406
6407 step = "Sending scale order to VIM"
6408 # TODO check if ns is in a proper status
6409 nb_scale_op = 0
6410 if not db_nsr["_admin"].get("scaling-group"):
6411 self.update_db_2(
6412 "nsrs",
6413 nsr_id,
6414 {
6415 "_admin.scaling-group": [
6416 {"name": scaling_group, "nb-scale-op": 0}
6417 ]
6418 },
6419 )
6420 admin_scale_index = 0
6421 else:
6422 for admin_scale_index, admin_scale_info in enumerate(
6423 db_nsr["_admin"]["scaling-group"]
6424 ):
6425 if admin_scale_info["name"] == scaling_group:
6426 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6427 break
6428 else: # not found, set index one plus last element and add new entry with the name
6429 admin_scale_index += 1
6430 db_nsr_update[
6431 "_admin.scaling-group.{}.name".format(admin_scale_index)
6432 ] = scaling_group
6433
6434 vca_scaling_info = []
6435 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6436 if scaling_type == "SCALE_OUT":
6437 if "aspect-delta-details" not in scaling_descriptor:
6438 raise LcmException(
                        "Aspect delta details not found in scaling descriptor {}".format(
6440 scaling_descriptor["name"]
6441 )
6442 )
                # check whether max-instance-count is reached
6444 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6445
6446 scaling_info["scaling_direction"] = "OUT"
6447 scaling_info["vdu-create"] = {}
6448 scaling_info["kdu-create"] = {}
6449 for delta in deltas:
6450 for vdu_delta in delta.get("vdu-delta", {}):
6451 vdud = get_vdu(db_vnfd, vdu_delta["id"])
                        # vdu_index also provides the number of instances of the targeted vdu
6453 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6454 cloud_init_text = self._get_vdu_cloud_init_content(
6455 vdud, db_vnfd
6456 )
6457 if cloud_init_text:
6458 additional_params = (
6459 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6460 or {}
6461 )
6462 cloud_init_list = []
6463
6464 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6465 max_instance_count = 10
6466 if vdu_profile and "max-number-of-instances" in vdu_profile:
6467 max_instance_count = vdu_profile.get(
6468 "max-number-of-instances", 10
6469 )
6470
6471 default_instance_num = get_number_of_instances(
6472 db_vnfd, vdud["id"]
6473 )
6474 instances_number = vdu_delta.get("number-of-instances", 1)
6475 nb_scale_op += instances_number
6476
6477 new_instance_count = nb_scale_op + default_instance_num
                        # If the new count is over max while the vdu count is below max,
                        # assign the adjusted instance count
                        if new_instance_count > max_instance_count > vdu_count:
                            instances_number = new_instance_count - max_instance_count
6484
6485 if new_instance_count > max_instance_count:
6486 raise LcmException(
6487 "reached the limit of {} (max-instance-count) "
6488 "scaling-out operations for the "
6489 "scaling-group-descriptor '{}'".format(
6490 nb_scale_op, scaling_group
6491 )
6492 )
6493 for x in range(vdu_delta.get("number-of-instances", 1)):
6494 if cloud_init_text:
                                # TODO: the VDU's own IP is not available because db_vnfr has not been updated yet.
6496 additional_params["OSM"] = get_osm_params(
6497 db_vnfr, vdu_delta["id"], vdu_index + x
6498 )
6499 cloud_init_list.append(
6500 self._parse_cloud_init(
6501 cloud_init_text,
6502 additional_params,
6503 db_vnfd["id"],
6504 vdud["id"],
6505 )
6506 )
6507 vca_scaling_info.append(
6508 {
6509 "osm_vdu_id": vdu_delta["id"],
6510 "member-vnf-index": vnf_index,
6511 "type": "create",
6512 "vdu_index": vdu_index + x,
6513 }
6514 )
6515 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6516 for kdu_delta in delta.get("kdu-resource-delta", {}):
6517 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6518 kdu_name = kdu_profile["kdu-name"]
6519 resource_name = kdu_profile.get("resource-name", "")
6520
                        # A delta may reference several KDUs; keep a list per KDU
6523 if not scaling_info["kdu-create"].get(kdu_name, None):
6524 scaling_info["kdu-create"][kdu_name] = []
6525
6526 kdur = get_kdur(db_vnfr, kdu_name)
6527 if kdur.get("helm-chart"):
6528 k8s_cluster_type = "helm-chart-v3"
6529 self.logger.debug("kdur: {}".format(kdur))
6530 if (
6531 kdur.get("helm-version")
6532 and kdur.get("helm-version") == "v2"
6533 ):
6534 k8s_cluster_type = "helm-chart"
6535 elif kdur.get("juju-bundle"):
6536 k8s_cluster_type = "juju-bundle"
6537 else:
6538 raise LcmException(
6539 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6540 "juju-bundle. Maybe an old NBI version is running".format(
6541 db_vnfr["member-vnf-index-ref"], kdu_name
6542 )
6543 )
6544
6545 max_instance_count = 10
6546 if kdu_profile and "max-number-of-instances" in kdu_profile:
6547 max_instance_count = kdu_profile.get(
6548 "max-number-of-instances", 10
6549 )
6550
6551 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6552 deployed_kdu, _ = get_deployed_kdu(
6553 nsr_deployed, kdu_name, vnf_index
6554 )
6555 if deployed_kdu is None:
6556 raise LcmException(
6557 "KDU '{}' for vnf '{}' not deployed".format(
6558 kdu_name, vnf_index
6559 )
6560 )
6561 kdu_instance = deployed_kdu.get("kdu-instance")
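                        # get_scale_count is expected to return the current replica
                        # count of the KDU resource; the requested delta is applied
                        # on top of it below.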
6562 instance_num = await self.k8scluster_map[
6563 k8s_cluster_type
6564 ].get_scale_count(
6565 resource_name,
6566 kdu_instance,
6567 vca_id=vca_id,
6568 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6569 kdu_model=deployed_kdu.get("kdu-model"),
6570 )
6571 kdu_replica_count = instance_num + kdu_delta.get(
6572 "number-of-instances", 1
6573 )
6574
                        # If the new count is over max while instance_num is below max,
                        # cap the kdu replica count at the max instance number
6577 if kdu_replica_count > max_instance_count > instance_num:
6578 kdu_replica_count = max_instance_count
6579 if kdu_replica_count > max_instance_count:
6580 raise LcmException(
6581 "reached the limit of {} (max-instance-count) "
6582 "scaling-out operations for the "
6583 "scaling-group-descriptor '{}'".format(
6584 instance_num, scaling_group
6585 )
6586 )
6587
6588 for x in range(kdu_delta.get("number-of-instances", 1)):
6589 vca_scaling_info.append(
6590 {
6591 "osm_kdu_id": kdu_name,
6592 "member-vnf-index": vnf_index,
6593 "type": "create",
6594 "kdu_index": instance_num + x - 1,
6595 }
6596 )
6597 scaling_info["kdu-create"][kdu_name].append(
6598 {
6599 "member-vnf-index": vnf_index,
6600 "type": "create",
6601 "k8s-cluster-type": k8s_cluster_type,
6602 "resource-name": resource_name,
6603 "scale": kdu_replica_count,
6604 }
6605 )
6606 elif scaling_type == "SCALE_IN":
6607 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6608
6609 scaling_info["scaling_direction"] = "IN"
6610 scaling_info["vdu-delete"] = {}
6611 scaling_info["kdu-delete"] = {}
6612
6613 for delta in deltas:
6614 for vdu_delta in delta.get("vdu-delta", {}):
6615 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6616 min_instance_count = 0
6617 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6618 if vdu_profile and "min-number-of-instances" in vdu_profile:
6619 min_instance_count = vdu_profile["min-number-of-instances"]
6620
6621 default_instance_num = get_number_of_instances(
6622 db_vnfd, vdu_delta["id"]
6623 )
6624 instance_num = vdu_delta.get("number-of-instances", 1)
6625 nb_scale_op -= instance_num
6626
6627 new_instance_count = nb_scale_op + default_instance_num
6628
6629 if new_instance_count < min_instance_count < vdu_count:
6630 instances_number = min_instance_count - new_instance_count
6631 else:
6632 instances_number = instance_num
6633
6634 if new_instance_count < min_instance_count:
6635 raise LcmException(
6636 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6637 "scaling-group-descriptor '{}'".format(
6638 nb_scale_op, scaling_group
6639 )
6640 )
6641 for x in range(vdu_delta.get("number-of-instances", 1)):
6642 vca_scaling_info.append(
6643 {
6644 "osm_vdu_id": vdu_delta["id"],
6645 "member-vnf-index": vnf_index,
6646 "type": "delete",
6647 "vdu_index": vdu_index - 1 - x,
6648 }
6649 )
6650 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6651 for kdu_delta in delta.get("kdu-resource-delta", {}):
6652 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6653 kdu_name = kdu_profile["kdu-name"]
6654 resource_name = kdu_profile.get("resource-name", "")
6655
6656 if not scaling_info["kdu-delete"].get(kdu_name, None):
6657 scaling_info["kdu-delete"][kdu_name] = []
6658
6659 kdur = get_kdur(db_vnfr, kdu_name)
6660 if kdur.get("helm-chart"):
6661 k8s_cluster_type = "helm-chart-v3"
6662 self.logger.debug("kdur: {}".format(kdur))
6663 if (
6664 kdur.get("helm-version")
6665 and kdur.get("helm-version") == "v2"
6666 ):
6667 k8s_cluster_type = "helm-chart"
6668 elif kdur.get("juju-bundle"):
6669 k8s_cluster_type = "juju-bundle"
6670 else:
6671 raise LcmException(
6672 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6673 "juju-bundle. Maybe an old NBI version is running".format(
6674 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6675 )
6676 )
6677
6678 min_instance_count = 0
6679 if kdu_profile and "min-number-of-instances" in kdu_profile:
6680 min_instance_count = kdu_profile["min-number-of-instances"]
6681
6682 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6683 deployed_kdu, _ = get_deployed_kdu(
6684 nsr_deployed, kdu_name, vnf_index
6685 )
6686 if deployed_kdu is None:
6687 raise LcmException(
6688 "KDU '{}' for vnf '{}' not deployed".format(
6689 kdu_name, vnf_index
6690 )
6691 )
6692 kdu_instance = deployed_kdu.get("kdu-instance")
6693 instance_num = await self.k8scluster_map[
6694 k8s_cluster_type
6695 ].get_scale_count(
6696 resource_name,
6697 kdu_instance,
6698 vca_id=vca_id,
6699 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6700 kdu_model=deployed_kdu.get("kdu-model"),
6701 )
6702 kdu_replica_count = instance_num - kdu_delta.get(
6703 "number-of-instances", 1
6704 )
6705
6706 if kdu_replica_count < min_instance_count < instance_num:
6707 kdu_replica_count = min_instance_count
6708 if kdu_replica_count < min_instance_count:
6709 raise LcmException(
6710 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6711 "scaling-group-descriptor '{}'".format(
6712 instance_num, scaling_group
6713 )
6714 )
6715
6716 for x in range(kdu_delta.get("number-of-instances", 1)):
6717 vca_scaling_info.append(
6718 {
6719 "osm_kdu_id": kdu_name,
6720 "member-vnf-index": vnf_index,
6721 "type": "delete",
6722 "kdu_index": instance_num - x - 1,
6723 }
6724 )
6725 scaling_info["kdu-delete"][kdu_name].append(
6726 {
6727 "member-vnf-index": vnf_index,
6728 "type": "delete",
6729 "k8s-cluster-type": k8s_cluster_type,
6730 "resource-name": resource_name,
6731 "scale": kdu_replica_count,
6732 }
6733 )
6734
            # update scaling_info with the ip addresses of the VDUs to be deleted
6736 vdu_delete = copy(scaling_info.get("vdu-delete"))
6737 if scaling_info["scaling_direction"] == "IN":
6738 for vdur in reversed(db_vnfr["vdur"]):
6739 if vdu_delete.get(vdur["vdu-id-ref"]):
6740 vdu_delete[vdur["vdu-id-ref"]] -= 1
6741 scaling_info["vdu"].append(
6742 {
6743 "name": vdur.get("name") or vdur.get("vdu-name"),
6744 "vdu_id": vdur["vdu-id-ref"],
6745 "interface": [],
6746 }
6747 )
6748 for interface in vdur["interfaces"]:
6749 scaling_info["vdu"][-1]["interface"].append(
6750 {
6751 "name": interface["name"],
6752 "ip_address": interface["ip-address"],
6753 "mac_address": interface.get("mac-address"),
6754 }
6755 )
6756 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6757
6758 # PRE-SCALE BEGIN
6759 step = "Executing pre-scale vnf-config-primitive"
6760 if scaling_descriptor.get("scaling-config-action"):
6761 for scaling_config_action in scaling_descriptor[
6762 "scaling-config-action"
6763 ]:
6764 if (
6765 scaling_config_action.get("trigger") == "pre-scale-in"
6766 and scaling_type == "SCALE_IN"
6767 ) or (
6768 scaling_config_action.get("trigger") == "pre-scale-out"
6769 and scaling_type == "SCALE_OUT"
6770 ):
6771 vnf_config_primitive = scaling_config_action[
6772 "vnf-config-primitive-name-ref"
6773 ]
6774 step = db_nslcmop_update[
6775 "detailed-status"
6776 ] = "executing pre-scale scaling-config-action '{}'".format(
6777 vnf_config_primitive
6778 )
6779
6780 # look for primitive
6781 for config_primitive in (
6782 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6783 ).get("config-primitive", ()):
6784 if config_primitive["name"] == vnf_config_primitive:
6785 break
6786 else:
6787 raise LcmException(
6788 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6789 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6790 "primitive".format(scaling_group, vnf_config_primitive)
6791 )
6792
6793 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6794 if db_vnfr.get("additionalParamsForVnf"):
6795 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6796
6797 scale_process = "VCA"
6798 db_nsr_update["config-status"] = "configuring pre-scaling"
6799 primitive_params = self._map_primitive_params(
6800 config_primitive, {}, vnfr_params
6801 )
6802
6803 # Pre-scale retry check: Check if this sub-operation has been executed before
6804 op_index = self._check_or_add_scale_suboperation(
6805 db_nslcmop,
6806 vnf_index,
6807 vnf_config_primitive,
6808 primitive_params,
6809 "PRE-SCALE",
6810 )
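                        # The sub-operation check yields one of three outcomes:
                        # SKIP (already completed in a previous attempt), NEW
                        # (just registered), or the index of an existing entry
                        # whose registered params must be retried.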
6811 if op_index == self.SUBOPERATION_STATUS_SKIP:
6812 # Skip sub-operation
6813 result = "COMPLETED"
6814 result_detail = "Done"
6815 self.logger.debug(
6816 logging_text
6817 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6818 vnf_config_primitive, result, result_detail
6819 )
6820 )
6821 else:
6822 if op_index == self.SUBOPERATION_STATUS_NEW:
6823 # New sub-operation: Get index of this sub-operation
6824 op_index = (
6825 len(db_nslcmop.get("_admin", {}).get("operations"))
6826 - 1
6827 )
6828 self.logger.debug(
6829 logging_text
6830 + "vnf_config_primitive={} New sub-operation".format(
6831 vnf_config_primitive
6832 )
6833 )
6834 else:
6835 # retry: Get registered params for this existing sub-operation
6836 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6837 op_index
6838 ]
6839 vnf_index = op.get("member_vnf_index")
6840 vnf_config_primitive = op.get("primitive")
6841 primitive_params = op.get("primitive_params")
6842 self.logger.debug(
6843 logging_text
6844 + "vnf_config_primitive={} Sub-operation retry".format(
6845 vnf_config_primitive
6846 )
6847 )
                            # Execute the primitive, either with new (first-time) or registered (retry) args
6849 ee_descriptor_id = config_primitive.get(
6850 "execution-environment-ref"
6851 )
6852 primitive_name = config_primitive.get(
6853 "execution-environment-primitive", vnf_config_primitive
6854 )
6855 ee_id, vca_type = self._look_for_deployed_vca(
6856 nsr_deployed["VCA"],
6857 member_vnf_index=vnf_index,
6858 vdu_id=None,
6859 vdu_count_index=None,
6860 ee_descriptor_id=ee_descriptor_id,
6861 )
6862 result, result_detail = await self._ns_execute_primitive(
6863 ee_id,
6864 primitive_name,
6865 primitive_params,
6866 vca_type=vca_type,
6867 vca_id=vca_id,
6868 )
6869 self.logger.debug(
6870 logging_text
6871 + "vnf_config_primitive={} Done with result {} {}".format(
6872 vnf_config_primitive, result, result_detail
6873 )
6874 )
6875 # Update operationState = COMPLETED | FAILED
6876 self._update_suboperation_status(
6877 db_nslcmop, op_index, result, result_detail
6878 )
6879
6880 if result == "FAILED":
6881 raise LcmException(result_detail)
6882 db_nsr_update["config-status"] = old_config_status
6883 scale_process = None
6884 # PRE-SCALE END
6885
6886 db_nsr_update[
6887 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6888 ] = nb_scale_op
6889 db_nsr_update[
6890 "_admin.scaling-group.{}.time".format(admin_scale_index)
6891 ] = time()
6892
6893 # SCALE-IN VCA - BEGIN
6894 if vca_scaling_info:
6895 step = db_nslcmop_update[
6896 "detailed-status"
6897 ] = "Deleting the execution environments"
6898 scale_process = "VCA"
6899 for vca_info in vca_scaling_info:
6900 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6901 member_vnf_index = str(vca_info["member-vnf-index"])
6902 self.logger.debug(
6903 logging_text + "vdu info: {}".format(vca_info)
6904 )
6905 if vca_info.get("osm_vdu_id"):
6906 vdu_id = vca_info["osm_vdu_id"]
6907 vdu_index = int(vca_info["vdu_index"])
6908 stage[
6909 1
6910 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6911 member_vnf_index, vdu_id, vdu_index
6912 )
6913 stage[2] = step = "Scaling in VCA"
6914 self._write_op_status(op_id=nslcmop_id, stage=stage)
6915 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6916 config_update = db_nsr["configurationStatus"]
6917 for vca_index, vca in enumerate(vca_update):
6918 if (
6919 (vca or vca.get("ee_id"))
6920 and vca["member-vnf-index"] == member_vnf_index
6921 and vca["vdu_count_index"] == vdu_index
6922 ):
6923 if vca.get("vdu_id"):
6924 config_descriptor = get_configuration(
6925 db_vnfd, vca.get("vdu_id")
6926 )
6927 elif vca.get("kdu_name"):
6928 config_descriptor = get_configuration(
6929 db_vnfd, vca.get("kdu_name")
6930 )
6931 else:
6932 config_descriptor = get_configuration(
6933 db_vnfd, db_vnfd["id"]
6934 )
6935 operation_params = (
6936 db_nslcmop.get("operationParams") or {}
6937 )
6938 exec_terminate_primitives = not operation_params.get(
6939 "skip_terminate_primitives"
6940 ) and vca.get("needed_terminate")
6941 task = asyncio.ensure_future(
6942 asyncio.wait_for(
6943 self.destroy_N2VC(
6944 logging_text,
6945 db_nslcmop,
6946 vca,
6947 config_descriptor,
6948 vca_index,
6949 destroy_ee=True,
6950 exec_primitives=exec_terminate_primitives,
6951 scaling_in=True,
6952 vca_id=vca_id,
6953 ),
6954 timeout=self.timeout.charm_delete,
6955 )
6956 )
6957 tasks_dict_info[task] = "Terminating VCA {}".format(
6958 vca.get("ee_id")
6959 )
6960 del vca_update[vca_index]
6961 del config_update[vca_index]
6962 # wait for pending tasks of terminate primitives
6963 if tasks_dict_info:
6964 self.logger.debug(
6965 logging_text
6966 + "Waiting for tasks {}".format(
6967 list(tasks_dict_info.keys())
6968 )
6969 )
6970 error_list = await self._wait_for_tasks(
6971 logging_text,
6972 tasks_dict_info,
6973 min(
6974 self.timeout.charm_delete, self.timeout.ns_terminate
6975 ),
6976 stage,
6977 nslcmop_id,
6978 )
6979 tasks_dict_info.clear()
6980 if error_list:
6981 raise LcmException("; ".join(error_list))
6982
6983 db_vca_and_config_update = {
6984 "_admin.deployed.VCA": vca_update,
6985 "configurationStatus": config_update,
6986 }
6987 self.update_db_2(
6988 "nsrs", db_nsr["_id"], db_vca_and_config_update
6989 )
6990 scale_process = None
6991 # SCALE-IN VCA - END
6992
6993 # SCALE RO - BEGIN
6994 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6995 scale_process = "RO"
6996 if self.ro_config.ng:
6997 await self._scale_ng_ro(
6998 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6999 )
7000 scaling_info.pop("vdu-create", None)
7001 scaling_info.pop("vdu-delete", None)
7002
7003 scale_process = None
7004 # SCALE RO - END
7005
7006 # SCALE KDU - BEGIN
7007 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
7008 scale_process = "KDU"
7009 await self._scale_kdu(
7010 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7011 )
7012 scaling_info.pop("kdu-create", None)
7013 scaling_info.pop("kdu-delete", None)
7014
7015 scale_process = None
7016 # SCALE KDU - END
7017
7018 if db_nsr_update:
7019 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7020
7021 # SCALE-UP VCA - BEGIN
7022 if vca_scaling_info:
7023 step = db_nslcmop_update[
7024 "detailed-status"
7025 ] = "Creating new execution environments"
7026 scale_process = "VCA"
7027 for vca_info in vca_scaling_info:
7028 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7029 member_vnf_index = str(vca_info["member-vnf-index"])
7030 self.logger.debug(
7031 logging_text + "vdu info: {}".format(vca_info)
7032 )
7033 vnfd_id = db_vnfr["vnfd-ref"]
7034 if vca_info.get("osm_vdu_id"):
7035 vdu_index = int(vca_info["vdu_index"])
7036 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7037 if db_vnfr.get("additionalParamsForVnf"):
7038 deploy_params.update(
7039 parse_yaml_strings(
7040 db_vnfr["additionalParamsForVnf"].copy()
7041 )
7042 )
7043 descriptor_config = get_configuration(
7044 db_vnfd, db_vnfd["id"]
7045 )
7046 if descriptor_config:
7047 vdu_id = None
7048 vdu_name = None
7049 kdu_name = None
7050 kdu_index = None
7051 self._deploy_n2vc(
7052 logging_text=logging_text
7053 + "member_vnf_index={} ".format(member_vnf_index),
7054 db_nsr=db_nsr,
7055 db_vnfr=db_vnfr,
7056 nslcmop_id=nslcmop_id,
7057 nsr_id=nsr_id,
7058 nsi_id=nsi_id,
7059 vnfd_id=vnfd_id,
7060 vdu_id=vdu_id,
7061 kdu_name=kdu_name,
7062 kdu_index=kdu_index,
7063 member_vnf_index=member_vnf_index,
7064 vdu_index=vdu_index,
7065 vdu_name=vdu_name,
7066 deploy_params=deploy_params,
7067 descriptor_config=descriptor_config,
7068 base_folder=base_folder,
7069 task_instantiation_info=tasks_dict_info,
7070 stage=stage,
7071 )
7072 vdu_id = vca_info["osm_vdu_id"]
7073 vdur = find_in_list(
7074 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7075 )
7076 descriptor_config = get_configuration(db_vnfd, vdu_id)
7077 if vdur.get("additionalParams"):
7078 deploy_params_vdu = parse_yaml_strings(
7079 vdur["additionalParams"]
7080 )
7081 else:
7082 deploy_params_vdu = deploy_params
7083 deploy_params_vdu["OSM"] = get_osm_params(
7084 db_vnfr, vdu_id, vdu_count_index=vdu_index
7085 )
7086 if descriptor_config:
7087 vdu_name = None
7088 kdu_name = None
7089 kdu_index = None
7090 stage[
7091 1
7092 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7093 member_vnf_index, vdu_id, vdu_index
7094 )
7095 stage[2] = step = "Scaling out VCA"
7096 self._write_op_status(op_id=nslcmop_id, stage=stage)
7097 self._deploy_n2vc(
7098 logging_text=logging_text
7099 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7100 member_vnf_index, vdu_id, vdu_index
7101 ),
7102 db_nsr=db_nsr,
7103 db_vnfr=db_vnfr,
7104 nslcmop_id=nslcmop_id,
7105 nsr_id=nsr_id,
7106 nsi_id=nsi_id,
7107 vnfd_id=vnfd_id,
7108 vdu_id=vdu_id,
7109 kdu_name=kdu_name,
7110 member_vnf_index=member_vnf_index,
7111 vdu_index=vdu_index,
7112 kdu_index=kdu_index,
7113 vdu_name=vdu_name,
7114 deploy_params=deploy_params_vdu,
7115 descriptor_config=descriptor_config,
7116 base_folder=base_folder,
7117 task_instantiation_info=tasks_dict_info,
7118 stage=stage,
7119 )
7120 # SCALE-UP VCA - END
7121 scale_process = None
7122
7123 # POST-SCALE BEGIN
7124 # execute primitive service POST-SCALING
7125 step = "Executing post-scale vnf-config-primitive"
7126 if scaling_descriptor.get("scaling-config-action"):
7127 for scaling_config_action in scaling_descriptor[
7128 "scaling-config-action"
7129 ]:
7130 if (
7131 scaling_config_action.get("trigger") == "post-scale-in"
7132 and scaling_type == "SCALE_IN"
7133 ) or (
7134 scaling_config_action.get("trigger") == "post-scale-out"
7135 and scaling_type == "SCALE_OUT"
7136 ):
7137 vnf_config_primitive = scaling_config_action[
7138 "vnf-config-primitive-name-ref"
7139 ]
7140 step = db_nslcmop_update[
7141 "detailed-status"
7142 ] = "executing post-scale scaling-config-action '{}'".format(
7143 vnf_config_primitive
7144 )
7145
7146 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7147 if db_vnfr.get("additionalParamsForVnf"):
7148 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7149
7150 # look for primitive
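# for/else idiom: the else branch runs only if the loop completed without
# break, i.e. no config-primitive matched the requested name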
7151 for config_primitive in (
7152 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7153 ).get("config-primitive", ()):
7154 if config_primitive["name"] == vnf_config_primitive:
7155 break
7156 else:
7157 raise LcmException(
7158 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7159 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7160 "config-primitive".format(
7161 scaling_group, vnf_config_primitive
7162 )
7163 )
7164 scale_process = "VCA"
7165 db_nsr_update["config-status"] = "configuring post-scaling"
7166 primitive_params = self._map_primitive_params(
7167 config_primitive, {}, vnfr_params
7168 )
7169
7170 # Post-scale retry check: Check if this sub-operation has been executed before
7171 op_index = self._check_or_add_scale_suboperation(
7172 db_nslcmop,
7173 vnf_index,
7174 vnf_config_primitive,
7175 primitive_params,
7176 "POST-SCALE",
7177 )
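# op_index can be SUBOPERATION_STATUS_SKIP (already completed in a previous
# attempt), SUBOPERATION_STATUS_NEW (first execution) or the index of a
# previously registered sub-operation whose saved parameters must be reused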
7178 if op_index == self.SUBOPERATION_STATUS_SKIP:
7179 # Skip sub-operation
7180 result = "COMPLETED"
7181 result_detail = "Done"
7182 self.logger.debug(
7183 logging_text
7184 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7185 vnf_config_primitive, result, result_detail
7186 )
7187 )
7188 else:
7189 if op_index == self.SUBOPERATION_STATUS_NEW:
7190 # New sub-operation: Get index of this sub-operation
7191 op_index = (
7192 len(db_nslcmop.get("_admin", {}).get("operations"))
7193 - 1
7194 )
7195 self.logger.debug(
7196 logging_text
7197 + "vnf_config_primitive={} New sub-operation".format(
7198 vnf_config_primitive
7199 )
7200 )
7201 else:
7202 # retry: Get registered params for this existing sub-operation
7203 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7204 op_index
7205 ]
7206 vnf_index = op.get("member_vnf_index")
7207 vnf_config_primitive = op.get("primitive")
7208 primitive_params = op.get("primitive_params")
7209 self.logger.debug(
7210 logging_text
7211 + "vnf_config_primitive={} Sub-operation retry".format(
7212 vnf_config_primitive
7213 )
7214 )
7215                         # Execute the primitive, either with new (first-time) or registered (retry) args
7216 ee_descriptor_id = config_primitive.get(
7217 "execution-environment-ref"
7218 )
7219 primitive_name = config_primitive.get(
7220 "execution-environment-primitive", vnf_config_primitive
7221 )
7222 ee_id, vca_type = self._look_for_deployed_vca(
7223 nsr_deployed["VCA"],
7224 member_vnf_index=vnf_index,
7225 vdu_id=None,
7226 vdu_count_index=None,
7227 ee_descriptor_id=ee_descriptor_id,
7228 )
7229 result, result_detail = await self._ns_execute_primitive(
7230 ee_id,
7231 primitive_name,
7232 primitive_params,
7233 vca_type=vca_type,
7234 vca_id=vca_id,
7235 )
7236 self.logger.debug(
7237 logging_text
7238 + "vnf_config_primitive={} Done with result {} {}".format(
7239 vnf_config_primitive, result, result_detail
7240 )
7241 )
7242 # Update operationState = COMPLETED | FAILED
7243 self._update_suboperation_status(
7244 db_nslcmop, op_index, result, result_detail
7245 )
7246
7247 if result == "FAILED":
7248 raise LcmException(result_detail)
7249 db_nsr_update["config-status"] = old_config_status
7250 scale_process = None
7251 # POST-SCALE END
7252
7253 db_nsr_update[
7254 "detailed-status"
7255 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7256 db_nsr_update["operational-status"] = (
7257 "running"
7258 if old_operational_status == "failed"
7259 else old_operational_status
7260 )
7261 db_nsr_update["config-status"] = old_config_status
7262 return
7263 except (
7264 ROclient.ROClientException,
7265 DbException,
7266 LcmException,
7267 NgRoException,
7268 ) as e:
7269 self.logger.error(logging_text + "Exit Exception {}".format(e))
7270 exc = e
7271 except asyncio.CancelledError:
7272 self.logger.error(
7273 logging_text + "Cancelled Exception while '{}'".format(step)
7274 )
7275 exc = "Operation was cancelled"
7276 except Exception as e:
7277 exc = traceback.format_exc()
7278 self.logger.critical(
7279 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7280 exc_info=True,
7281 )
7282 finally:
7283 self._write_ns_status(
7284 nsr_id=nsr_id,
7285 ns_state=None,
7286 current_operation="IDLE",
7287 current_operation_id=None,
7288 )
7289 if tasks_dict_info:
7290                 stage[1] = "Waiting for scaling pending tasks."
7291 self.logger.debug(logging_text + stage[1])
7292 exc = await self._wait_for_tasks(
7293 logging_text,
7294 tasks_dict_info,
7295 self.timeout.ns_deploy,
7296 stage,
7297 nslcmop_id,
7298 nsr_id=nsr_id,
7299 )
7300 if exc:
7301 db_nslcmop_update[
7302 "detailed-status"
7303 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7304 nslcmop_operation_state = "FAILED"
7305 if db_nsr:
7306 db_nsr_update["operational-status"] = old_operational_status
7307 db_nsr_update["config-status"] = old_config_status
7308 db_nsr_update["detailed-status"] = ""
7309 if scale_process:
7310 if "VCA" in scale_process:
7311 db_nsr_update["config-status"] = "failed"
7312 if "RO" in scale_process:
7313 db_nsr_update["operational-status"] = "failed"
7314 db_nsr_update[
7315 "detailed-status"
7316 ] = "FAILED scaling nslcmop={} {}: {}".format(
7317 nslcmop_id, step, exc
7318 )
7319 else:
7320 error_description_nslcmop = None
7321 nslcmop_operation_state = "COMPLETED"
7322 db_nslcmop_update["detailed-status"] = "Done"
7323
7324 self._write_op_status(
7325 op_id=nslcmop_id,
7326 stage="",
7327 error_message=error_description_nslcmop,
7328 operation_state=nslcmop_operation_state,
7329 other_update=db_nslcmop_update,
7330 )
7331 if db_nsr:
7332 self._write_ns_status(
7333 nsr_id=nsr_id,
7334 ns_state=None,
7335 current_operation="IDLE",
7336 current_operation_id=None,
7337 other_update=db_nsr_update,
7338 )
7339
7340 if nslcmop_operation_state:
7341 try:
7342 msg = {
7343 "nsr_id": nsr_id,
7344 "nslcmop_id": nslcmop_id,
7345 "operationState": nslcmop_operation_state,
7346 }
7347 await self.msg.aiowrite("ns", "scaled", msg)
7348 except Exception as e:
7349 self.logger.error(
7350 logging_text + "kafka_write notification Exception {}".format(e)
7351 )
7352 self.logger.debug(logging_text + "Exit")
7353 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7354
7355 async def _scale_kdu(
7356 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7357 ):
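# Scale each KDU through the matching k8s connector. For scale-in ("delete"),
# terminate-config-primitives run before scaling; for scale-out ("create"),
# initial-config-primitives run afterwards. Primitives are skipped when the
# KDU is managed by a juju execution environment.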
7358 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7359 for kdu_name in _scaling_info:
7360 for kdu_scaling_info in _scaling_info[kdu_name]:
7361 deployed_kdu, index = get_deployed_kdu(
7362 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7363 )
7364 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7365 kdu_instance = deployed_kdu["kdu-instance"]
7366 kdu_model = deployed_kdu.get("kdu-model")
7367 scale = int(kdu_scaling_info["scale"])
7368 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7369
7370 db_dict = {
7371 "collection": "nsrs",
7372 "filter": {"_id": nsr_id},
7373 "path": "_admin.deployed.K8s.{}".format(index),
7374 }
7375
7376 step = "scaling application {}".format(
7377 kdu_scaling_info["resource-name"]
7378 )
7379 self.logger.debug(logging_text + step)
7380
7381 if kdu_scaling_info["type"] == "delete":
7382 kdu_config = get_configuration(db_vnfd, kdu_name)
7383 if (
7384 kdu_config
7385 and kdu_config.get("terminate-config-primitive")
7386 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7387 ):
7388 terminate_config_primitive_list = kdu_config.get(
7389 "terminate-config-primitive"
7390 )
7391 terminate_config_primitive_list.sort(
7392 key=lambda val: int(val["seq"])
7393 )
7394
7395 for (
7396 terminate_config_primitive
7397 ) in terminate_config_primitive_list:
7398 primitive_params_ = self._map_primitive_params(
7399 terminate_config_primitive, {}, {}
7400 )
7401 step = "execute terminate config primitive"
7402 self.logger.debug(logging_text + step)
7403 await asyncio.wait_for(
7404 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7405 cluster_uuid=cluster_uuid,
7406 kdu_instance=kdu_instance,
7407 primitive_name=terminate_config_primitive["name"],
7408 params=primitive_params_,
7409 db_dict=db_dict,
7410 total_timeout=self.timeout.primitive,
7411 vca_id=vca_id,
7412 ),
7413 timeout=self.timeout.primitive
7414 * self.timeout.primitive_outer_factor,
7415 )
7416
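# perform the actual scaling; the outer asyncio timeout protects against a
# connector call that never returns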
7417 await asyncio.wait_for(
7418 self.k8scluster_map[k8s_cluster_type].scale(
7419 kdu_instance=kdu_instance,
7420 scale=scale,
7421 resource_name=kdu_scaling_info["resource-name"],
7422 total_timeout=self.timeout.scale_on_error,
7423 vca_id=vca_id,
7424 cluster_uuid=cluster_uuid,
7425 kdu_model=kdu_model,
7426 atomic=True,
7427 db_dict=db_dict,
7428 ),
7429 timeout=self.timeout.scale_on_error
7430 * self.timeout.scale_on_error_outer_factor,
7431 )
7432
7433 if kdu_scaling_info["type"] == "create":
7434 kdu_config = get_configuration(db_vnfd, kdu_name)
7435 if (
7436 kdu_config
7437 and kdu_config.get("initial-config-primitive")
7438 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7439 ):
7440 initial_config_primitive_list = kdu_config.get(
7441 "initial-config-primitive"
7442 )
7443 initial_config_primitive_list.sort(
7444 key=lambda val: int(val["seq"])
7445 )
7446
7447 for initial_config_primitive in initial_config_primitive_list:
7448 primitive_params_ = self._map_primitive_params(
7449 initial_config_primitive, {}, {}
7450 )
7451 step = "execute initial config primitive"
7452 self.logger.debug(logging_text + step)
7453 await asyncio.wait_for(
7454 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7455 cluster_uuid=cluster_uuid,
7456 kdu_instance=kdu_instance,
7457 primitive_name=initial_config_primitive["name"],
7458 params=primitive_params_,
7459 db_dict=db_dict,
7460 vca_id=vca_id,
7461 ),
7462 timeout=600,
7463 )
7464
7465 async def _scale_ng_ro(
7466 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7467 ):
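# Propagate VDU creations/deletions to the next-generation RO: vdurs are first
# marked in the vnfr, RO is re-instantiated with the updated records, and the
# vdurs marked for deletion are removed once RO finishes.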
7468 nsr_id = db_nslcmop["nsInstanceId"]
7469 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7470 db_vnfrs = {}
7471
7472 # read from db: vnfd's for every vnf
7473 db_vnfds = []
7474
7475 # for each vnf in ns, read vnfd
7476 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7477 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7478 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7479             # if we don't have this vnfd yet, read it from db
7480 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7481 # read from db
7482 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7483 db_vnfds.append(vnfd)
7484 n2vc_key = self.n2vc.get_public_key()
7485 n2vc_key_list = [n2vc_key]
7486 self.scale_vnfr(
7487 db_vnfr,
7488 vdu_scaling_info.get("vdu-create"),
7489 vdu_scaling_info.get("vdu-delete"),
7490 mark_delete=True,
7491 )
7492 # db_vnfr has been updated, update db_vnfrs to use it
7493 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7494 await self._instantiate_ng_ro(
7495 logging_text,
7496 nsr_id,
7497 db_nsd,
7498 db_nsr,
7499 db_nslcmop,
7500 db_vnfrs,
7501 db_vnfds,
7502 n2vc_key_list,
7503 stage=stage,
7504 start_deploy=time(),
7505 timeout_ns_deploy=self.timeout.ns_deploy,
7506 )
7507 if vdu_scaling_info.get("vdu-delete"):
7508 self.scale_vnfr(
7509 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7510 )
7511
7512 async def extract_prometheus_scrape_jobs(
7513 self,
7514 ee_id: str,
7515 artifact_path: str,
7516 ee_config_descriptor: dict,
7517 vnfr_id: str,
7518 nsr_id: str,
7519 target_ip: str,
7520 element_type: str,
7521 vnf_member_index: str = "",
7522 vdu_id: str = "",
7523 vdu_index: int = None,
7524 kdu_name: str = "",
7525 kdu_index: int = None,
7526     ) -> list:
7527 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7528 This method will wait until the corresponding VDU or KDU is fully instantiated
7529
7530 Args:
7531 ee_id (str): Execution Environment ID
7532 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7533 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7534 vnfr_id (str): VNFR ID where this EE applies
7535 nsr_id (str): NSR ID where this EE applies
7536 target_ip (str): VDU/KDU instance IP address
7537 element_type (str): NS or VNF or VDU or KDU
7538 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7539 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7540 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7541 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7542 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7543
7544 Raises:
7545             LcmException: When the VDU or KDU instance is not found within an hour
7546
7547 Returns:
7548             list: Prometheus scrape jobs
7549 """
7550 # default the vdur and kdur names to an empty string, to avoid any later
7551 # problem with Prometheus when the element type is not VDU or KDU
7552 vdur_name = ""
7553 kdur_name = ""
7554
7555         # look for a file called 'prometheus*.j2' in the artifact folder
7556 artifact_content = self.fs.dir_ls(artifact_path)
7557 job_file = next(
7558 (
7559 f
7560 for f in artifact_content
7561 if f.startswith("prometheus") and f.endswith(".j2")
7562 ),
7563 None,
7564 )
7565 if not job_file:
7566 return
7567         self.logger.debug("Artifact path: {}".format(artifact_path))
7568         self.logger.debug("Job file: {}".format(job_file))
7569 with self.fs.file_open((artifact_path, job_file), "r") as f:
7570 job_data = f.read()
7571
7572 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7573 if element_type in ("VDU", "KDU"):
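# poll the vnfr for up to one hour (360 iterations x 10 s) until the VDUR or
# KDUR record exposes its name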
7574 for _ in range(360):
7575 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7576 if vdu_id and vdu_index is not None:
7577 vdur = next(
7578 (
7579 x
7580 for x in get_iterable(db_vnfr, "vdur")
7581 if (
7582 x.get("vdu-id-ref") == vdu_id
7583 and x.get("count-index") == vdu_index
7584 )
7585 ),
7586 {},
7587 )
7588 if vdur.get("name"):
7589 vdur_name = vdur.get("name")
7590 break
7591 if kdu_name and kdu_index is not None:
7592 kdur = next(
7593 (
7594 x
7595 for x in get_iterable(db_vnfr, "kdur")
7596 if (
7597 x.get("kdu-name") == kdu_name
7598 and x.get("count-index") == kdu_index
7599 )
7600 ),
7601 {},
7602 )
7603 if kdur.get("name"):
7604 kdur_name = kdur.get("name")
7605 break
7606
7607 await asyncio.sleep(10)
7608 else:
7609 if vdu_id and vdu_index is not None:
7610 raise LcmException(
7611 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7612 )
7613 if kdu_name and kdu_index is not None:
7614 raise LcmException(
7615 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7616 )
7617
7618 # TODO get_service
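# helm-based EE (ee_id set): scrape through the EE's metric service inside the
# cluster; otherwise scrape the target directly on the metric-path/metric-port
# taken from the EE config descriptor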
7619 if ee_id is not None:
7620 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7621 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7622 host_port = "80"
7623 vnfr_id = vnfr_id.replace("-", "")
7624 variables = {
7625 "JOB_NAME": vnfr_id,
7626 "TARGET_IP": target_ip,
7627 "EXPORTER_POD_IP": host_name,
7628 "EXPORTER_POD_PORT": host_port,
7629 "NSR_ID": nsr_id,
7630 "VNF_MEMBER_INDEX": vnf_member_index,
7631 "VDUR_NAME": vdur_name,
7632 "KDUR_NAME": kdur_name,
7633 "ELEMENT_TYPE": element_type,
7634 }
7635 else:
7636 metric_path = ee_config_descriptor["metric-path"]
7637 target_port = ee_config_descriptor["metric-port"]
7638 vnfr_id = vnfr_id.replace("-", "")
7639 variables = {
7640 "JOB_NAME": vnfr_id,
7641 "TARGET_IP": target_ip,
7642 "TARGET_PORT": target_port,
7643 "METRIC_PATH": metric_path,
7644 }
7645
7646 job_list = parse_job(job_data, variables)
7647         # ensure job_name uses the vnfr_id; add nsr_id and vnfr_id as metadata
7648 for job in job_list:
7649 if (
7650 not isinstance(job.get("job_name"), str)
7651 or vnfr_id not in job["job_name"]
7652 ):
7653 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7654 job["nsr_id"] = nsr_id
7655 job["vnfr_id"] = vnfr_id
7656 return job_list
7657
7658 async def rebuild_start_stop(
7659 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7660 ):
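# Start, stop or rebuild a single VDU through RO: locate the target vdur,
# build the RO payload and wait for the resulting RO action to complete.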
7661 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7662 self.logger.info(logging_text + "Enter")
7663 stage = ["Preparing the environment", ""]
7664 # database nsrs record
7665 db_nsr_update = {}
7666 vdu_vim_name = None
7667 vim_vm_id = None
7668         # in case of error, indicates which part of the operation failed, to set the nsr at error status
7669 start_deploy = time()
7670 try:
7671 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7672 vim_account_id = db_vnfr.get("vim-account-id")
7673 vim_info_key = "vim:" + vim_account_id
7674 vdu_id = additional_param["vdu_id"]
7675 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7676 vdur = find_in_list(
7677 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7678 )
7679 if vdur:
7680 vdu_vim_name = vdur["name"]
7681 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7682                 target_vim, _ = next(iter(vdur["vim_info"].items()))
7683 else:
7684 raise LcmException("Target vdu is not found")
7685 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7686 # wait for any previous tasks in process
7687 stage[1] = "Waiting for previous operations to terminate"
7688 self.logger.info(stage[1])
7689 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7690
7691 stage[1] = "Reading from database."
7692 self.logger.info(stage[1])
7693 self._write_ns_status(
7694 nsr_id=nsr_id,
7695 ns_state=None,
7696 current_operation=operation_type.upper(),
7697 current_operation_id=nslcmop_id,
7698 )
7699 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7700
7701 # read from db: ns
7702 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7703 db_nsr_update["operational-status"] = operation_type
7704 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7705 # Payload for RO
7706 desc = {
7707 operation_type: {
7708 "vim_vm_id": vim_vm_id,
7709 "vnf_id": vnf_id,
7710 "vdu_index": additional_param["count-index"],
7711 "vdu_id": vdur["id"],
7712 "target_vim": target_vim,
7713 "vim_account_id": vim_account_id,
7714 }
7715 }
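# Illustrative payload for a "start" operation (example values, not taken from
# a real deployment):
# {"start": {"vim_vm_id": "<vim-vm-uuid>", "vnf_id": "<vnfr-id>",
#            "vdu_index": 0, "vdu_id": "<vdur-id>",
#            "target_vim": "vim:<vim-account-id>",
#            "vim_account_id": "<vim-account-id>"}}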
7716 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7717 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7718 self.logger.info("ro nsr id: {}".format(nsr_id))
7719 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7720 self.logger.info("response from RO: {}".format(result_dict))
7721 action_id = result_dict["action_id"]
7722 await self._wait_ng_ro(
7723 nsr_id,
7724 action_id,
7725 nslcmop_id,
7726 start_deploy,
7727 self.timeout.operate,
7728 None,
7729 "start_stop_rebuild",
7730 )
7731 return "COMPLETED", "Done"
7732 except (ROclient.ROClientException, DbException, LcmException) as e:
7733 self.logger.error("Exit Exception {}".format(e))
7734 exc = e
7735 except asyncio.CancelledError:
7736 self.logger.error("Cancelled Exception while '{}'".format(stage))
7737 exc = "Operation was cancelled"
7738 except Exception as e:
7739 exc = traceback.format_exc()
7740 self.logger.critical(
7741 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7742 )
7743 return "FAILED", "Error in operate VNF {}".format(exc)
7744
7745 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7746 """
7747 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7748
7749 :param: vim_account_id: VIM Account ID
7750
7751 :return: (cloud_name, cloud_credential)
7752 """
7753 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7754 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7755
7756 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7757 """
7758 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7759
7760 :param: vim_account_id: VIM Account ID
7761
7762 :return: (cloud_name, cloud_credential)
7763 """
7764 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7765 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7766
7767 async def migrate(self, nsr_id, nslcmop_id):
7768 """
7769 Migrate VNFs and VDUs instances in a NS
7770
7771 :param: nsr_id: NS Instance ID
7772 :param: nslcmop_id: nslcmop ID of migrate
7773
7774 """
7775 # Try to lock HA task here
7776 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7777 if not task_is_locked_by_me:
7778 return
7779 logging_text = "Task ns={} migrate ".format(nsr_id)
7780 self.logger.debug(logging_text + "Enter")
7781 # get all needed from database
7782 db_nslcmop = None
7783 db_nslcmop_update = {}
7784 nslcmop_operation_state = None
7785 db_nsr_update = {}
7786 target = {}
7787 exc = None
7788         # in case of error, indicates which part of the operation failed, to set the nsr at error status
7789 start_deploy = time()
7790
7791 try:
7792 # wait for any previous tasks in process
7793 step = "Waiting for previous operations to terminate"
7794 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7795
7796 self._write_ns_status(
7797 nsr_id=nsr_id,
7798 ns_state=None,
7799 current_operation="MIGRATING",
7800 current_operation_id=nslcmop_id,
7801 )
7802 step = "Getting nslcmop from database"
7803 self.logger.debug(
7804 step + " after having waited for previous tasks to be completed"
7805 )
7806 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7807 migrate_params = db_nslcmop.get("operationParams")
7808
7809 target = {}
7810 target.update(migrate_params)
7811 desc = await self.RO.migrate(nsr_id, target)
7812 self.logger.debug("RO return > {}".format(desc))
7813 action_id = desc["action_id"]
7814 await self._wait_ng_ro(
7815 nsr_id,
7816 action_id,
7817 nslcmop_id,
7818 start_deploy,
7819 self.timeout.migrate,
7820 operation="migrate",
7821 )
7822 except (ROclient.ROClientException, DbException, LcmException) as e:
7823 self.logger.error("Exit Exception {}".format(e))
7824 exc = e
7825 except asyncio.CancelledError:
7826 self.logger.error("Cancelled Exception while '{}'".format(step))
7827 exc = "Operation was cancelled"
7828 except Exception as e:
7829 exc = traceback.format_exc()
7830 self.logger.critical(
7831 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7832 )
7833 finally:
7834 self._write_ns_status(
7835 nsr_id=nsr_id,
7836 ns_state=None,
7837 current_operation="IDLE",
7838 current_operation_id=None,
7839 )
7840 if exc:
7841 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7842 nslcmop_operation_state = "FAILED"
7843 else:
7844 nslcmop_operation_state = "COMPLETED"
7845 db_nslcmop_update["detailed-status"] = "Done"
7846 db_nsr_update["detailed-status"] = "Done"
7847
7848 self._write_op_status(
7849 op_id=nslcmop_id,
7850 stage="",
7851 error_message="",
7852 operation_state=nslcmop_operation_state,
7853 other_update=db_nslcmop_update,
7854 )
7855 if nslcmop_operation_state:
7856 try:
7857 msg = {
7858 "nsr_id": nsr_id,
7859 "nslcmop_id": nslcmop_id,
7860 "operationState": nslcmop_operation_state,
7861 }
7862 await self.msg.aiowrite("ns", "migrated", msg)
7863 except Exception as e:
7864 self.logger.error(
7865 logging_text + "kafka_write notification Exception {}".format(e)
7866 )
7867 self.logger.debug(logging_text + "Exit")
7868 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7869
7870 async def heal(self, nsr_id, nslcmop_id):
7871 """
7872 Heal NS
7873
7874 :param nsr_id: ns instance to heal
7875 :param nslcmop_id: operation to run
7876 :return:
7877 """
7878
7879 # Try to lock HA task here
7880 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7881 if not task_is_locked_by_me:
7882 return
7883
7884 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7885 stage = ["", "", ""]
7886 tasks_dict_info = {}
7887 # ^ stage, step, VIM progress
7888 self.logger.debug(logging_text + "Enter")
7889 # get all needed from database
7890 db_nsr = None
7891 db_nslcmop_update = {}
7892 db_nsr_update = {}
7893 db_vnfrs = {} # vnf's info indexed by _id
7894 exc = None
7895 old_operational_status = ""
7896 old_config_status = ""
7897 nsi_id = None
7898 try:
7899 # wait for any previous tasks in process
7900 step = "Waiting for previous operations to terminate"
7901 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7902 self._write_ns_status(
7903 nsr_id=nsr_id,
7904 ns_state=None,
7905 current_operation="HEALING",
7906 current_operation_id=nslcmop_id,
7907 )
7908
7909 step = "Getting nslcmop from database"
7910 self.logger.debug(
7911 step + " after having waited for previous tasks to be completed"
7912 )
7913 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7914
7915 step = "Getting nsr from database"
7916 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7917 old_operational_status = db_nsr["operational-status"]
7918 old_config_status = db_nsr["config-status"]
7919
7920 db_nsr_update = {
7921 "_admin.deployed.RO.operational-status": "healing",
7922 }
7923 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7924
7925 step = "Sending heal order to VIM"
7926 await self.heal_RO(
7927 logging_text=logging_text,
7928 nsr_id=nsr_id,
7929 db_nslcmop=db_nslcmop,
7930 stage=stage,
7931 )
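# RO healing must finish before the VCA work below: the healed VMs may come
# back with new addresses or keys that the charms depend on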
7932 # VCA tasks
7933 # read from db: nsd
7934 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7935 self.logger.debug(logging_text + stage[1])
7936 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7937 self.fs.sync(db_nsr["nsd-id"])
7938 db_nsr["nsd"] = nsd
7939 # read from db: vnfr's of this ns
7940 step = "Getting vnfrs from db"
7941 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7942 for vnfr in db_vnfrs_list:
7943 db_vnfrs[vnfr["_id"]] = vnfr
7944 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7945
7946 # Check for each target VNF
7947 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7948 for target_vnf in target_list:
7949 # Find this VNF in the list from DB
7950 vnfr_id = target_vnf.get("vnfInstanceId", None)
7951 if vnfr_id:
7952 db_vnfr = db_vnfrs[vnfr_id]
7953 vnfd_id = db_vnfr.get("vnfd-id")
7954 vnfd_ref = db_vnfr.get("vnfd-ref")
7955 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7956 base_folder = vnfd["_admin"]["storage"]
7957 vdu_id = None
7958 vdu_index = 0
7959 vdu_name = None
7960 kdu_name = None
7961 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7962 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7963
7964 # Check each target VDU and deploy N2VC
7965 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7966 "vdu", []
7967 )
7968 if not target_vdu_list:
7969                     # build the list of VDUs to be healed from the existing vdurs
7970 target_vdu_list = []
7971 for existing_vdu in db_vnfr.get("vdur"):
7972 vdu_name = existing_vdu.get("vdu-name", None)
7973 vdu_index = existing_vdu.get("count-index", 0)
7974 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7975 "run-day1", False
7976 )
7977 vdu_to_be_healed = {
7978 "vdu-id": vdu_name,
7979 "count-index": vdu_index,
7980 "run-day1": vdu_run_day1,
7981 }
7982 target_vdu_list.append(vdu_to_be_healed)
7983 for target_vdu in target_vdu_list:
7984 deploy_params_vdu = target_vdu
7985                     # Set the VNF-level run-day1 value if no VDU-level value exists
7986 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
7987 "additionalParams", {}
7988 ).get("run-day1"):
7989 deploy_params_vdu["run-day1"] = target_vnf[
7990 "additionalParams"
7991 ].get("run-day1")
7992 vdu_name = target_vdu.get("vdu-id", None)
7993 # TODO: Get vdu_id from vdud.
7994 vdu_id = vdu_name
7995                     # For multi-instance VDUs count-index is mandatory
7996                     # For single-instance VDUs count-index is 0
7997 vdu_index = target_vdu.get("count-index", 0)
7998
7999 # n2vc_redesign STEP 3 to 6 Deploy N2VC
8000 stage[1] = "Deploying Execution Environments."
8001 self.logger.debug(logging_text + stage[1])
8002
8003 # VNF Level charm. Normal case when proxy charms.
8004                     # If the target instance is the management machine, continue with the actions: recreate the EE for native charms or reinject the juju key for proxy charms.
8005 descriptor_config = get_configuration(vnfd, vnfd_ref)
8006 if descriptor_config:
8007 # Continue if healed machine is management machine
8008 vnf_ip_address = db_vnfr.get("ip-address")
8009 target_instance = None
8010                         for instance in db_vnfr.get("vdur", []):
8011 if (
8012 instance["vdu-name"] == vdu_name
8013 and instance["count-index"] == vdu_index
8014 ):
8015 target_instance = instance
8016 break
8017                         if target_instance and vnf_ip_address == target_instance.get("ip-address"):
8018 self._heal_n2vc(
8019 logging_text=logging_text
8020 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8021 member_vnf_index, vdu_name, vdu_index
8022 ),
8023 db_nsr=db_nsr,
8024 db_vnfr=db_vnfr,
8025 nslcmop_id=nslcmop_id,
8026 nsr_id=nsr_id,
8027 nsi_id=nsi_id,
8028 vnfd_id=vnfd_ref,
8029 vdu_id=None,
8030 kdu_name=None,
8031 member_vnf_index=member_vnf_index,
8032 vdu_index=0,
8033 vdu_name=None,
8034 deploy_params=deploy_params_vdu,
8035 descriptor_config=descriptor_config,
8036 base_folder=base_folder,
8037 task_instantiation_info=tasks_dict_info,
8038 stage=stage,
8039 )
8040
8041 # VDU Level charm. Normal case with native charms.
8042 descriptor_config = get_configuration(vnfd, vdu_name)
8043 if descriptor_config:
8044 self._heal_n2vc(
8045 logging_text=logging_text
8046 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8047 member_vnf_index, vdu_name, vdu_index
8048 ),
8049 db_nsr=db_nsr,
8050 db_vnfr=db_vnfr,
8051 nslcmop_id=nslcmop_id,
8052 nsr_id=nsr_id,
8053 nsi_id=nsi_id,
8054 vnfd_id=vnfd_ref,
8055 vdu_id=vdu_id,
8056 kdu_name=kdu_name,
8057 member_vnf_index=member_vnf_index,
8058 vdu_index=vdu_index,
8059 vdu_name=vdu_name,
8060 deploy_params=deploy_params_vdu,
8061 descriptor_config=descriptor_config,
8062 base_folder=base_folder,
8063 task_instantiation_info=tasks_dict_info,
8064 stage=stage,
8065 )
8066
8067 except (
8068 ROclient.ROClientException,
8069 DbException,
8070 LcmException,
8071 NgRoException,
8072 ) as e:
8073 self.logger.error(logging_text + "Exit Exception {}".format(e))
8074 exc = e
8075 except asyncio.CancelledError:
8076 self.logger.error(
8077 logging_text + "Cancelled Exception while '{}'".format(step)
8078 )
8079 exc = "Operation was cancelled"
8080 except Exception as e:
8081 exc = traceback.format_exc()
8082 self.logger.critical(
8083 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8084 exc_info=True,
8085 )
8086 finally:
8087 if tasks_dict_info:
8088 stage[1] = "Waiting for healing pending tasks."
8089 self.logger.debug(logging_text + stage[1])
8090 exc = await self._wait_for_tasks(
8091 logging_text,
8092 tasks_dict_info,
8093 self.timeout.ns_deploy,
8094 stage,
8095 nslcmop_id,
8096 nsr_id=nsr_id,
8097 )
8098 if exc:
8099 db_nslcmop_update[
8100 "detailed-status"
8101 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
8102 nslcmop_operation_state = "FAILED"
8103 if db_nsr:
8104 db_nsr_update["operational-status"] = old_operational_status
8105 db_nsr_update["config-status"] = old_config_status
8106 db_nsr_update[
8107 "detailed-status"
8108 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
8109 for task, task_name in tasks_dict_info.items():
8110 if not task.done() or task.cancelled() or task.exception():
8111 if task_name.startswith(self.task_name_deploy_vca):
8112 # A N2VC task is pending
8113 db_nsr_update["config-status"] = "failed"
8114 else:
8115 # RO task is pending
8116 db_nsr_update["operational-status"] = "failed"
8117 else:
8118 error_description_nslcmop = None
8119 nslcmop_operation_state = "COMPLETED"
8120 db_nslcmop_update["detailed-status"] = "Done"
8121 db_nsr_update["detailed-status"] = "Done"
8122 db_nsr_update["operational-status"] = "running"
8123 db_nsr_update["config-status"] = "configured"
8124
8125 self._write_op_status(
8126 op_id=nslcmop_id,
8127 stage="",
8128 error_message=error_description_nslcmop,
8129 operation_state=nslcmop_operation_state,
8130 other_update=db_nslcmop_update,
8131 )
8132 if db_nsr:
8133 self._write_ns_status(
8134 nsr_id=nsr_id,
8135 ns_state=None,
8136 current_operation="IDLE",
8137 current_operation_id=None,
8138 other_update=db_nsr_update,
8139 )
8140
8141 if nslcmop_operation_state:
8142 try:
8143 msg = {
8144 "nsr_id": nsr_id,
8145 "nslcmop_id": nslcmop_id,
8146 "operationState": nslcmop_operation_state,
8147 }
8148 await self.msg.aiowrite("ns", "healed", msg)
8149 except Exception as e:
8150 self.logger.error(
8151 logging_text + "kafka_write notification Exception {}".format(e)
8152 )
8153 self.logger.debug(logging_text + "Exit")
8154 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8155
8156 async def heal_RO(
8157 self,
8158 logging_text,
8159 nsr_id,
8160 db_nslcmop,
8161 stage,
8162 ):
8163 """
8164 Heal at RO
8165         :param logging_text: prefix text to use when logging
8166 :param nsr_id: nsr identity
8167         :param db_nslcmop: database content of ns operation, in this case, 'heal'
8168 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8169 :return: None or exception
8170 """
8171
8172 def get_vim_account(vim_account_id):
8173 nonlocal db_vims
8174 if vim_account_id in db_vims:
8175 return db_vims[vim_account_id]
8176 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8177 db_vims[vim_account_id] = db_vim
8178 return db_vim
8179
8180 try:
8181 start_heal = time()
8182 ns_params = db_nslcmop.get("operationParams")
8183 if ns_params and ns_params.get("timeout_ns_heal"):
8184 timeout_ns_heal = ns_params["timeout_ns_heal"]
8185 else:
8186 timeout_ns_heal = self.timeout.ns_heal
8187
8188 db_vims = {}
8189
8190 nslcmop_id = db_nslcmop["_id"]
8191 target = {
8192 "action_id": nslcmop_id,
8193 }
8194 self.logger.warning(
8195 "db_nslcmop={} and timeout_ns_heal={}".format(
8196 db_nslcmop, timeout_ns_heal
8197 )
8198 )
8199 target.update(db_nslcmop.get("operationParams", {}))
8200
8201 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8202 desc = await self.RO.recreate(nsr_id, target)
8203 self.logger.debug("RO return > {}".format(desc))
8204 action_id = desc["action_id"]
8205             # wait for RO to complete; otherwise reinjecting the juju key at RO could find the VM in state Deleted
8206 await self._wait_ng_ro(
8207 nsr_id,
8208 action_id,
8209 nslcmop_id,
8210 start_heal,
8211 timeout_ns_heal,
8212 stage,
8213 operation="healing",
8214 )
8215
8216 # Updating NSR
8217 db_nsr_update = {
8218 "_admin.deployed.RO.operational-status": "running",
8219 "detailed-status": " ".join(stage),
8220 }
8221 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8222 self._write_op_status(nslcmop_id, stage)
8223 self.logger.debug(
8224 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8225 )
8226
8227 except Exception as e:
8228 stage[2] = "ERROR healing at VIM"
8229 # self.set_vnfr_at_error(db_vnfrs, str(e))
8230 self.logger.error(
8231 "Error healing at VIM {}".format(e),
8232 exc_info=not isinstance(
8233 e,
8234 (
8235 ROclient.ROClientException,
8236 LcmException,
8237 DbException,
8238 NgRoException,
8239 ),
8240 ),
8241 )
8242 raise
8243
8244 def _heal_n2vc(
8245 self,
8246 logging_text,
8247 db_nsr,
8248 db_vnfr,
8249 nslcmop_id,
8250 nsr_id,
8251 nsi_id,
8252 vnfd_id,
8253 vdu_id,
8254 kdu_name,
8255 member_vnf_index,
8256 vdu_index,
8257 vdu_name,
8258 deploy_params,
8259 descriptor_config,
8260 base_folder,
8261 task_instantiation_info,
8262 stage,
8263 ):
8264         # launch heal_N2VC in an asyncio task and register the task object
8265 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8266 # if not found, create one entry and update database
8267 # fill db_nsr._admin.deployed.VCA.<index>
8268
8269 self.logger.debug(
8270             logging_text + "_heal_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
8271 )
8272
8273 charm_name = ""
8274 get_charm_name = False
8275 if "execution-environment-list" in descriptor_config:
8276 ee_list = descriptor_config.get("execution-environment-list", [])
8277 elif "juju" in descriptor_config:
8278 ee_list = [descriptor_config] # ns charms
8279 if "execution-environment-list" not in descriptor_config:
8280 # charm name is only required for ns charms
8281 get_charm_name = True
8282         else:  # other types, such as script, are not supported
8283 ee_list = []
8284
8285 for ee_item in ee_list:
8286 self.logger.debug(
8287 logging_text
8288 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8289 ee_item.get("juju"), ee_item.get("helm-chart")
8290 )
8291 )
8292 ee_descriptor_id = ee_item.get("id")
8293 if ee_item.get("juju"):
8294 vca_name = ee_item["juju"].get("charm")
8295 if get_charm_name:
8296 charm_name = self.find_charm_name(db_nsr, str(vca_name))
8297 vca_type = (
8298 "lxc_proxy_charm"
8299 if ee_item["juju"].get("charm") is not None
8300 else "native_charm"
8301 )
8302 if ee_item["juju"].get("cloud") == "k8s":
8303 vca_type = "k8s_proxy_charm"
8304 elif ee_item["juju"].get("proxy") is False:
8305 vca_type = "native_charm"
8306 elif ee_item.get("helm-chart"):
8307 vca_name = ee_item["helm-chart"]
8308                 if ee_item.get("helm-version") == "v2":
8309 vca_type = "helm"
8310 else:
8311 vca_type = "helm-v3"
8312 else:
8313 self.logger.debug(
8314 logging_text + "skipping non juju neither charm configuration"
8315 )
8316 continue
8317
8318 vca_index = -1
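# for/else: reuse the VCA entry matching this element if one exists;
# otherwise the else branch creates and registers a new record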
8319 for vca_index, vca_deployed in enumerate(
8320 db_nsr["_admin"]["deployed"]["VCA"]
8321 ):
8322 if not vca_deployed:
8323 continue
8324 if (
8325 vca_deployed.get("member-vnf-index") == member_vnf_index
8326 and vca_deployed.get("vdu_id") == vdu_id
8327 and vca_deployed.get("kdu_name") == kdu_name
8328 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8329 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8330 ):
8331 break
8332 else:
8333 # not found, create one.
8334 target = (
8335 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8336 )
8337 if vdu_id:
8338 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8339 elif kdu_name:
8340 target += "/kdu/{}".format(kdu_name)
8341 vca_deployed = {
8342 "target_element": target,
8343 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8344 "member-vnf-index": member_vnf_index,
8345 "vdu_id": vdu_id,
8346 "kdu_name": kdu_name,
8347 "vdu_count_index": vdu_index,
8348 "operational-status": "init", # TODO revise
8349 "detailed-status": "", # TODO revise
8350 "step": "initial-deploy", # TODO revise
8351 "vnfd_id": vnfd_id,
8352 "vdu_name": vdu_name,
8353 "type": vca_type,
8354 "ee_descriptor_id": ee_descriptor_id,
8355 "charm_name": charm_name,
8356 }
8357 vca_index += 1
8358
8359 # create VCA and configurationStatus in db
8360 db_dict = {
8361 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8362 "configurationStatus.{}".format(vca_index): dict(),
8363 }
8364 self.update_db_2("nsrs", nsr_id, db_dict)
8365
8366 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8367
8368 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8369 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8370 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8371
8372 # Launch task
8373 task_n2vc = asyncio.ensure_future(
8374 self.heal_N2VC(
8375 logging_text=logging_text,
8376 vca_index=vca_index,
8377 nsi_id=nsi_id,
8378 db_nsr=db_nsr,
8379 db_vnfr=db_vnfr,
8380 vdu_id=vdu_id,
8381 kdu_name=kdu_name,
8382 vdu_index=vdu_index,
8383 deploy_params=deploy_params,
8384 config_descriptor=descriptor_config,
8385 base_folder=base_folder,
8386 nslcmop_id=nslcmop_id,
8387 stage=stage,
8388 vca_type=vca_type,
8389 vca_name=vca_name,
8390 ee_config_descriptor=ee_item,
8391 )
8392 )
8393 self.lcm_tasks.register(
8394 "ns",
8395 nsr_id,
8396 nslcmop_id,
8397 "instantiate_N2VC-{}".format(vca_index),
8398 task_n2vc,
8399 )
8400 task_instantiation_info[
8401 task_n2vc
8402 ] = self.task_name_deploy_vca + " {}.{}".format(
8403 member_vnf_index or "", vdu_id or ""
8404 )
8405
8406 async def heal_N2VC(
8407 self,
8408 logging_text,
8409 vca_index,
8410 nsi_id,
8411 db_nsr,
8412 db_vnfr,
8413 vdu_id,
8414 kdu_name,
8415 vdu_index,
8416 config_descriptor,
8417 deploy_params,
8418 base_folder,
8419 nslcmop_id,
8420 stage,
8421 vca_type,
8422 vca_name,
8423 ee_config_descriptor,
8424 ):
8425 nsr_id = db_nsr["_id"]
8426 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
8427 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
8428 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
8429 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
8430 db_dict = {
8431 "collection": "nsrs",
8432 "filter": {"_id": nsr_id},
8433 "path": db_update_entry,
8434 }
8435 step = ""
8436 try:
8437 element_type = "NS"
8438 element_under_configuration = nsr_id
8439
8440 vnfr_id = None
8441 if db_vnfr:
8442 vnfr_id = db_vnfr["_id"]
8443 osm_config["osm"]["vnf_id"] = vnfr_id
8444
8445 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
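# base namespace is "<nsi>.<ns>"; it is extended below with vnf/vdu or kdu
# segments to uniquely identify the element under configuration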
8446
8447 if vca_type == "native_charm":
8448 index_number = 0
8449 else:
8450 index_number = vdu_index or 0
8451
8452 if vnfr_id:
8453 element_type = "VNF"
8454 element_under_configuration = vnfr_id
8455 namespace += ".{}-{}".format(vnfr_id, index_number)
8456 if vdu_id:
8457 namespace += ".{}-{}".format(vdu_id, index_number)
8458 element_type = "VDU"
8459 element_under_configuration = "{}-{}".format(vdu_id, index_number)
8460 osm_config["osm"]["vdu_id"] = vdu_id
8461 elif kdu_name:
8462 namespace += ".{}".format(kdu_name)
8463 element_type = "KDU"
8464 element_under_configuration = kdu_name
8465 osm_config["osm"]["kdu_name"] = kdu_name
8466
8467 # Get artifact path
8468 if base_folder["pkg-dir"]:
8469 artifact_path = "{}/{}/{}/{}".format(
8470 base_folder["folder"],
8471 base_folder["pkg-dir"],
8472 "charms"
8473 if vca_type
8474 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8475 else "helm-charts",
8476 vca_name,
8477 )
8478 else:
8479 artifact_path = "{}/Scripts/{}/{}/".format(
8480 base_folder["folder"],
8481 "charms"
8482 if vca_type
8483 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8484 else "helm-charts",
8485 vca_name,
8486 )
8487
8488 self.logger.debug("Artifact path > {}".format(artifact_path))
8489
8490 # get initial_config_primitive_list that applies to this element
8491 initial_config_primitive_list = config_descriptor.get(
8492 "initial-config-primitive"
8493 )
8494
8495 self.logger.debug(
8496 "Initial config primitive list > {}".format(
8497 initial_config_primitive_list
8498 )
8499 )
8500
8501 # add config if not present for NS charm
8502 ee_descriptor_id = ee_config_descriptor.get("id")
8503 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
8504 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
8505 initial_config_primitive_list, vca_deployed, ee_descriptor_id
8506 )
8507
8508 self.logger.debug(
8509 "Initial config primitive list #2 > {}".format(
8510 initial_config_primitive_list
8511 )
8512 )
8513 # n2vc_redesign STEP 3.1
8514 # find old ee_id if exists
8515 ee_id = vca_deployed.get("ee_id")
8516
8517 vca_id = self.get_vca_id(db_vnfr, db_nsr)
8518 # create or register execution environment in VCA. Only for native charms when healing
8519 if vca_type == "native_charm":
8520 step = "Waiting to VM being up and getting IP address"
8521 self.logger.debug(logging_text + step)
8522 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8523 logging_text,
8524 nsr_id,
8525 vnfr_id,
8526 vdu_id,
8527 vdu_index,
8528 user=None,
8529 pub_key=None,
8530 )
8531 credentials = {"hostname": rw_mgmt_ip}
8532 # get username
8533 username = deep_get(
8534 config_descriptor, ("config-access", "ssh-access", "default-user")
8535 )
8536                 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
8537                 # merged. Meanwhile, get the username from initial-config-primitive
8538 if not username and initial_config_primitive_list:
8539 for config_primitive in initial_config_primitive_list:
8540 for param in config_primitive.get("parameter", ()):
8541 if param["name"] == "ssh-username":
8542 username = param["value"]
8543 break
8544 if not username:
8545 raise LcmException(
8546 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8547 "'config-access.ssh-access.default-user'"
8548 )
8549 credentials["username"] = username
8550
8551 # n2vc_redesign STEP 3.2
8552                 # TODO: Before healing at RO, the native charm units to be deleted need to be destroyed first.
8553 self._write_configuration_status(
8554 nsr_id=nsr_id,
8555 vca_index=vca_index,
8556 status="REGISTERING",
8557 element_under_configuration=element_under_configuration,
8558 element_type=element_type,
8559 )
8560
8561 step = "register execution environment {}".format(credentials)
8562 self.logger.debug(logging_text + step)
8563 ee_id = await self.vca_map[vca_type].register_execution_environment(
8564 credentials=credentials,
8565 namespace=namespace,
8566 db_dict=db_dict,
8567 vca_id=vca_id,
8568 )
8569
8570 # update ee_id en db
8571 db_dict_ee_id = {
8572 "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
8573 }
8574 self.update_db_2("nsrs", nsr_id, db_dict_ee_id)
8575
8576                 # for compatibility with MON/POL modules, they need the model and application name in the database
8577                 # TODO ask MON/POL whether they can stop assuming the format "model_name.application_name"
8578                 # Not sure if this needs to be done when healing
8579 """
8580 ee_id_parts = ee_id.split(".")
8581 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8582 if len(ee_id_parts) >= 2:
8583 model_name = ee_id_parts[0]
8584 application_name = ee_id_parts[1]
8585 db_nsr_update[db_update_entry + "model"] = model_name
8586 db_nsr_update[db_update_entry + "application"] = application_name
8587 """
8588
8589 # n2vc_redesign STEP 3.3
8590 # Install configuration software. Only for native charms.
8591 step = "Install configuration Software"
8592
8593 self._write_configuration_status(
8594 nsr_id=nsr_id,
8595 vca_index=vca_index,
8596 status="INSTALLING SW",
8597 element_under_configuration=element_under_configuration,
8598 element_type=element_type,
8599 # other_update=db_nsr_update,
8600 other_update=None,
8601 )
8602
8603 # TODO check if already done
8604 self.logger.debug(logging_text + step)
8605 config = None
8606 if vca_type == "native_charm":
8607 config_primitive = next(
8608 (p for p in initial_config_primitive_list if p["name"] == "config"),
8609 None,
8610 )
8611 if config_primitive:
8612 config = self._map_primitive_params(
8613 config_primitive, {}, deploy_params
8614 )
8615 await self.vca_map[vca_type].install_configuration_sw(
8616 ee_id=ee_id,
8617 artifact_path=artifact_path,
8618 db_dict=db_dict,
8619 config=config,
8620 num_units=1,
8621 vca_id=vca_id,
8622 vca_type=vca_type,
8623 )
8624
8625 # write in db flag of configuration_sw already installed
8626 self.update_db_2(
8627 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
8628 )
8629
8630             # Not sure if this needs to be done when healing
8631 """
8632 # add relations for this VCA (wait for other peers related with this VCA)
8633 await self._add_vca_relations(
8634 logging_text=logging_text,
8635 nsr_id=nsr_id,
8636 vca_type=vca_type,
8637 vca_index=vca_index,
8638 )
8639 """
8640
8641             # if SSH access is required, then get the execution environment SSH public key;
8642             # for native charms we have already waited for the VM to be up
8643 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
8644 pub_key = None
8645 user = None
8646 # self.logger.debug("get ssh key block")
8647 if deep_get(
8648 config_descriptor, ("config-access", "ssh-access", "required")
8649 ):
8650 # self.logger.debug("ssh key needed")
8651                     # Need to inject an ssh key
8652 user = deep_get(
8653 config_descriptor,
8654 ("config-access", "ssh-access", "default-user"),
8655 )
8656 step = "Install configuration Software, getting public ssh key"
8657 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
8658 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
8659 )
8660
8661 step = "Insert public key into VM user={} ssh_key={}".format(
8662 user, pub_key
8663 )
8664 else:
8665 # self.logger.debug("no need to get ssh key")
8666 step = "Waiting to VM being up and getting IP address"
8667 self.logger.debug(logging_text + step)
8668
8669 # n2vc_redesign STEP 5.1
8670 # wait for RO (ip-address) Insert pub_key into VM
8671                 # IMPORTANT: we need to wait for RO to complete the healing operation.
8672 await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
8673 if vnfr_id:
8674 if kdu_name:
8675 rw_mgmt_ip = await self.wait_kdu_up(
8676 logging_text, nsr_id, vnfr_id, kdu_name
8677 )
8678 else:
8679 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8680 logging_text,
8681 nsr_id,
8682 vnfr_id,
8683 vdu_id,
8684 vdu_index,
8685 user=user,
8686 pub_key=pub_key,
8687 )
8688 else:
8689 rw_mgmt_ip = None # This is for a NS configuration
8690
8691 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
8692
8693 # store rw_mgmt_ip in deploy params for later replacement
8694 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
8695
8696 # Day1 operations.
8697 # get run-day1 operation parameter
8698 runDay1 = deploy_params.get("run-day1", False)
8699 self.logger.debug(
8700 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
8701 )
8702 if runDay1:
8703 # n2vc_redesign STEP 6 Execute initial config primitive
8704 step = "execute initial config primitive"
8705
8706 # wait for dependent primitives execution (NS -> VNF -> VDU)
8707 if initial_config_primitive_list:
8708 await self._wait_dependent_n2vc(
8709 nsr_id, vca_deployed_list, vca_index
8710 )
8711
8712 # stage, in function of element type: vdu, kdu, vnf or ns
8713 my_vca = vca_deployed_list[vca_index]
8714 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
8715 # VDU or KDU
8716 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
8717 elif my_vca.get("member-vnf-index"):
8718 # VNF
8719 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
8720 else:
8721 # NS
8722 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
8723
8724 self._write_configuration_status(
8725 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
8726 )
8727
8728 self._write_op_status(op_id=nslcmop_id, stage=stage)
8729
8730 check_if_terminated_needed = True
8731 for initial_config_primitive in initial_config_primitive_list:
8732                     # add ns_config_info to the deploy params when this EE belongs to the NS (no member-vnf-index)
8733 if not vca_deployed["member-vnf-index"]:
8734 deploy_params["ns_config_info"] = json.dumps(
8735 self._get_ns_config_info(nsr_id)
8736 )
8737 # TODO check if already done
8738 primitive_params_ = self._map_primitive_params(
8739 initial_config_primitive, {}, deploy_params
8740 )
8741
8742 step = "execute primitive '{}' params '{}'".format(
8743 initial_config_primitive["name"], primitive_params_
8744 )
8745 self.logger.debug(logging_text + step)
8746 await self.vca_map[vca_type].exec_primitive(
8747 ee_id=ee_id,
8748 primitive_name=initial_config_primitive["name"],
8749 params_dict=primitive_params_,
8750 db_dict=db_dict,
8751 vca_id=vca_id,
8752 vca_type=vca_type,
8753 )
8754                     # Once a primitive has been executed, record in the db whether terminate primitives need to be executed
8755 if check_if_terminated_needed:
8756 if config_descriptor.get("terminate-config-primitive"):
8757 self.update_db_2(
8758 "nsrs",
8759 nsr_id,
8760 {db_update_entry + "needed_terminate": True},
8761 )
8762 check_if_terminated_needed = False
8763
8764 # TODO register in database that primitive is done
8765
8766 # STEP 7 Configure metrics
8767             # Not sure if this needs to be done when healing
8768 """
8769 if vca_type == "helm" or vca_type == "helm-v3":
8770 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8771 ee_id=ee_id,
8772 artifact_path=artifact_path,
8773 ee_config_descriptor=ee_config_descriptor,
8774 vnfr_id=vnfr_id,
8775 nsr_id=nsr_id,
8776 target_ip=rw_mgmt_ip,
8777 )
8778 if prometheus_jobs:
8779 self.update_db_2(
8780 "nsrs",
8781 nsr_id,
8782 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8783 )
8784
8785 for job in prometheus_jobs:
8786 self.db.set_one(
8787 "prometheus_jobs",
8788 {"job_name": job["job_name"]},
8789 job,
8790 upsert=True,
8791 fail_on_empty=False,
8792 )
8793
8794 """
8795 step = "instantiated at VCA"
8796 self.logger.debug(logging_text + step)
8797
8798 self._write_configuration_status(
8799 nsr_id=nsr_id, vca_index=vca_index, status="READY"
8800 )
8801
8802 except Exception as e: # TODO not use Exception but N2VC exception
8803 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8804 if not isinstance(
8805 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
8806 ):
8807 self.logger.error(
8808 "Exception while {} : {}".format(step, e), exc_info=True
8809 )
8810 self._write_configuration_status(
8811 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
8812 )
8813 raise LcmException("{} {}".format(step, e)) from e
8814
8815 async def _wait_heal_ro(
8816 self,
8817 nsr_id,
8818 timeout=600,
8819 ):
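# poll the nsr every 15 s until RO reports something other than "healing",
# raising if the timeout expires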
8820 start_time = time()
8821 while time() <= start_time + timeout:
8822 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8823 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8824 "operational-status"
8825 ]
8826 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8827 if operational_status_ro != "healing":
8828 break
8829 await asyncio.sleep(15)
8830         else:  # timeout reached
8831             raise NgRoException("Timeout waiting for ns to be healed at RO")
8832
8833 async def vertical_scale(self, nsr_id, nslcmop_id):
8834 """
8835 Vertical Scale the VDUs in a NS
8836
8837 :param: nsr_id: NS Instance ID
8838         :param: nslcmop_id: nslcmop ID of the vertical scale operation
8839
8840 """
8841 # Try to lock HA task here
8842 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8843 if not task_is_locked_by_me:
8844 return
8845 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8846 self.logger.debug(logging_text + "Enter")
8847 # get all needed from database
8848 db_nslcmop = None
8849 db_nslcmop_update = {}
8850 nslcmop_operation_state = None
8851 db_nsr_update = {}
8852 target = {}
8853 exc = None
8854         # in case of error, indicates which part of the operation failed, to set the nsr at error status
8855 start_deploy = time()
8856
8857 try:
8858 # wait for any previous tasks in process
8859 step = "Waiting for previous operations to terminate"
8860 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8861
8862 self._write_ns_status(
8863 nsr_id=nsr_id,
8864 ns_state=None,
8865 current_operation="VerticalScale",
8866 current_operation_id=nslcmop_id,
8867 )
8868 step = "Getting nslcmop from database"
8869 self.logger.debug(
8870 step + " after having waited for previous tasks to be completed"
8871 )
8872 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8873 operationParams = db_nslcmop.get("operationParams")
8874 target = {}
8875 target.update(operationParams)
8876 desc = await self.RO.vertical_scale(nsr_id, target)
8877 self.logger.debug("RO return > {}".format(desc))
8878 action_id = desc["action_id"]
8879 await self._wait_ng_ro(
8880 nsr_id,
8881 action_id,
8882 nslcmop_id,
8883 start_deploy,
8884 self.timeout.verticalscale,
8885 operation="verticalscale",
8886 )
8887 except (ROclient.ROClientException, DbException, LcmException) as e:
8888 self.logger.error("Exit Exception {}".format(e))
8889 exc = e
8890 except asyncio.CancelledError:
8891 self.logger.error("Cancelled Exception while '{}'".format(step))
8892 exc = "Operation was cancelled"
8893 except Exception as e:
8894 exc = traceback.format_exc()
8895 self.logger.critical(
8896 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8897 )
8898 finally:
8899 self._write_ns_status(
8900 nsr_id=nsr_id,
8901 ns_state=None,
8902 current_operation="IDLE",
8903 current_operation_id=None,
8904 )
8905 if exc:
8906 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8907 nslcmop_operation_state = "FAILED"
8908 else:
8909 nslcmop_operation_state = "COMPLETED"
8910 db_nslcmop_update["detailed-status"] = "Done"
8911 db_nsr_update["detailed-status"] = "Done"
8912
8913 self._write_op_status(
8914 op_id=nslcmop_id,
8915 stage="",
8916 error_message="",
8917 operation_state=nslcmop_operation_state,
8918 other_update=db_nslcmop_update,
8919 )
8920 if nslcmop_operation_state:
8921 try:
8922 msg = {
8923 "nsr_id": nsr_id,
8924 "nslcmop_id": nslcmop_id,
8925 "operationState": nslcmop_operation_state,
8926 }
8927 await self.msg.aiowrite("ns", "verticalscaled", msg)
8928 except Exception as e:
8929 self.logger.error(
8930 logging_text + "kafka_write notification Exception {}".format(e)
8931 )
8932 self.logger.debug(logging_text + "Exit")
8933 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")