Feature 10945: Service KPI of VNF using exporter endpoint
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm_conn import K8sHelmConnector
102 from n2vc.k8s_helm3_conn import K8sHelm3Connector
103 from n2vc.k8s_juju_conn import K8sJujuConnector
104
105 from osm_common.dbbase import DbException
106 from osm_common.fsbase import FsException
107
108 from osm_lcm.data_utils.database.database import Database
109 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
110 from osm_lcm.data_utils.wim import (
111 get_sdn_ports,
112 get_target_wim_attrs,
113 select_feasible_wim_account,
114 )
115
116 from n2vc.n2vc_juju_conn import N2VCJujuConnector
117 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
118
119 from osm_lcm.lcm_helm_conn import LCMHelmConn
120 from osm_lcm.osm_config import OsmConfigBuilder
121 from osm_lcm.prometheus import parse_job
122
123 from copy import copy, deepcopy
124 from time import time
125 from uuid import uuid4
126
127 from random import SystemRandom
128
129 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
130
131
132 class NsLcm(LcmBase):
133 SUBOPERATION_STATUS_NOT_FOUND = -1
134 SUBOPERATION_STATUS_NEW = -2
135 SUBOPERATION_STATUS_SKIP = -3
136 task_name_deploy_vca = "Deploying VCA"
137
def __init__(self, msg, lcm_tasks, config: LcmCfg):
    """
    Init, Connect to database, filesystem storage, and messaging.

    Builds all the connectors this class uses afterwards: N2VC (juju charms),
    the helm execution-environment connector, the three K8s connectors
    (helm v2, helm v3, juju bundles) and the NG-RO client.

    :param msg: message bus handler, forwarded to LcmBase
    :param lcm_tasks: task registry used to track the LCM asyncio tasks
    :param config: LcmCfg with the timeout, RO and VCA configuration sections
    :return: None
    """
    super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

    # shared singleton instances for persistence and package storage
    self.db = Database().instance.db
    self.fs = Filesystem().instance.fs
    self.lcm_tasks = lcm_tasks
    self.timeout = config.timeout
    self.ro_config = config.RO
    self.vca_config = config.VCA

    # create N2VC connector; NSR status changes are pushed back to the DB
    # through the _on_update_n2vc_db callback
    self.n2vc = N2VCJujuConnector(
        log=self.logger,
        on_update_db=self._on_update_n2vc_db,
        fs=self.fs,
        db=self.db,
    )

    # execution-environment connector for helm-based EEs (same DB callback)
    self.conn_helm_ee = LCMHelmConn(
        log=self.logger,
        vca_config=self.vca_config,
        on_update_db=self._on_update_n2vc_db,
    )

    # helm v2 K8s connector; does not push status updates (on_update_db=None)
    self.k8sclusterhelm2 = K8sHelmConnector(
        kubectl_command=self.vca_config.kubectlpath,
        helm_command=self.vca_config.helmpath,
        log=self.logger,
        on_update_db=None,
        fs=self.fs,
        db=self.db,
    )

    # helm v3 K8s connector; does not push status updates (on_update_db=None)
    self.k8sclusterhelm3 = K8sHelm3Connector(
        kubectl_command=self.vca_config.kubectlpath,
        helm_command=self.vca_config.helm3path,
        fs=self.fs,
        log=self.logger,
        db=self.db,
        on_update_db=None,
    )

    # juju-bundle K8s connector; status updates go through _on_update_k8s_db
    self.k8sclusterjuju = K8sJujuConnector(
        kubectl_command=self.vca_config.kubectlpath,
        juju_command=self.vca_config.jujupath,
        log=self.logger,
        on_update_db=self._on_update_k8s_db,
        fs=self.fs,
        db=self.db,
    )

    # KDU deployment type -> K8s connector that handles it.
    # NOTE(review): "chart" is routed to helm v3 while "helm-chart" goes to
    # helm v2 -- confirm this asymmetry is intended.
    self.k8scluster_map = {
        "helm-chart": self.k8sclusterhelm2,
        "helm-chart-v3": self.k8sclusterhelm3,
        "chart": self.k8sclusterhelm3,
        "juju-bundle": self.k8sclusterjuju,
        "juju": self.k8sclusterjuju,
    }

    # VCA (execution environment) type -> connector that handles it
    self.vca_map = {
        "lxc_proxy_charm": self.n2vc,
        "native_charm": self.n2vc,
        "k8s_proxy_charm": self.n2vc,
        "helm": self.conn_helm_ee,
        "helm-v3": self.conn_helm_ee,
    }

    # create RO client
    self.RO = NgRoClient(**self.ro_config.to_dict())

    # LCM operation type -> RO method used to poll its status
    # (only "healing" uses the recreate status endpoint)
    self.op_status_map = {
        "instantiation": self.RO.status,
        "termination": self.RO.status,
        "migrate": self.RO.status,
        "healing": self.RO.recreate_status,
        "verticalscale": self.RO.status,
        "start_stop_rebuild": self.RO.status,
    }
221
222 @staticmethod
223 def increment_ip_mac(ip_mac, vm_index=1):
224 if not isinstance(ip_mac, str):
225 return ip_mac
226 try:
227 # try with ipv4 look for last dot
228 i = ip_mac.rfind(".")
229 if i > 0:
230 i += 1
231 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
232 # try with ipv6 or mac look for last colon. Operate in hex
233 i = ip_mac.rfind(":")
234 if i > 0:
235 i += 1
236 # format in hex, len can be 2 for mac or 4 for ipv6
237 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
238 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
239 )
240 except Exception:
241 pass
242 return None
243
def _on_update_ro_db(self, nsrs_id, ro_descriptor):
    """Persist the RO descriptor as the deploymentStatus of an NSR.

    Best-effort: database errors are logged as warnings, never raised.

    :param nsrs_id: _id of the nsrs record to update
    :param ro_descriptor: descriptor received from RO (stored as-is)
    :return: None
    """
    try:
        # TODO filter RO descriptor fields...
        self.update_db_2("nsrs", nsrs_id, {"deploymentStatus": ro_descriptor})
    except Exception as e:
        self.logger.warn(
            "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
        )
260
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
    """Callback run by N2VC when a VCA changes: refresh vcaStatus and nsState.

    Reads the NSR, queries juju for the whole NS status, derives the VCA
    configuration status and a READY/DEGRADED transition, and writes the
    result back to the nsrs collection. Best-effort: any error (other than
    cancellation/timeout) is logged and swallowed.

    :param table: originating table (unused; the nsrs record is always read)
    :param filter: DB filter containing the "_id" of the NSR
    :param path: dotted path of the updated element; its last component is
        expected to be the VCA index
    :param updated_data: data reported by N2VC (unused here)
    :param vca_id: optional VCA id forwarded to n2vc.get_status
    :return: None
    """
    # remove last dot from path (if exists)
    if path.endswith("."):
        path = path[:-1]

    try:
        nsr_id = filter.get("_id")

        # read ns record from database
        nsr = self.db.get_one(table="nsrs", q_filter=filter)
        current_ns_status = nsr.get("nsState")

        # get vca status for NS (raw juju status, not yaml)
        status_dict = await self.n2vc.get_status(
            namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
        )

        # vcaStatus
        db_dict = dict()
        db_dict["vcaStatus"] = status_dict

        # update configurationStatus for this VCA
        try:
            # the VCA index is the last dotted component of the path
            vca_index = int(path[path.rfind(".") + 1 :])

            vca_list = deep_get(
                target_dict=nsr, key_list=("_admin", "deployed", "VCA")
            )
            vca_status = vca_list[vca_index].get("status")

            configuration_status_list = nsr.get("configurationStatus")
            config_status = configuration_status_list[vca_index].get("status")

            # NOTE(review): db_dict has no "configurationStatus" key at this
            # point, so these assignments look like they raise KeyError and
            # get swallowed by the except below -- confirm whether the
            # configurationStatus update ever takes effect.
            if config_status == "BROKEN" and vca_status != "failed":
                db_dict["configurationStatus"][vca_index] = "READY"
            elif config_status != "BROKEN" and vca_status == "failed":
                db_dict["configurationStatus"][vca_index] = "BROKEN"
        except Exception as e:
            # not update configurationStatus
            self.logger.debug("Error updating vca_index (ignore): {}".format(e))

        # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
        # if nsState = 'DEGRADED' check if all is OK
        is_degraded = False
        if current_ns_status in ("READY", "DEGRADED"):
            error_description = ""
            # check machines: any agent not "started" or instance not
            # "running" degrades the NS
            if status_dict.get("machines"):
                for machine_id in status_dict.get("machines"):
                    machine = status_dict.get("machines").get(machine_id)
                    # check machine agent-status
                    if machine.get("agent-status"):
                        s = machine.get("agent-status").get("status")
                        if s != "started":
                            is_degraded = True
                            error_description += (
                                "machine {} agent-status={} ; ".format(
                                    machine_id, s
                                )
                            )
                    # check machine instance status
                    if machine.get("instance-status"):
                        s = machine.get("instance-status").get("status")
                        if s != "running":
                            is_degraded = True
                            error_description += (
                                "machine {} instance-status={} ; ".format(
                                    machine_id, s
                                )
                            )
            # check applications: any status other than "active" degrades
            if status_dict.get("applications"):
                for app_id in status_dict.get("applications"):
                    app = status_dict.get("applications").get(app_id)
                    # check application status
                    if app.get("status"):
                        s = app.get("status").get("status")
                        if s != "active":
                            is_degraded = True
                            error_description += (
                                "application {} status={} ; ".format(app_id, s)
                            )

            if error_description:
                db_dict["errorDescription"] = error_description
            # transition READY -> DEGRADED or DEGRADED -> READY as needed
            if current_ns_status == "READY" and is_degraded:
                db_dict["nsState"] = "DEGRADED"
            if current_ns_status == "DEGRADED" and not is_degraded:
                db_dict["nsState"] = "READY"

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
360
async def _on_update_k8s_db(
    self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
):
    """
    Updating vca status in NSR record.

    Asks the connector matching cluster_type for the complete KDU status and
    stores it under vcaStatus.<nsr_id> in the nsrs record. Best-effort: any
    error other than cancellation/timeout is logged and swallowed.

    :param cluster_uuid: UUID of a k8s cluster
    :param kdu_instance: The unique name of the KDU instance
    :param filter: To get nsr_id
    :param vca_id: optional VCA id forwarded to the connector
    :param cluster_type: The cluster type (juju, k8s)
    :return: none
    """
    nsr_id = filter.get("_id")
    try:
        connector = self.k8scluster_map[cluster_type]
        vca_status = await connector.status_kdu(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            yaml_format=False,
            complete_status=True,
            vca_id=vca_id,
        )

        self.logger.debug(
            f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
        )

        # write to database, keyed by the NS id
        self.update_db_2("nsrs", nsr_id, {"vcaStatus": {nsr_id: vca_status}})
    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
400
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """Render a cloud-init Jinja2 template with the VNF/VDU additional params.

    StrictUndefined makes every template variable mandatory: a variable
    missing from additional_params raises instead of rendering empty.

    :param cloud_init_text: raw cloud-init template text
    :param additional_params: template variable values (may be None)
    :param vnfd_id: VNFD id, used only in error messages
    :param vdu_id: VDU id, used only in error messages
    :return: rendered cloud-init text
    :raises LcmException: on undefined variables or template errors
    """
    try:
        jinja_env = Environment(
            undefined=StrictUndefined,
            autoescape=select_autoescape(default_for_string=True, default=True),
        )
        rendered = jinja_env.from_string(cloud_init_text).render(
            additional_params or {}
        )
        return rendered
    except UndefinedError as e:
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
422
def _get_vdu_cloud_init_content(self, vdu, vnfd):
    """Return the cloud-init text of a VDU, or None if it declares none.

    "cloud-init-file" is read from the package storage (path depends on
    whether the package has a pkg-dir); inline "cloud-init" takes effect
    only when no file is declared.

    :param vdu: VDU descriptor fragment
    :param vnfd: VNFD, provides _admin.storage for file resolution
    :return: cloud-init text or None
    :raises LcmException: when the declared file cannot be read
    """
    cloud_init_content = cloud_init_file = None
    try:
        if vdu.get("cloud-init-file"):
            storage = vnfd["_admin"]["storage"]
            if storage["pkg-dir"]:
                cloud_init_file = "{}/{}/cloud_init/{}".format(
                    storage["folder"],
                    storage["pkg-dir"],
                    vdu["cloud-init-file"],
                )
            else:
                cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                    storage["folder"],
                    vdu["cloud-init-file"],
                )
            with self.fs.file_open(cloud_init_file, "r") as ci_file:
                cloud_init_content = ci_file.read()
        elif vdu.get("cloud-init"):
            cloud_init_content = vdu["cloud-init"]

        return cloud_init_content
    except FsException as e:
        raise LcmException(
            "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                vnfd["id"], vdu["id"], cloud_init_file, e
            )
        )
451
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """Return the parsed additionalParams of the first VDU record matching vdu_id.

    Fix: guard against a VNFR without a "vdur" list — the original iterated
    ``db_vnfr.get("vdur")`` directly, which raises TypeError when the key is
    missing or None; now an empty tuple is used so an empty result ({} ->
    parse_yaml_strings(None)) is returned instead.

    :param db_vnfr: VNFR record from the database
    :param vdu_id: vdu-id-ref to look for among the vdur entries
    :return: the additionalParams of the matching vdur, parsed (yaml strings
        expanded) by parse_yaml_strings
    """
    vdur = next(
        (
            vdur
            for vdur in db_vnfr.get("vdur") or ()
            if vdu_id == vdur["vdu-id-ref"]
        ),
        {},
    )
    additional_params = vdur.get("additionalParams")
    return parse_yaml_strings(additional_params)
458
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd.

    Strips the keys RO does not use (internal ids, monitoring, scaling and
    k8s-related sections) and drops per-VDU cloud-init data.

    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided
    :param nsrId: Id of the NSR
    :return: copy of vnfd
    """
    vnfd_RO = deepcopy(vnfd)
    # remove unused by RO configuration, monitoring, scaling and internal keys
    for unused_key in (
        "_id",
        "_admin",
        "monitoring-param",
        "scaling-group-descriptor",
        "kdu",
        "k8s-cluster",
    ):
        vnfd_RO.pop(unused_key, None)
    if new_id:
        vnfd_RO["id"] = new_id

    # cloud-init is rendered by LCM, not RO: drop it from every vdu
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
484
485 @staticmethod
486 def ip_profile_2_RO(ip_profile):
487 RO_ip_profile = deepcopy(ip_profile)
488 if "dns-server" in RO_ip_profile:
489 if isinstance(RO_ip_profile["dns-server"], list):
490 RO_ip_profile["dns-address"] = []
491 for ds in RO_ip_profile.pop("dns-server"):
492 RO_ip_profile["dns-address"].append(ds["address"])
493 else:
494 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
495 if RO_ip_profile.get("ip-version") == "ipv4":
496 RO_ip_profile["ip-version"] = "IPv4"
497 if RO_ip_profile.get("ip-version") == "ipv6":
498 RO_ip_profile["ip-version"] = "IPv6"
499 if "dhcp-params" in RO_ip_profile:
500 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
501 return RO_ip_profile
502
def _get_ro_vim_id_for_vim_account(self, vim_account):
    """Resolve the RO-side id of a VIM account.

    :param vim_account: _id of the vim_accounts record
    :return: the id RO uses for this VIM (_admin.deployed.RO)
    :raises LcmException: when the VIM is not in ENABLED state
    """
    db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
    operational_state = db_vim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "VIM={} is not available. operationalState={}".format(
                vim_account, operational_state
            )
        )
    return db_vim["_admin"]["deployed"]["RO"]
513
def get_ro_wim_id_for_wim_account(self, wim_account):
    """Resolve the RO-side id of a WIM account.

    :param wim_account: _id of the wim_accounts record; non-string values
        are returned unchanged (already resolved or absent)
    :return: the id RO uses for this WIM (_admin.deployed.RO-account), or
        the input itself when it is not a string
    :raises LcmException: when the WIM is not in ENABLED state
    """
    if not isinstance(wim_account, str):
        # nothing to resolve
        return wim_account
    db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
    operational_state = db_wim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "WIM={} is not available. operationalState={}".format(
                wim_account, operational_state
            )
        )
    return db_wim["_admin"]["deployed"]["RO-account"]
527
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
    """Apply a scaling operation to a VNFR's vdur list in the database.

    Scale-out: for each (vdu_id, count) in vdu_create, clone the newest
    matching vdur (or the saved vdur-template when none exists) `count`
    times with fresh _id/count-index and reset status, and push the clones.
    Scale-in: for each (vdu_id, count) in vdu_delete, either mark the last
    `count` matching vdur entries as DELETING (mark_delete=True) or pull
    them from the record one by one. When scaling to 0 instances the last
    vdur is preserved as vdur-template for future scale-out.
    The passed db_vnfr dict is refreshed from the database at the end.

    :param db_vnfr: VNFR record (modified in place at the end)
    :param vdu_create: dict vdu-id-ref -> number of instances to add
    :param vdu_delete: dict vdu-id-ref -> number of instances to remove
    :param mark_delete: if True, only mark vdurs as DELETING instead of
        pulling them from the record
    :return: None
    :raises LcmException: scale-out requested but neither a vdur nor a
        vdur-template exists for the vdu
    """
    db_vdu_push_list = []
    template_vdur = []
    db_update = {"_admin.modified": time()}
    if vdu_create:
        for vdu_id, vdu_count in vdu_create.items():
            # newest existing instance of this vdu (list is scanned reversed)
            vdur = next(
                (
                    vdur
                    for vdur in reversed(db_vnfr["vdur"])
                    if vdur["vdu-id-ref"] == vdu_id
                ),
                None,
            )
            if not vdur:
                # Read the template saved in the db:
                self.logger.debug(
                    "No vdur in the database. Using the vdur-template to scale"
                )
                vdur_template = db_vnfr.get("vdur-template")
                if not vdur_template:
                    raise LcmException(
                        "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                            vdu_id
                        )
                    )
                vdur = vdur_template[0]
                # Delete a template from the database after using it
                self.db.set_one(
                    "vnfrs",
                    {"_id": db_vnfr["_id"]},
                    None,
                    pull={"vdur-template": {"_id": vdur["_id"]}},
                )
            for count in range(vdu_count):
                vdur_copy = deepcopy(vdur)
                # reset runtime state; RO will fill it during deployment
                vdur_copy["status"] = "BUILD"
                vdur_copy["status-detailed"] = None
                vdur_copy["ip-address"] = None
                vdur_copy["_id"] = str(uuid4())
                vdur_copy["count-index"] += count + 1
                vdur_copy["id"] = "{}-{}".format(
                    vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                )
                vdur_copy.pop("vim_info", None)
                for iface in vdur_copy["interfaces"]:
                    # fixed addresses are incremented per new instance;
                    # dynamic ones are cleared so they get re-assigned
                    if iface.get("fixed-ip"):
                        iface["ip-address"] = self.increment_ip_mac(
                            iface["ip-address"], count + 1
                        )
                    else:
                        iface.pop("ip-address", None)
                    if iface.get("fixed-mac"):
                        iface["mac-address"] = self.increment_ip_mac(
                            iface["mac-address"], count + 1
                        )
                    else:
                        iface.pop("mac-address", None)
                    if db_vnfr["vdur"]:
                        iface.pop(
                            "mgmt_vnf", None
                        )  # only first vdu can be managment of vnf
                db_vdu_push_list.append(vdur_copy)
                # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
    if vdu_delete:
        if len(db_vnfr["vdur"]) == 1:
            # The scale will move to 0 instances
            self.logger.debug(
                "Scaling to 0 !, creating the template with the last vdur"
            )
            template_vdur = [db_vnfr["vdur"][0]]
        for vdu_id, vdu_count in vdu_delete.items():
            if mark_delete:
                indexes_to_delete = [
                    iv[0]
                    for iv in enumerate(db_vnfr["vdur"])
                    if iv[1]["vdu-id-ref"] == vdu_id
                ]
                # mark only the last vdu_count matching entries
                db_update.update(
                    {
                        "vdur.{}.status".format(i): "DELETING"
                        for i in indexes_to_delete[-vdu_count:]
                    }
                )
            else:
                # it must be deleted one by one because common.db does not allow otherwise
                vdus_to_delete = [
                    v
                    for v in reversed(db_vnfr["vdur"])
                    if v["vdu-id-ref"] == vdu_id
                ]
                for vdu in vdus_to_delete[:vdu_count]:
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur": {"_id": vdu["_id"]}},
                    )
    # single write with the accumulated pushes; None means no push at all
    db_push = {}
    if db_vdu_push_list:
        db_push["vdur"] = db_vdu_push_list
    if template_vdur:
        db_push["vdur-template"] = template_vdur
    if not db_push:
        db_push = None
    db_vnfr["vdur-template"] = template_vdur
    self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
    # modify passed dictionary db_vnfr
    db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
    db_vnfr["vdur"] = db_vnfr_["vdur"]
638
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld.

    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        # locate the RO network that corresponds to this vld
        net_RO = next(
            (
                net
                for net in get_iterable(nsr_desc_RO, "nets")
                if vld["id"] == net.get("ns_net_osm_id")
            ),
            None,
        )
        if net_RO is None:
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
        # copy the VIM-side data into the vld and record the dotted update
        vld["vim-id"] = net_RO.get("vim_net_id")
        vld["name"] = net_RO.get("vim_name")
        vld["status"] = net_RO.get("status")
        vld["status-detailed"] = net_RO.get("error_msg")
        ns_update_nsr["vld.{}".format(vld_index)] = vld
662
def set_vnfr_at_error(self, db_vnfrs, error_text):
    """Mark all VNFRs (and their VDUs without a status) as ERROR in the DB.

    VDUs that already carry a status are left untouched. Database errors
    are logged, not raised.

    :param db_vnfrs: dict member-vnf-index -> VNFR record (modified in place)
    :param error_text: detail stored in status-detailed of the marked VDUs
    :return: None
    """
    try:
        for db_vnfr in db_vnfrs.values():
            vnfr_update = {"status": "ERROR"}
            for index, vdu_record in enumerate(get_iterable(db_vnfr, "vdur")):
                if "status" in vdu_record:
                    continue
                vdu_record["status"] = "ERROR"
                vnfr_update[f"vdur.{index}.status"] = "ERROR"
                if error_text:
                    vdu_record["status-detailed"] = str(error_text)
                    vnfr_update[f"vdur.{index}.status-detailed"] = "ERROR"
            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
    except DbException as e:
        self.logger.error("Cannot update vnf. {}".format(e))
679
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated.

    For each VNFR, the matching RO vnf (by member_vnf_index) is located and
    its VM/net data copied into the vdur/vld entries; the accumulated
    changes are written in one update per VNFR. Every for/else below raises
    when no matching RO element is found.

    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            if vnf_RO.get("ip_address"):
                # keep only the first address of a ";"-separated list
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                vdur_RO_count_index = 0
                if vdur.get("pdu-type"):
                    # PDUs are not deployed by RO; nothing to copy
                    continue
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    # skip RO VMs of the same vdu until the one whose
                    # position matches this vdur's count-index
                    if vdur["count-index"] != vdur_RO_count_index:
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    # copy per-interface addresses, matched by internal name
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get(
                                    "ip_address"
                                )
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break

        else:
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
776
def _get_ns_config_info(self, nsr_id):
    """
    Generates a mapping between vnf,vdu elements and the N2VC id.

    :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
    :return: a dictionary with {osm-config-mapping: {}} where its element contains:
        "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
        "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
    """
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
    mapping = {}
    for vca in vca_deployed_list:
        member_index = vca["member-vnf-index"]
        # entries without a member index are NS-level charms: not mapped
        if not member_index:
            continue
        if vca["vdu_id"]:
            key = "{}.{}.{}".format(
                member_index, vca["vdu_id"], vca["vdu_count_index"]
            )
        else:
            key = member_index
        mapping[key] = vca["application"]
    return {"osm-config-mapping": mapping}
801
802 async def _instantiate_ng_ro(
803 self,
804 logging_text,
805 nsr_id,
806 nsd,
807 db_nsr,
808 db_nslcmop,
809 db_vnfrs,
810 db_vnfds,
811 n2vc_key_list,
812 stage,
813 start_deploy,
814 timeout_ns_deploy,
815 ):
816 db_vims = {}
817
818 def get_vim_account(vim_account_id):
819 nonlocal db_vims
820 if vim_account_id in db_vims:
821 return db_vims[vim_account_id]
822 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
823 db_vims[vim_account_id] = db_vim
824 return db_vim
825
826 # modify target_vld info with instantiation parameters
827 def parse_vld_instantiation_params(
828 target_vim, target_vld, vld_params, target_sdn
829 ):
830 if vld_params.get("ip-profile"):
831 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
832 vld_params["ip-profile"]
833 )
834 if vld_params.get("provider-network"):
835 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
836 "provider-network"
837 ]
838 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
839 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
840 "provider-network"
841 ]["sdn-ports"]
842
843 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
844 # if wim_account_id is specified in vld_params, validate if it is feasible.
845 wim_account_id, db_wim = select_feasible_wim_account(
846 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
847 )
848
849 if wim_account_id:
850 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
851 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
852 # update vld_params with correct WIM account Id
853 vld_params["wimAccountId"] = wim_account_id
854
855 target_wim = "wim:{}".format(wim_account_id)
856 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
857 sdn_ports = get_sdn_ports(vld_params, db_wim)
858 if len(sdn_ports) > 0:
859 target_vld["vim_info"][target_wim] = target_wim_attrs
860 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
861
862 self.logger.debug(
863 "Target VLD with WIM data: {:s}".format(str(target_vld))
864 )
865
866 for param in ("vim-network-name", "vim-network-id"):
867 if vld_params.get(param):
868 if isinstance(vld_params[param], dict):
869 for vim, vim_net in vld_params[param].items():
870 other_target_vim = "vim:" + vim
871 populate_dict(
872 target_vld["vim_info"],
873 (other_target_vim, param.replace("-", "_")),
874 vim_net,
875 )
876 else: # isinstance str
877 target_vld["vim_info"][target_vim][
878 param.replace("-", "_")
879 ] = vld_params[param]
880 if vld_params.get("common_id"):
881 target_vld["common_id"] = vld_params.get("common_id")
882
883 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
884 def update_ns_vld_target(target, ns_params):
885 for vnf_params in ns_params.get("vnf", ()):
886 if vnf_params.get("vimAccountId"):
887 target_vnf = next(
888 (
889 vnfr
890 for vnfr in db_vnfrs.values()
891 if vnf_params["member-vnf-index"]
892 == vnfr["member-vnf-index-ref"]
893 ),
894 None,
895 )
896 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
897 if not vdur:
898 return
899 for a_index, a_vld in enumerate(target["ns"]["vld"]):
900 target_vld = find_in_list(
901 get_iterable(vdur, "interfaces"),
902 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
903 )
904
905 vld_params = find_in_list(
906 get_iterable(ns_params, "vld"),
907 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
908 )
909 if target_vld:
910 if vnf_params.get("vimAccountId") not in a_vld.get(
911 "vim_info", {}
912 ):
913 target_vim_network_list = [
914 v for _, v in a_vld.get("vim_info").items()
915 ]
916 target_vim_network_name = next(
917 (
918 item.get("vim_network_name", "")
919 for item in target_vim_network_list
920 ),
921 "",
922 )
923
924 target["ns"]["vld"][a_index].get("vim_info").update(
925 {
926 "vim:{}".format(vnf_params["vimAccountId"]): {
927 "vim_network_name": target_vim_network_name,
928 }
929 }
930 )
931
932 if vld_params:
933 for param in ("vim-network-name", "vim-network-id"):
934 if vld_params.get(param) and isinstance(
935 vld_params[param], dict
936 ):
937 for vim, vim_net in vld_params[
938 param
939 ].items():
940 other_target_vim = "vim:" + vim
941 populate_dict(
942 target["ns"]["vld"][a_index].get(
943 "vim_info"
944 ),
945 (
946 other_target_vim,
947 param.replace("-", "_"),
948 ),
949 vim_net,
950 )
951
952 nslcmop_id = db_nslcmop["_id"]
953 target = {
954 "name": db_nsr["name"],
955 "ns": {"vld": []},
956 "vnf": [],
957 "image": deepcopy(db_nsr["image"]),
958 "flavor": deepcopy(db_nsr["flavor"]),
959 "action_id": nslcmop_id,
960 "cloud_init_content": {},
961 }
962 for image in target["image"]:
963 image["vim_info"] = {}
964 for flavor in target["flavor"]:
965 flavor["vim_info"] = {}
966 if db_nsr.get("affinity-or-anti-affinity-group"):
967 target["affinity-or-anti-affinity-group"] = deepcopy(
968 db_nsr["affinity-or-anti-affinity-group"]
969 )
970 for affinity_or_anti_affinity_group in target[
971 "affinity-or-anti-affinity-group"
972 ]:
973 affinity_or_anti_affinity_group["vim_info"] = {}
974
975 if db_nslcmop.get("lcmOperationType") != "instantiate":
976 # get parameters of instantiation:
977 db_nslcmop_instantiate = self.db.get_list(
978 "nslcmops",
979 {
980 "nsInstanceId": db_nslcmop["nsInstanceId"],
981 "lcmOperationType": "instantiate",
982 },
983 )[-1]
984 ns_params = db_nslcmop_instantiate.get("operationParams")
985 else:
986 ns_params = db_nslcmop.get("operationParams")
987 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
988 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
989
990 cp2target = {}
991 for vld_index, vld in enumerate(db_nsr.get("vld")):
992 target_vim = "vim:{}".format(ns_params["vimAccountId"])
993 target_vld = {
994 "id": vld["id"],
995 "name": vld["name"],
996 "mgmt-network": vld.get("mgmt-network", False),
997 "type": vld.get("type"),
998 "vim_info": {
999 target_vim: {
1000 "vim_network_name": vld.get("vim-network-name"),
1001 "vim_account_id": ns_params["vimAccountId"],
1002 }
1003 },
1004 }
1005 # check if this network needs SDN assist
1006 if vld.get("pci-interfaces"):
1007 db_vim = get_vim_account(ns_params["vimAccountId"])
1008 if vim_config := db_vim.get("config"):
1009 if sdnc_id := vim_config.get("sdn-controller"):
1010 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1011 target_sdn = "sdn:{}".format(sdnc_id)
1012 target_vld["vim_info"][target_sdn] = {
1013 "sdn": True,
1014 "target_vim": target_vim,
1015 "vlds": [sdn_vld],
1016 "type": vld.get("type"),
1017 }
1018
1019 nsd_vnf_profiles = get_vnf_profiles(nsd)
1020 for nsd_vnf_profile in nsd_vnf_profiles:
1021 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1022 if cp["virtual-link-profile-id"] == vld["id"]:
1023 cp2target[
1024 "member_vnf:{}.{}".format(
1025 cp["constituent-cpd-id"][0][
1026 "constituent-base-element-id"
1027 ],
1028 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1029 )
1030 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1031
1032 # check at nsd descriptor, if there is an ip-profile
1033 vld_params = {}
1034 nsd_vlp = find_in_list(
1035 get_virtual_link_profiles(nsd),
1036 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1037 == vld["id"],
1038 )
1039 if (
1040 nsd_vlp
1041 and nsd_vlp.get("virtual-link-protocol-data")
1042 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1043 ):
1044 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
1045 "l3-protocol-data"
1046 ]
1047
1048 # update vld_params with instantiation params
1049 vld_instantiation_params = find_in_list(
1050 get_iterable(ns_params, "vld"),
1051 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1052 )
1053 if vld_instantiation_params:
1054 vld_params.update(vld_instantiation_params)
1055 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1056 target["ns"]["vld"].append(target_vld)
1057 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1058 update_ns_vld_target(target, ns_params)
1059
1060 for vnfr in db_vnfrs.values():
1061 vnfd = find_in_list(
1062 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1063 )
1064 vnf_params = find_in_list(
1065 get_iterable(ns_params, "vnf"),
1066 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1067 )
1068 target_vnf = deepcopy(vnfr)
1069 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1070 for vld in target_vnf.get("vld", ()):
1071 # check if connected to a ns.vld, to fill target'
1072 vnf_cp = find_in_list(
1073 vnfd.get("int-virtual-link-desc", ()),
1074 lambda cpd: cpd.get("id") == vld["id"],
1075 )
1076 if vnf_cp:
1077 ns_cp = "member_vnf:{}.{}".format(
1078 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1079 )
1080 if cp2target.get(ns_cp):
1081 vld["target"] = cp2target[ns_cp]
1082
1083 vld["vim_info"] = {
1084 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1085 }
1086 # check if this network needs SDN assist
1087 target_sdn = None
1088 if vld.get("pci-interfaces"):
1089 db_vim = get_vim_account(vnfr["vim-account-id"])
1090 sdnc_id = db_vim["config"].get("sdn-controller")
1091 if sdnc_id:
1092 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1093 target_sdn = "sdn:{}".format(sdnc_id)
1094 vld["vim_info"][target_sdn] = {
1095 "sdn": True,
1096 "target_vim": target_vim,
1097 "vlds": [sdn_vld],
1098 "type": vld.get("type"),
1099 }
1100
1101 # check at vnfd descriptor, if there is an ip-profile
1102 vld_params = {}
1103 vnfd_vlp = find_in_list(
1104 get_virtual_link_profiles(vnfd),
1105 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1106 )
1107 if (
1108 vnfd_vlp
1109 and vnfd_vlp.get("virtual-link-protocol-data")
1110 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1111 ):
1112 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
1113 "l3-protocol-data"
1114 ]
1115 # update vld_params with instantiation params
1116 if vnf_params:
1117 vld_instantiation_params = find_in_list(
1118 get_iterable(vnf_params, "internal-vld"),
1119 lambda i_vld: i_vld["name"] == vld["id"],
1120 )
1121 if vld_instantiation_params:
1122 vld_params.update(vld_instantiation_params)
1123 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1124
1125 vdur_list = []
1126 for vdur in target_vnf.get("vdur", ()):
1127 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1128 continue # This vdu must not be created
1129 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1130
1131 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1132
1133 if ssh_keys_all:
1134 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1135 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1136 if (
1137 vdu_configuration
1138 and vdu_configuration.get("config-access")
1139 and vdu_configuration.get("config-access").get("ssh-access")
1140 ):
1141 vdur["ssh-keys"] = ssh_keys_all
1142 vdur["ssh-access-required"] = vdu_configuration[
1143 "config-access"
1144 ]["ssh-access"]["required"]
1145 elif (
1146 vnf_configuration
1147 and vnf_configuration.get("config-access")
1148 and vnf_configuration.get("config-access").get("ssh-access")
1149 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1150 ):
1151 vdur["ssh-keys"] = ssh_keys_all
1152 vdur["ssh-access-required"] = vnf_configuration[
1153 "config-access"
1154 ]["ssh-access"]["required"]
1155 elif ssh_keys_instantiation and find_in_list(
1156 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1157 ):
1158 vdur["ssh-keys"] = ssh_keys_instantiation
1159
1160 self.logger.debug("NS > vdur > {}".format(vdur))
1161
1162 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1163 # cloud-init
1164 if vdud.get("cloud-init-file"):
1165 vdur["cloud-init"] = "{}:file:{}".format(
1166 vnfd["_id"], vdud.get("cloud-init-file")
1167 )
1168 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1169 if vdur["cloud-init"] not in target["cloud_init_content"]:
1170 base_folder = vnfd["_admin"]["storage"]
1171 if base_folder["pkg-dir"]:
1172 cloud_init_file = "{}/{}/cloud_init/{}".format(
1173 base_folder["folder"],
1174 base_folder["pkg-dir"],
1175 vdud.get("cloud-init-file"),
1176 )
1177 else:
1178 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1179 base_folder["folder"],
1180 vdud.get("cloud-init-file"),
1181 )
1182 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1183 target["cloud_init_content"][
1184 vdur["cloud-init"]
1185 ] = ci_file.read()
1186 elif vdud.get("cloud-init"):
1187 vdur["cloud-init"] = "{}:vdu:{}".format(
1188 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1189 )
1190 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1191 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1192 "cloud-init"
1193 ]
1194 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1195 deploy_params_vdu = self._format_additional_params(
1196 vdur.get("additionalParams") or {}
1197 )
1198 deploy_params_vdu["OSM"] = get_osm_params(
1199 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1200 )
1201 vdur["additionalParams"] = deploy_params_vdu
1202
1203 # flavor
1204 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1205 if target_vim not in ns_flavor["vim_info"]:
1206 ns_flavor["vim_info"][target_vim] = {}
1207
1208 # deal with images
1209 # in case alternative images are provided we must check if they should be applied
1210 # for the vim_type, modify the vim_type taking into account
1211 ns_image_id = int(vdur["ns-image-id"])
1212 if vdur.get("alt-image-ids"):
1213 db_vim = get_vim_account(vnfr["vim-account-id"])
1214 vim_type = db_vim["vim_type"]
1215 for alt_image_id in vdur.get("alt-image-ids"):
1216 ns_alt_image = target["image"][int(alt_image_id)]
1217 if vim_type == ns_alt_image.get("vim-type"):
1218 # must use alternative image
1219 self.logger.debug(
1220 "use alternative image id: {}".format(alt_image_id)
1221 )
1222 ns_image_id = alt_image_id
1223 vdur["ns-image-id"] = ns_image_id
1224 break
1225 ns_image = target["image"][int(ns_image_id)]
1226 if target_vim not in ns_image["vim_info"]:
1227 ns_image["vim_info"][target_vim] = {}
1228
1229 # Affinity groups
1230 if vdur.get("affinity-or-anti-affinity-group-id"):
1231 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1232 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1233 if target_vim not in ns_ags["vim_info"]:
1234 ns_ags["vim_info"][target_vim] = {}
1235
1236 vdur["vim_info"] = {target_vim: {}}
1237 # instantiation parameters
1238 if vnf_params:
1239 vdu_instantiation_params = find_in_list(
1240 get_iterable(vnf_params, "vdu"),
1241 lambda i_vdu: i_vdu["id"] == vdud["id"],
1242 )
1243 if vdu_instantiation_params:
1244 # Parse the vdu_volumes from the instantiation params
1245 vdu_volumes = get_volumes_from_instantiation_params(
1246 vdu_instantiation_params, vdud
1247 )
1248 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1249 vdur["additionalParams"]["OSM"][
1250 "vim_flavor_id"
1251 ] = vdu_instantiation_params.get("vim-flavor-id")
1252 vdur_list.append(vdur)
1253 target_vnf["vdur"] = vdur_list
1254 target["vnf"].append(target_vnf)
1255
1256 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1257 desc = await self.RO.deploy(nsr_id, target)
1258 self.logger.debug("RO return > {}".format(desc))
1259 action_id = desc["action_id"]
1260 await self._wait_ng_ro(
1261 nsr_id,
1262 action_id,
1263 nslcmop_id,
1264 start_deploy,
1265 timeout_ns_deploy,
1266 stage,
1267 operation="instantiation",
1268 )
1269
1270 # Updating NSR
1271 db_nsr_update = {
1272 "_admin.deployed.RO.operational-status": "running",
1273 "detailed-status": " ".join(stage),
1274 }
1275 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1276 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1277 self._write_op_status(nslcmop_id, stage)
1278 self.logger.debug(
1279 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1280 )
1281 return
1282
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        """
        Poll NG-RO until the given action finishes, fails or the timeout expires.

        :param nsr_id: NS record identifier the action belongs to
        :param action_id: RO action id as returned by RO.deploy
        :param nslcmop_id: operation id; when given together with stage, progress
            is persisted to the nsrs collection and the operation status
        :param start_time: epoch seconds when the action started; defaults to now
        :param timeout: maximum seconds to keep polling
        :param stage: 3-item list; stage[2] is overwritten with VIM progress text
        :param operation: key into self.op_status_map selecting the status coroutine
            (e.g. "instantiation" or "termination")
        :raises NgRoException: when RO reports FAILED or the timeout expires
        """
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # persist progress only when the detailed status actually changed,
            # to avoid redundant DB writes on every poll cycle
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15)
        else:  # while exhausted without break: deploy timeout reached
            raise NgRoException("Timeout waiting ns to deploy")
1320
1321 async def _terminate_ng_ro(
1322 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1323 ):
1324 db_nsr_update = {}
1325 failed_detail = []
1326 action_id = None
1327 start_deploy = time()
1328 try:
1329 target = {
1330 "ns": {"vld": []},
1331 "vnf": [],
1332 "image": [],
1333 "flavor": [],
1334 "action_id": nslcmop_id,
1335 }
1336 desc = await self.RO.deploy(nsr_id, target)
1337 action_id = desc["action_id"]
1338 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1339 self.logger.debug(
1340 logging_text
1341 + "ns terminate action at RO. action_id={}".format(action_id)
1342 )
1343
1344 # wait until done
1345 delete_timeout = 20 * 60 # 20 minutes
1346 await self._wait_ng_ro(
1347 nsr_id,
1348 action_id,
1349 nslcmop_id,
1350 start_deploy,
1351 delete_timeout,
1352 stage,
1353 operation="termination",
1354 )
1355 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1356 # delete all nsr
1357 await self.RO.delete(nsr_id)
1358 except NgRoException as e:
1359 if e.http_code == 404: # not found
1360 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1361 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1362 self.logger.debug(
1363 logging_text + "RO_action_id={} already deleted".format(action_id)
1364 )
1365 elif e.http_code == 409: # conflict
1366 failed_detail.append("delete conflict: {}".format(e))
1367 self.logger.debug(
1368 logging_text
1369 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1370 )
1371 else:
1372 failed_detail.append("delete error: {}".format(e))
1373 self.logger.error(
1374 logging_text
1375 + "RO_action_id={} delete error: {}".format(action_id, e)
1376 )
1377 except Exception as e:
1378 failed_detail.append("delete error: {}".format(e))
1379 self.logger.error(
1380 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1381 )
1382
1383 if failed_detail:
1384 stage[2] = "Error deleting from VIM"
1385 else:
1386 stage[2] = "Deleted from VIM"
1387 db_nsr_update["detailed-status"] = " ".join(stage)
1388 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1389 self._write_op_status(nslcmop_id, stage)
1390
1391 if failed_detail:
1392 raise LcmException("; ".join(failed_detail))
1393 return
1394
1395 async def instantiate_RO(
1396 self,
1397 logging_text,
1398 nsr_id,
1399 nsd,
1400 db_nsr,
1401 db_nslcmop,
1402 db_vnfrs,
1403 db_vnfds,
1404 n2vc_key_list,
1405 stage,
1406 ):
1407 """
1408 Instantiate at RO
1409 :param logging_text: preffix text to use at logging
1410 :param nsr_id: nsr identity
1411 :param nsd: database content of ns descriptor
1412 :param db_nsr: database content of ns record
1413 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1414 :param db_vnfrs:
1415 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1416 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1417 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1418 :return: None or exception
1419 """
1420 try:
1421 start_deploy = time()
1422 ns_params = db_nslcmop.get("operationParams")
1423 if ns_params and ns_params.get("timeout_ns_deploy"):
1424 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1425 else:
1426 timeout_ns_deploy = self.timeout.ns_deploy
1427
1428 # Check for and optionally request placement optimization. Database will be updated if placement activated
1429 stage[2] = "Waiting for Placement."
1430 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1431 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1432 for vnfr in db_vnfrs.values():
1433 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1434 break
1435 else:
1436 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1437
1438 return await self._instantiate_ng_ro(
1439 logging_text,
1440 nsr_id,
1441 nsd,
1442 db_nsr,
1443 db_nslcmop,
1444 db_vnfrs,
1445 db_vnfds,
1446 n2vc_key_list,
1447 stage,
1448 start_deploy,
1449 timeout_ns_deploy,
1450 )
1451 except Exception as e:
1452 stage[2] = "ERROR deploying at VIM"
1453 self.set_vnfr_at_error(db_vnfrs, str(e))
1454 self.logger.error(
1455 "Error deploying at VIM {}".format(e),
1456 exc_info=not isinstance(
1457 e,
1458 (
1459 ROclient.ROClientException,
1460 LcmException,
1461 DbException,
1462 NgRoException,
1463 ),
1464 ),
1465 )
1466 raise
1467
1468 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1469 """
1470 Wait for kdu to be up, get ip address
1471 :param logging_text: prefix use for logging
1472 :param nsr_id:
1473 :param vnfr_id:
1474 :param kdu_name:
1475 :return: IP address, K8s services
1476 """
1477
1478 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1479 nb_tries = 0
1480
1481 while nb_tries < 360:
1482 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1483 kdur = next(
1484 (
1485 x
1486 for x in get_iterable(db_vnfr, "kdur")
1487 if x.get("kdu-name") == kdu_name
1488 ),
1489 None,
1490 )
1491 if not kdur:
1492 raise LcmException(
1493 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1494 )
1495 if kdur.get("status"):
1496 if kdur["status"] in ("READY", "ENABLED"):
1497 return kdur.get("ip-address"), kdur.get("services")
1498 else:
1499 raise LcmException(
1500 "target KDU={} is in error state".format(kdu_name)
1501 )
1502
1503 await asyncio.sleep(10)
1504 nb_tries += 1
1505 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1506
1507 async def wait_vm_up_insert_key_ro(
1508 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1509 ):
1510 """
1511 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1512 :param logging_text: prefix use for logging
1513 :param nsr_id:
1514 :param vnfr_id:
1515 :param vdu_id:
1516 :param vdu_index:
1517 :param pub_key: public ssh key to inject, None to skip
1518 :param user: user to apply the public ssh key
1519 :return: IP address
1520 """
1521
1522 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1523 ip_address = None
1524 target_vdu_id = None
1525 ro_retries = 0
1526
1527 while True:
1528 ro_retries += 1
1529 if ro_retries >= 360: # 1 hour
1530 raise LcmException(
1531 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1532 )
1533
1534 await asyncio.sleep(10)
1535
1536 # get ip address
1537 if not target_vdu_id:
1538 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1539
1540 if not vdu_id: # for the VNF case
1541 if db_vnfr.get("status") == "ERROR":
1542 raise LcmException(
1543 "Cannot inject ssh-key because target VNF is in error state"
1544 )
1545 ip_address = db_vnfr.get("ip-address")
1546 if not ip_address:
1547 continue
1548 vdur = next(
1549 (
1550 x
1551 for x in get_iterable(db_vnfr, "vdur")
1552 if x.get("ip-address") == ip_address
1553 ),
1554 None,
1555 )
1556 else: # VDU case
1557 vdur = next(
1558 (
1559 x
1560 for x in get_iterable(db_vnfr, "vdur")
1561 if x.get("vdu-id-ref") == vdu_id
1562 and x.get("count-index") == vdu_index
1563 ),
1564 None,
1565 )
1566
1567 if (
1568 not vdur and len(db_vnfr.get("vdur", ())) == 1
1569 ): # If only one, this should be the target vdu
1570 vdur = db_vnfr["vdur"][0]
1571 if not vdur:
1572 raise LcmException(
1573 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1574 vnfr_id, vdu_id, vdu_index
1575 )
1576 )
1577 # New generation RO stores information at "vim_info"
1578 ng_ro_status = None
1579 target_vim = None
1580 if vdur.get("vim_info"):
1581 target_vim = next(
1582 t for t in vdur["vim_info"]
1583 ) # there should be only one key
1584 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1585 if (
1586 vdur.get("pdu-type")
1587 or vdur.get("status") == "ACTIVE"
1588 or ng_ro_status == "ACTIVE"
1589 ):
1590 ip_address = vdur.get("ip-address")
1591 if not ip_address:
1592 continue
1593 target_vdu_id = vdur["vdu-id-ref"]
1594 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1595 raise LcmException(
1596 "Cannot inject ssh-key because target VM is in error state"
1597 )
1598
1599 if not target_vdu_id:
1600 continue
1601
1602 # inject public key into machine
1603 if pub_key and user:
1604 self.logger.debug(logging_text + "Inserting RO key")
1605 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1606 if vdur.get("pdu-type"):
1607 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1608 return ip_address
1609 try:
1610 target = {
1611 "action": {
1612 "action": "inject_ssh_key",
1613 "key": pub_key,
1614 "user": user,
1615 },
1616 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1617 }
1618 desc = await self.RO.deploy(nsr_id, target)
1619 action_id = desc["action_id"]
1620 await self._wait_ng_ro(
1621 nsr_id, action_id, timeout=600, operation="instantiation"
1622 )
1623 break
1624 except NgRoException as e:
1625 raise LcmException(
1626 "Reaching max tries injecting key. Error: {}".format(e)
1627 )
1628 else:
1629 break
1630
1631 return ip_address
1632
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record identifier used to re-read configurationStatus
        :param vca_deployed_list: list of deployed VCAs; entry at vca_index is "me"
        :param vca_index: index of the VCA whose dependencies must be READY
        :raises LcmException: if a dependency is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): decremented by 1 per 10-second sleep, so 300 yields
        # ~50 minutes of waiting, not 300 seconds — confirm whether the unit
        # is intended to be iterations or seconds
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a VCA is a dependency when I am NS-level (no member-vnf-index)
                # or when it belongs to my same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still in progress: break out and sleep before retrying
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1670
1671 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1672 vca_id = None
1673 if db_vnfr:
1674 vca_id = deep_get(db_vnfr, ("vca-id",))
1675 elif db_nsr:
1676 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1677 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1678 return vca_id
1679
1680 async def instantiate_N2VC(
1681 self,
1682 logging_text,
1683 vca_index,
1684 nsi_id,
1685 db_nsr,
1686 db_vnfr,
1687 vdu_id,
1688 kdu_name,
1689 vdu_index,
1690 kdu_index,
1691 config_descriptor,
1692 deploy_params,
1693 base_folder,
1694 nslcmop_id,
1695 stage,
1696 vca_type,
1697 vca_name,
1698 ee_config_descriptor,
1699 ):
1700 nsr_id = db_nsr["_id"]
1701 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1702 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1703 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1704 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1705 db_dict = {
1706 "collection": "nsrs",
1707 "filter": {"_id": nsr_id},
1708 "path": db_update_entry,
1709 }
1710 step = ""
1711 try:
1712 element_type = "NS"
1713 element_under_configuration = nsr_id
1714
1715 vnfr_id = None
1716 if db_vnfr:
1717 vnfr_id = db_vnfr["_id"]
1718 osm_config["osm"]["vnf_id"] = vnfr_id
1719
1720 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1721
1722 if vca_type == "native_charm":
1723 index_number = 0
1724 else:
1725 index_number = vdu_index or 0
1726
1727 if vnfr_id:
1728 element_type = "VNF"
1729 element_under_configuration = vnfr_id
1730 namespace += ".{}-{}".format(vnfr_id, index_number)
1731 if vdu_id:
1732 namespace += ".{}-{}".format(vdu_id, index_number)
1733 element_type = "VDU"
1734 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1735 osm_config["osm"]["vdu_id"] = vdu_id
1736 elif kdu_name:
1737 namespace += ".{}".format(kdu_name)
1738 element_type = "KDU"
1739 element_under_configuration = kdu_name
1740 osm_config["osm"]["kdu_name"] = kdu_name
1741
1742 # Get artifact path
1743 if base_folder["pkg-dir"]:
1744 artifact_path = "{}/{}/{}/{}".format(
1745 base_folder["folder"],
1746 base_folder["pkg-dir"],
1747 "charms"
1748 if vca_type
1749 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1750 else "helm-charts",
1751 vca_name,
1752 )
1753 else:
1754 artifact_path = "{}/Scripts/{}/{}/".format(
1755 base_folder["folder"],
1756 "charms"
1757 if vca_type
1758 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1759 else "helm-charts",
1760 vca_name,
1761 )
1762
1763 self.logger.debug("Artifact path > {}".format(artifact_path))
1764
1765 # get initial_config_primitive_list that applies to this element
1766 initial_config_primitive_list = config_descriptor.get(
1767 "initial-config-primitive"
1768 )
1769
1770 self.logger.debug(
1771 "Initial config primitive list > {}".format(
1772 initial_config_primitive_list
1773 )
1774 )
1775
1776 # add config if not present for NS charm
1777 ee_descriptor_id = ee_config_descriptor.get("id")
1778 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1779 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1780 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1781 )
1782
1783 self.logger.debug(
1784 "Initial config primitive list #2 > {}".format(
1785 initial_config_primitive_list
1786 )
1787 )
1788 # n2vc_redesign STEP 3.1
1789 # find old ee_id if exists
1790 ee_id = vca_deployed.get("ee_id")
1791
1792 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1793 # create or register execution environment in VCA
1794 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1795 self._write_configuration_status(
1796 nsr_id=nsr_id,
1797 vca_index=vca_index,
1798 status="CREATING",
1799 element_under_configuration=element_under_configuration,
1800 element_type=element_type,
1801 )
1802
1803 step = "create execution environment"
1804 self.logger.debug(logging_text + step)
1805
1806 ee_id = None
1807 credentials = None
1808 if vca_type == "k8s_proxy_charm":
1809 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1810 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1811 namespace=namespace,
1812 artifact_path=artifact_path,
1813 db_dict=db_dict,
1814 vca_id=vca_id,
1815 )
1816 elif vca_type == "helm" or vca_type == "helm-v3":
1817 ee_id, credentials = await self.vca_map[
1818 vca_type
1819 ].create_execution_environment(
1820 namespace=namespace,
1821 reuse_ee_id=ee_id,
1822 db_dict=db_dict,
1823 config=osm_config,
1824 artifact_path=artifact_path,
1825 chart_model=vca_name,
1826 vca_type=vca_type,
1827 )
1828 else:
1829 ee_id, credentials = await self.vca_map[
1830 vca_type
1831 ].create_execution_environment(
1832 namespace=namespace,
1833 reuse_ee_id=ee_id,
1834 db_dict=db_dict,
1835 vca_id=vca_id,
1836 )
1837
1838 elif vca_type == "native_charm":
1839 step = "Waiting to VM being up and getting IP address"
1840 self.logger.debug(logging_text + step)
1841 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1842 logging_text,
1843 nsr_id,
1844 vnfr_id,
1845 vdu_id,
1846 vdu_index,
1847 user=None,
1848 pub_key=None,
1849 )
1850 credentials = {"hostname": rw_mgmt_ip}
1851 # get username
1852 username = deep_get(
1853 config_descriptor, ("config-access", "ssh-access", "default-user")
1854 )
1855 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1856 # merged. Meanwhile let's get username from initial-config-primitive
1857 if not username and initial_config_primitive_list:
1858 for config_primitive in initial_config_primitive_list:
1859 for param in config_primitive.get("parameter", ()):
1860 if param["name"] == "ssh-username":
1861 username = param["value"]
1862 break
1863 if not username:
1864 raise LcmException(
1865 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1866 "'config-access.ssh-access.default-user'"
1867 )
1868 credentials["username"] = username
1869 # n2vc_redesign STEP 3.2
1870
1871 self._write_configuration_status(
1872 nsr_id=nsr_id,
1873 vca_index=vca_index,
1874 status="REGISTERING",
1875 element_under_configuration=element_under_configuration,
1876 element_type=element_type,
1877 )
1878
1879 step = "register execution environment {}".format(credentials)
1880 self.logger.debug(logging_text + step)
1881 ee_id = await self.vca_map[vca_type].register_execution_environment(
1882 credentials=credentials,
1883 namespace=namespace,
1884 db_dict=db_dict,
1885 vca_id=vca_id,
1886 )
1887
1888 # for compatibility with MON/POL modules, the need model and application name at database
1889 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1890 ee_id_parts = ee_id.split(".")
1891 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1892 if len(ee_id_parts) >= 2:
1893 model_name = ee_id_parts[0]
1894 application_name = ee_id_parts[1]
1895 db_nsr_update[db_update_entry + "model"] = model_name
1896 db_nsr_update[db_update_entry + "application"] = application_name
1897
1898 # n2vc_redesign STEP 3.3
1899 step = "Install configuration Software"
1900
1901 self._write_configuration_status(
1902 nsr_id=nsr_id,
1903 vca_index=vca_index,
1904 status="INSTALLING SW",
1905 element_under_configuration=element_under_configuration,
1906 element_type=element_type,
1907 other_update=db_nsr_update,
1908 )
1909
1910 # TODO check if already done
1911 self.logger.debug(logging_text + step)
1912 config = None
1913 if vca_type == "native_charm":
1914 config_primitive = next(
1915 (p for p in initial_config_primitive_list if p["name"] == "config"),
1916 None,
1917 )
1918 if config_primitive:
1919 config = self._map_primitive_params(
1920 config_primitive, {}, deploy_params
1921 )
1922 num_units = 1
1923 if vca_type == "lxc_proxy_charm":
1924 if element_type == "NS":
1925 num_units = db_nsr.get("config-units") or 1
1926 elif element_type == "VNF":
1927 num_units = db_vnfr.get("config-units") or 1
1928 elif element_type == "VDU":
1929 for v in db_vnfr["vdur"]:
1930 if vdu_id == v["vdu-id-ref"]:
1931 num_units = v.get("config-units") or 1
1932 break
1933 if vca_type != "k8s_proxy_charm":
1934 await self.vca_map[vca_type].install_configuration_sw(
1935 ee_id=ee_id,
1936 artifact_path=artifact_path,
1937 db_dict=db_dict,
1938 config=config,
1939 num_units=num_units,
1940 vca_id=vca_id,
1941 vca_type=vca_type,
1942 )
1943
1944 # write in db flag of configuration_sw already installed
1945 self.update_db_2(
1946 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1947 )
1948
1949 # add relations for this VCA (wait for other peers related with this VCA)
1950 is_relation_added = await self._add_vca_relations(
1951 logging_text=logging_text,
1952 nsr_id=nsr_id,
1953 vca_type=vca_type,
1954 vca_index=vca_index,
1955 )
1956
1957 if not is_relation_added:
1958 raise LcmException("Relations could not be added to VCA.")
1959
1960 # if SSH access is required, then get execution environment SSH public
1961 # if native charm we have waited already to VM be UP
1962 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1963 pub_key = None
1964 user = None
1965 # self.logger.debug("get ssh key block")
1966 if deep_get(
1967 config_descriptor, ("config-access", "ssh-access", "required")
1968 ):
1969 # self.logger.debug("ssh key needed")
1970 # Needed to inject a ssh key
1971 user = deep_get(
1972 config_descriptor,
1973 ("config-access", "ssh-access", "default-user"),
1974 )
1975 step = "Install configuration Software, getting public ssh key"
1976 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1977 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1978 )
1979
1980 step = "Insert public key into VM user={} ssh_key={}".format(
1981 user, pub_key
1982 )
1983 else:
1984 # self.logger.debug("no need to get ssh key")
1985 step = "Waiting to VM being up and getting IP address"
1986 self.logger.debug(logging_text + step)
1987
1988 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1989 rw_mgmt_ip = None
1990
1991 # n2vc_redesign STEP 5.1
1992 # wait for RO (ip-address) Insert pub_key into VM
1993 if vnfr_id:
1994 if kdu_name:
1995 rw_mgmt_ip, services = await self.wait_kdu_up(
1996 logging_text, nsr_id, vnfr_id, kdu_name
1997 )
1998 vnfd = self.db.get_one(
1999 "vnfds_revisions",
2000 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2001 )
2002 kdu = get_kdu(vnfd, kdu_name)
2003 kdu_services = [
2004 service["name"] for service in get_kdu_services(kdu)
2005 ]
2006 exposed_services = []
2007 for service in services:
2008 if any(s in service["name"] for s in kdu_services):
2009 exposed_services.append(service)
2010 await self.vca_map[vca_type].exec_primitive(
2011 ee_id=ee_id,
2012 primitive_name="config",
2013 params_dict={
2014 "osm-config": json.dumps(
2015 OsmConfigBuilder(
2016 k8s={"services": exposed_services}
2017 ).build()
2018 )
2019 },
2020 vca_id=vca_id,
2021 )
2022
2023 # This verification is needed in order to avoid trying to add a public key
2024 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2025 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2026 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2027 # or it is a KNF)
2028 elif db_vnfr.get("vdur"):
2029 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2030 logging_text,
2031 nsr_id,
2032 vnfr_id,
2033 vdu_id,
2034 vdu_index,
2035 user=user,
2036 pub_key=pub_key,
2037 )
2038
2039 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2040
2041 # store rw_mgmt_ip in deploy params for later replacement
2042 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2043
2044 # n2vc_redesign STEP 6 Execute initial config primitive
2045 step = "execute initial config primitive"
2046
2047 # wait for dependent primitives execution (NS -> VNF -> VDU)
2048 if initial_config_primitive_list:
2049 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2050
2051 # stage, in function of element type: vdu, kdu, vnf or ns
2052 my_vca = vca_deployed_list[vca_index]
2053 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2054 # VDU or KDU
2055 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2056 elif my_vca.get("member-vnf-index"):
2057 # VNF
2058 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2059 else:
2060 # NS
2061 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2062
2063 self._write_configuration_status(
2064 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2065 )
2066
2067 self._write_op_status(op_id=nslcmop_id, stage=stage)
2068
2069 check_if_terminated_needed = True
2070 for initial_config_primitive in initial_config_primitive_list:
2071 # adding information on the vca_deployed if it is a NS execution environment
2072 if not vca_deployed["member-vnf-index"]:
2073 deploy_params["ns_config_info"] = json.dumps(
2074 self._get_ns_config_info(nsr_id)
2075 )
2076 # TODO check if already done
2077 primitive_params_ = self._map_primitive_params(
2078 initial_config_primitive, {}, deploy_params
2079 )
2080
2081 step = "execute primitive '{}' params '{}'".format(
2082 initial_config_primitive["name"], primitive_params_
2083 )
2084 self.logger.debug(logging_text + step)
2085 await self.vca_map[vca_type].exec_primitive(
2086 ee_id=ee_id,
2087 primitive_name=initial_config_primitive["name"],
2088 params_dict=primitive_params_,
2089 db_dict=db_dict,
2090 vca_id=vca_id,
2091 vca_type=vca_type,
2092 )
2093 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2094 if check_if_terminated_needed:
2095 if config_descriptor.get("terminate-config-primitive"):
2096 self.update_db_2(
2097 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2098 )
2099 check_if_terminated_needed = False
2100
2101 # TODO register in database that primitive is done
2102
2103 # STEP 7 Configure metrics
2104 if vca_type == "helm" or vca_type == "helm-v3":
2105 # TODO: review for those cases where the helm chart is a reference and
2106 # is not part of the NF package
2107 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2108 ee_id=ee_id,
2109 artifact_path=artifact_path,
2110 ee_config_descriptor=ee_config_descriptor,
2111 vnfr_id=vnfr_id,
2112 nsr_id=nsr_id,
2113 target_ip=rw_mgmt_ip,
2114 element_type=element_type,
2115 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2116 vdu_id=vdu_id,
2117 vdu_index=vdu_index,
2118 kdu_name=kdu_name,
2119 kdu_index=kdu_index,
2120 )
2121 if prometheus_jobs:
2122 self.update_db_2(
2123 "nsrs",
2124 nsr_id,
2125 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2126 )
2127
2128 for job in prometheus_jobs:
2129 self.db.set_one(
2130 "prometheus_jobs",
2131 {"job_name": job["job_name"]},
2132 job,
2133 upsert=True,
2134 fail_on_empty=False,
2135 )
2136
2137 step = "instantiated at VCA"
2138 self.logger.debug(logging_text + step)
2139
2140 self._write_configuration_status(
2141 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2142 )
2143
2144 except Exception as e: # TODO not use Exception but N2VC exception
2145 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2146 if not isinstance(
2147 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2148 ):
2149 self.logger.error(
2150 "Exception while {} : {}".format(step, e), exc_info=True
2151 )
2152 self._write_configuration_status(
2153 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2154 )
2155 raise LcmException("{}. {}".format(step, e)) from e
2156
2157 def _write_ns_status(
2158 self,
2159 nsr_id: str,
2160 ns_state: str,
2161 current_operation: str,
2162 current_operation_id: str,
2163 error_description: str = None,
2164 error_detail: str = None,
2165 other_update: dict = None,
2166 ):
2167 """
2168 Update db_nsr fields.
2169 :param nsr_id:
2170 :param ns_state:
2171 :param current_operation:
2172 :param current_operation_id:
2173 :param error_description:
2174 :param error_detail:
2175 :param other_update: Other required changes at database if provided, will be cleared
2176 :return:
2177 """
2178 try:
2179 db_dict = other_update or {}
2180 db_dict[
2181 "_admin.nslcmop"
2182 ] = current_operation_id # for backward compatibility
2183 db_dict["_admin.current-operation"] = current_operation_id
2184 db_dict["_admin.operation-type"] = (
2185 current_operation if current_operation != "IDLE" else None
2186 )
2187 db_dict["currentOperation"] = current_operation
2188 db_dict["currentOperationID"] = current_operation_id
2189 db_dict["errorDescription"] = error_description
2190 db_dict["errorDetail"] = error_detail
2191
2192 if ns_state:
2193 db_dict["nsState"] = ns_state
2194 self.update_db_2("nsrs", nsr_id, db_dict)
2195 except DbException as e:
2196 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2197
2198 def _write_op_status(
2199 self,
2200 op_id: str,
2201 stage: list = None,
2202 error_message: str = None,
2203 queuePosition: int = 0,
2204 operation_state: str = None,
2205 other_update: dict = None,
2206 ):
2207 try:
2208 db_dict = other_update or {}
2209 db_dict["queuePosition"] = queuePosition
2210 if isinstance(stage, list):
2211 db_dict["stage"] = stage[0]
2212 db_dict["detailed-status"] = " ".join(stage)
2213 elif stage is not None:
2214 db_dict["stage"] = str(stage)
2215
2216 if error_message is not None:
2217 db_dict["errorMessage"] = error_message
2218 if operation_state is not None:
2219 db_dict["operationState"] = operation_state
2220 db_dict["statusEnteredTime"] = time()
2221 self.update_db_2("nslcmops", op_id, db_dict)
2222 except DbException as e:
2223 self.logger.warn(
2224 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2225 )
2226
2227 def _write_all_config_status(self, db_nsr: dict, status: str):
2228 try:
2229 nsr_id = db_nsr["_id"]
2230 # configurationStatus
2231 config_status = db_nsr.get("configurationStatus")
2232 if config_status:
2233 db_nsr_update = {
2234 "configurationStatus.{}.status".format(index): status
2235 for index, v in enumerate(config_status)
2236 if v
2237 }
2238 # update status
2239 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2240
2241 except DbException as e:
2242 self.logger.warn(
2243 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2244 )
2245
2246 def _write_configuration_status(
2247 self,
2248 nsr_id: str,
2249 vca_index: int,
2250 status: str = None,
2251 element_under_configuration: str = None,
2252 element_type: str = None,
2253 other_update: dict = None,
2254 ):
2255 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2256 # .format(vca_index, status))
2257
2258 try:
2259 db_path = "configurationStatus.{}.".format(vca_index)
2260 db_dict = other_update or {}
2261 if status:
2262 db_dict[db_path + "status"] = status
2263 if element_under_configuration:
2264 db_dict[
2265 db_path + "elementUnderConfiguration"
2266 ] = element_under_configuration
2267 if element_type:
2268 db_dict[db_path + "elementType"] = element_type
2269 self.update_db_2("nsrs", nsr_id, db_dict)
2270 except DbException as e:
2271 self.logger.warn(
2272 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2273 status, nsr_id, vca_index, e
2274 )
2275 )
2276
2277 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2278 """
2279 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2280 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2281 Database is used because the result can be obtained from a different LCM worker in case of HA.
2282 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2283 :param db_nslcmop: database content of nslcmop
2284 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2285 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2286 computed 'vim-account-id'
2287 """
2288 modified = False
2289 nslcmop_id = db_nslcmop["_id"]
2290 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2291 if placement_engine == "PLA":
2292 self.logger.debug(
2293 logging_text + "Invoke and wait for placement optimization"
2294 )
2295 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2296 db_poll_interval = 5
2297 wait = db_poll_interval * 10
2298 pla_result = None
2299 while not pla_result and wait >= 0:
2300 await asyncio.sleep(db_poll_interval)
2301 wait -= db_poll_interval
2302 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2303 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2304
2305 if not pla_result:
2306 raise LcmException(
2307 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2308 )
2309
2310 for pla_vnf in pla_result["vnf"]:
2311 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2312 if not pla_vnf.get("vimAccountId") or not vnfr:
2313 continue
2314 modified = True
2315 self.db.set_one(
2316 "vnfrs",
2317 {"_id": vnfr["_id"]},
2318 {"vim-account-id": pla_vnf["vimAccountId"]},
2319 )
2320 # Modifies db_vnfrs
2321 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2322 return modified
2323
2324 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2325 alerts = []
2326 nsr_id = vnfr["nsr-id-ref"]
2327 df = vnfd.get("df", [{}])[0]
2328 # Checking for auto-healing configuration
2329 if "healing-aspect" in df:
2330 healing_aspects = df["healing-aspect"]
2331 for healing in healing_aspects:
2332 for healing_policy in healing.get("healing-policy", ()):
2333 vdu_id = healing_policy["vdu-id"]
2334 vdur = next(
2335 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2336 {},
2337 )
2338 if not vdur:
2339 continue
2340 metric_name = "vm_status"
2341 vdu_name = vdur.get("name")
2342 vnf_member_index = vnfr["member-vnf-index-ref"]
2343 uuid = str(uuid4())
2344 name = f"healing_{uuid}"
2345 action = healing_policy
2346 # action_on_recovery = healing.get("action-on-recovery")
2347 # cooldown_time = healing.get("cooldown-time")
2348 # day1 = healing.get("day1")
2349 alert = {
2350 "uuid": uuid,
2351 "name": name,
2352 "metric": metric_name,
2353 "tags": {
2354 "ns_id": nsr_id,
2355 "vnf_member_index": vnf_member_index,
2356 "vdu_name": vdu_name,
2357 },
2358 "alarm_status": "ok",
2359 "action_type": "healing",
2360 "action": action,
2361 }
2362 alerts.append(alert)
2363 return alerts
2364
2365 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2366 alerts = []
2367 nsr_id = vnfr["nsr-id-ref"]
2368 df = vnfd.get("df", [{}])[0]
2369 # Checking for auto-scaling configuration
2370 if "scaling-aspect" in df:
2371 rel_operation_types = {
2372 "GE": ">=",
2373 "LE": "<=",
2374 "GT": ">",
2375 "LT": "<",
2376 "EQ": "==",
2377 "NE": "!=",
2378 }
2379 scaling_aspects = df["scaling-aspect"]
2380 all_vnfd_monitoring_params = {}
2381 for ivld in vnfd.get("int-virtual-link-desc", ()):
2382 for mp in ivld.get("monitoring-parameters", ()):
2383 all_vnfd_monitoring_params[mp.get("id")] = mp
2384 for vdu in vnfd.get("vdu", ()):
2385 for mp in vdu.get("monitoring-parameter", ()):
2386 all_vnfd_monitoring_params[mp.get("id")] = mp
2387 for df in vnfd.get("df", ()):
2388 for mp in df.get("monitoring-parameter", ()):
2389 all_vnfd_monitoring_params[mp.get("id")] = mp
2390 for scaling_aspect in scaling_aspects:
2391 scaling_group_name = scaling_aspect.get("name", "")
2392 # Get monitored VDUs
2393 all_monitored_vdus = set()
2394 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2395 "deltas", ()
2396 ):
2397 for vdu_delta in delta.get("vdu-delta", ()):
2398 all_monitored_vdus.add(vdu_delta.get("id"))
2399 monitored_vdurs = list(
2400 filter(
2401 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2402 vnfr["vdur"],
2403 )
2404 )
2405 if not monitored_vdurs:
2406 self.logger.error(
2407 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2408 )
2409 continue
2410 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2411 if scaling_policy["scaling-type"] != "automatic":
2412 continue
2413 threshold_time = scaling_policy.get("threshold-time", "1")
2414 cooldown_time = scaling_policy.get("cooldown-time", "0")
2415 for scaling_criteria in scaling_policy["scaling-criteria"]:
2416 monitoring_param_ref = scaling_criteria.get(
2417 "vnf-monitoring-param-ref"
2418 )
2419 vnf_monitoring_param = all_vnfd_monitoring_params[
2420 monitoring_param_ref
2421 ]
2422 for vdur in monitored_vdurs:
2423 vdu_id = vdur["vdu-id-ref"]
2424 metric_name = vnf_monitoring_param.get("performance-metric")
2425 metric_name = f"osm_{metric_name}"
2426 vnf_member_index = vnfr["member-vnf-index-ref"]
2427 scalein_threshold = scaling_criteria.get(
2428 "scale-in-threshold"
2429 )
2430 scaleout_threshold = scaling_criteria.get(
2431 "scale-out-threshold"
2432 )
2433 # Looking for min/max-number-of-instances
2434 instances_min_number = 1
2435 instances_max_number = 1
2436 vdu_profile = df["vdu-profile"]
2437 if vdu_profile:
2438 profile = next(
2439 item for item in vdu_profile if item["id"] == vdu_id
2440 )
2441 instances_min_number = profile.get(
2442 "min-number-of-instances", 1
2443 )
2444 instances_max_number = profile.get(
2445 "max-number-of-instances", 1
2446 )
2447
2448 if scalein_threshold:
2449 uuid = str(uuid4())
2450 name = f"scalein_{uuid}"
2451 operation = scaling_criteria[
2452 "scale-in-relational-operation"
2453 ]
2454 rel_operator = rel_operation_types.get(operation, "<=")
2455 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2456 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2457 labels = {
2458 "ns_id": nsr_id,
2459 "vnf_member_index": vnf_member_index,
2460 "vdu_id": vdu_id,
2461 }
2462 prom_cfg = {
2463 "alert": name,
2464 "expr": expression,
2465 "for": str(threshold_time) + "m",
2466 "labels": labels,
2467 }
2468 action = scaling_policy
2469 action = {
2470 "scaling-group": scaling_group_name,
2471 "cooldown-time": cooldown_time,
2472 }
2473 alert = {
2474 "uuid": uuid,
2475 "name": name,
2476 "metric": metric_name,
2477 "tags": {
2478 "ns_id": nsr_id,
2479 "vnf_member_index": vnf_member_index,
2480 "vdu_id": vdu_id,
2481 },
2482 "alarm_status": "ok",
2483 "action_type": "scale_in",
2484 "action": action,
2485 "prometheus_config": prom_cfg,
2486 }
2487 alerts.append(alert)
2488
2489 if scaleout_threshold:
2490 uuid = str(uuid4())
2491 name = f"scaleout_{uuid}"
2492 operation = scaling_criteria[
2493 "scale-out-relational-operation"
2494 ]
2495 rel_operator = rel_operation_types.get(operation, "<=")
2496 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2497 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2498 labels = {
2499 "ns_id": nsr_id,
2500 "vnf_member_index": vnf_member_index,
2501 "vdu_id": vdu_id,
2502 }
2503 prom_cfg = {
2504 "alert": name,
2505 "expr": expression,
2506 "for": str(threshold_time) + "m",
2507 "labels": labels,
2508 }
2509 action = scaling_policy
2510 action = {
2511 "scaling-group": scaling_group_name,
2512 "cooldown-time": cooldown_time,
2513 }
2514 alert = {
2515 "uuid": uuid,
2516 "name": name,
2517 "metric": metric_name,
2518 "tags": {
2519 "ns_id": nsr_id,
2520 "vnf_member_index": vnf_member_index,
2521 "vdu_id": vdu_id,
2522 },
2523 "alarm_status": "ok",
2524 "action_type": "scale_out",
2525 "action": action,
2526 "prometheus_config": prom_cfg,
2527 }
2528 alerts.append(alert)
2529 return alerts
2530
2531 def update_nsrs_with_pla_result(self, params):
2532 try:
2533 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2534 self.update_db_2(
2535 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2536 )
2537 except Exception as e:
2538 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2539
2540 async def instantiate(self, nsr_id, nslcmop_id):
2541 """
2542
2543 :param nsr_id: ns instance to deploy
2544 :param nslcmop_id: operation to run
2545 :return:
2546 """
2547
2548 # Try to lock HA task here
2549 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2550 if not task_is_locked_by_me:
2551 self.logger.debug(
2552 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2553 )
2554 return
2555
2556 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2557 self.logger.debug(logging_text + "Enter")
2558
2559 # get all needed from database
2560
2561 # database nsrs record
2562 db_nsr = None
2563
2564 # database nslcmops record
2565 db_nslcmop = None
2566
2567 # update operation on nsrs
2568 db_nsr_update = {}
2569 # update operation on nslcmops
2570 db_nslcmop_update = {}
2571
2572 timeout_ns_deploy = self.timeout.ns_deploy
2573
2574 nslcmop_operation_state = None
2575 db_vnfrs = {} # vnf's info indexed by member-index
2576 # n2vc_info = {}
2577 tasks_dict_info = {} # from task to info text
2578 exc = None
2579 error_list = []
2580 stage = [
2581 "Stage 1/5: preparation of the environment.",
2582 "Waiting for previous operations to terminate.",
2583 "",
2584 ]
2585 # ^ stage, step, VIM progress
2586 try:
2587 # wait for any previous tasks in process
2588 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2589
2590 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2591 stage[1] = "Reading from database."
2592 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2593 db_nsr_update["detailed-status"] = "creating"
2594 db_nsr_update["operational-status"] = "init"
2595 self._write_ns_status(
2596 nsr_id=nsr_id,
2597 ns_state="BUILDING",
2598 current_operation="INSTANTIATING",
2599 current_operation_id=nslcmop_id,
2600 other_update=db_nsr_update,
2601 )
2602 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2603
2604 # read from db: operation
2605 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2606 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2607 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2608 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2609 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2610 )
2611 ns_params = db_nslcmop.get("operationParams")
2612 if ns_params and ns_params.get("timeout_ns_deploy"):
2613 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2614
2615 # read from db: ns
2616 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2617 self.logger.debug(logging_text + stage[1])
2618 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2619 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2620 self.logger.debug(logging_text + stage[1])
2621 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2622 self.fs.sync(db_nsr["nsd-id"])
2623 db_nsr["nsd"] = nsd
2624 # nsr_name = db_nsr["name"] # TODO short-name??
2625
2626 # read from db: vnf's of this ns
2627 stage[1] = "Getting vnfrs from db."
2628 self.logger.debug(logging_text + stage[1])
2629 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2630
2631 # read from db: vnfd's for every vnf
2632 db_vnfds = [] # every vnfd data
2633
2634 # for each vnf in ns, read vnfd
2635 for vnfr in db_vnfrs_list:
2636 if vnfr.get("kdur"):
2637 kdur_list = []
2638 for kdur in vnfr["kdur"]:
2639 if kdur.get("additionalParams"):
2640 kdur["additionalParams"] = json.loads(
2641 kdur["additionalParams"]
2642 )
2643 kdur_list.append(kdur)
2644 vnfr["kdur"] = kdur_list
2645
2646 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2647 vnfd_id = vnfr["vnfd-id"]
2648 vnfd_ref = vnfr["vnfd-ref"]
2649 self.fs.sync(vnfd_id)
2650
2651 # if we haven't this vnfd, read it from db
2652 if vnfd_id not in db_vnfds:
2653 # read from db
2654 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2655 vnfd_id, vnfd_ref
2656 )
2657 self.logger.debug(logging_text + stage[1])
2658 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2659
2660 # store vnfd
2661 db_vnfds.append(vnfd)
2662
2663 # Get or generates the _admin.deployed.VCA list
2664 vca_deployed_list = None
2665 if db_nsr["_admin"].get("deployed"):
2666 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2667 if vca_deployed_list is None:
2668 vca_deployed_list = []
2669 configuration_status_list = []
2670 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2671 db_nsr_update["configurationStatus"] = configuration_status_list
2672 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2673 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2674 elif isinstance(vca_deployed_list, dict):
2675 # maintain backward compatibility. Change a dict to list at database
2676 vca_deployed_list = list(vca_deployed_list.values())
2677 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2678 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2679
2680 if not isinstance(
2681 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2682 ):
2683 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2684 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2685
2686 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2687 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2688 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2689 self.db.set_list(
2690 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2691 )
2692
2693 # n2vc_redesign STEP 2 Deploy Network Scenario
2694 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2695 self._write_op_status(op_id=nslcmop_id, stage=stage)
2696
2697 stage[1] = "Deploying KDUs."
2698 # self.logger.debug(logging_text + "Before deploy_kdus")
2699 # Call to deploy_kdus in case exists the "vdu:kdu" param
2700 await self.deploy_kdus(
2701 logging_text=logging_text,
2702 nsr_id=nsr_id,
2703 nslcmop_id=nslcmop_id,
2704 db_vnfrs=db_vnfrs,
2705 db_vnfds=db_vnfds,
2706 task_instantiation_info=tasks_dict_info,
2707 )
2708
2709 stage[1] = "Getting VCA public key."
2710 # n2vc_redesign STEP 1 Get VCA public ssh-key
2711 # feature 1429. Add n2vc public key to needed VMs
2712 n2vc_key = self.n2vc.get_public_key()
2713 n2vc_key_list = [n2vc_key]
2714 if self.vca_config.public_key:
2715 n2vc_key_list.append(self.vca_config.public_key)
2716
2717 stage[1] = "Deploying NS at VIM."
2718 task_ro = asyncio.ensure_future(
2719 self.instantiate_RO(
2720 logging_text=logging_text,
2721 nsr_id=nsr_id,
2722 nsd=nsd,
2723 db_nsr=db_nsr,
2724 db_nslcmop=db_nslcmop,
2725 db_vnfrs=db_vnfrs,
2726 db_vnfds=db_vnfds,
2727 n2vc_key_list=n2vc_key_list,
2728 stage=stage,
2729 )
2730 )
2731 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2732 tasks_dict_info[task_ro] = "Deploying at VIM"
2733
2734 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2735 stage[1] = "Deploying Execution Environments."
2736 self.logger.debug(logging_text + stage[1])
2737
2738 # create namespace and certificate if any helm based EE is present in the NS
2739 if check_helm_ee_in_ns(db_vnfds):
2740 # TODO: create EE namespace
2741 # create TLS certificates
2742 await self.vca_map["helm-v3"].create_tls_certificate(
2743 secret_name="ee-tls-{}".format(nsr_id),
2744 dns_prefix="*",
2745 nsr_id=nsr_id,
2746 usage="server auth",
2747 )
2748
2749 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2750 for vnf_profile in get_vnf_profiles(nsd):
2751 vnfd_id = vnf_profile["vnfd-id"]
2752 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2753 member_vnf_index = str(vnf_profile["id"])
2754 db_vnfr = db_vnfrs[member_vnf_index]
2755 base_folder = vnfd["_admin"]["storage"]
2756 vdu_id = None
2757 vdu_index = 0
2758 vdu_name = None
2759 kdu_name = None
2760 kdu_index = None
2761
2762 # Get additional parameters
2763 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2764 if db_vnfr.get("additionalParamsForVnf"):
2765 deploy_params.update(
2766 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2767 )
2768
2769 descriptor_config = get_configuration(vnfd, vnfd["id"])
2770 if descriptor_config:
2771 self._deploy_n2vc(
2772 logging_text=logging_text
2773 + "member_vnf_index={} ".format(member_vnf_index),
2774 db_nsr=db_nsr,
2775 db_vnfr=db_vnfr,
2776 nslcmop_id=nslcmop_id,
2777 nsr_id=nsr_id,
2778 nsi_id=nsi_id,
2779 vnfd_id=vnfd_id,
2780 vdu_id=vdu_id,
2781 kdu_name=kdu_name,
2782 member_vnf_index=member_vnf_index,
2783 vdu_index=vdu_index,
2784 kdu_index=kdu_index,
2785 vdu_name=vdu_name,
2786 deploy_params=deploy_params,
2787 descriptor_config=descriptor_config,
2788 base_folder=base_folder,
2789 task_instantiation_info=tasks_dict_info,
2790 stage=stage,
2791 )
2792
2793 # Deploy charms for each VDU that supports one.
2794 for vdud in get_vdu_list(vnfd):
2795 vdu_id = vdud["id"]
2796 descriptor_config = get_configuration(vnfd, vdu_id)
2797 vdur = find_in_list(
2798 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2799 )
2800
2801 if vdur.get("additionalParams"):
2802 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2803 else:
2804 deploy_params_vdu = deploy_params
2805 deploy_params_vdu["OSM"] = get_osm_params(
2806 db_vnfr, vdu_id, vdu_count_index=0
2807 )
2808 vdud_count = get_number_of_instances(vnfd, vdu_id)
2809
2810 self.logger.debug("VDUD > {}".format(vdud))
2811 self.logger.debug(
2812 "Descriptor config > {}".format(descriptor_config)
2813 )
2814 if descriptor_config:
2815 vdu_name = None
2816 kdu_name = None
2817 kdu_index = None
2818 for vdu_index in range(vdud_count):
2819 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2820 self._deploy_n2vc(
2821 logging_text=logging_text
2822 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2823 member_vnf_index, vdu_id, vdu_index
2824 ),
2825 db_nsr=db_nsr,
2826 db_vnfr=db_vnfr,
2827 nslcmop_id=nslcmop_id,
2828 nsr_id=nsr_id,
2829 nsi_id=nsi_id,
2830 vnfd_id=vnfd_id,
2831 vdu_id=vdu_id,
2832 kdu_name=kdu_name,
2833 kdu_index=kdu_index,
2834 member_vnf_index=member_vnf_index,
2835 vdu_index=vdu_index,
2836 vdu_name=vdu_name,
2837 deploy_params=deploy_params_vdu,
2838 descriptor_config=descriptor_config,
2839 base_folder=base_folder,
2840 task_instantiation_info=tasks_dict_info,
2841 stage=stage,
2842 )
2843 for kdud in get_kdu_list(vnfd):
2844 kdu_name = kdud["name"]
2845 descriptor_config = get_configuration(vnfd, kdu_name)
2846 if descriptor_config:
2847 vdu_id = None
2848 vdu_index = 0
2849 vdu_name = None
2850 kdu_index, kdur = next(
2851 x
2852 for x in enumerate(db_vnfr["kdur"])
2853 if x[1]["kdu-name"] == kdu_name
2854 )
2855 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2856 if kdur.get("additionalParams"):
2857 deploy_params_kdu.update(
2858 parse_yaml_strings(kdur["additionalParams"].copy())
2859 )
2860
2861 self._deploy_n2vc(
2862 logging_text=logging_text,
2863 db_nsr=db_nsr,
2864 db_vnfr=db_vnfr,
2865 nslcmop_id=nslcmop_id,
2866 nsr_id=nsr_id,
2867 nsi_id=nsi_id,
2868 vnfd_id=vnfd_id,
2869 vdu_id=vdu_id,
2870 kdu_name=kdu_name,
2871 member_vnf_index=member_vnf_index,
2872 vdu_index=vdu_index,
2873 kdu_index=kdu_index,
2874 vdu_name=vdu_name,
2875 deploy_params=deploy_params_kdu,
2876 descriptor_config=descriptor_config,
2877 base_folder=base_folder,
2878 task_instantiation_info=tasks_dict_info,
2879 stage=stage,
2880 )
2881
2882 # Check if each vnf has exporter for metric collection if so update prometheus job records
2883 if "exporters-endpoints" in vnfd.get("df")[0]:
2884 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2885 self.logger.debug("exporter config :{}".format(exporter_config))
2886 artifact_path = "{}/{}/{}".format(
2887 base_folder["folder"],
2888 base_folder["pkg-dir"],
2889 "exporter-endpoint",
2890 )
2891 ee_id = None
2892 ee_config_descriptor = exporter_config
2893 vnfr_id = db_vnfr["id"]
2894 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2895 logging_text,
2896 nsr_id,
2897 vnfr_id,
2898 vdu_id=None,
2899 vdu_index=None,
2900 user=None,
2901 pub_key=None,
2902 )
2903 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2904 self.logger.debug("Artifact_path:{}".format(artifact_path))
2905 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2906 vdu_id_for_prom = None
2907 vdu_index_for_prom = None
2908 for x in get_iterable(db_vnfr, "vdur"):
2909 vdu_id_for_prom = x.get("vdu-id-ref")
2910 vdu_index_for_prom = x.get("count-index")
2911 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2912 ee_id=ee_id,
2913 artifact_path=artifact_path,
2914 ee_config_descriptor=ee_config_descriptor,
2915 vnfr_id=vnfr_id,
2916 nsr_id=nsr_id,
2917 target_ip=rw_mgmt_ip,
2918 element_type="VDU",
2919 vdu_id=vdu_id_for_prom,
2920 vdu_index=vdu_index_for_prom,
2921 )
2922
2923 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2924 if prometheus_jobs:
2925 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2926 self.update_db_2(
2927 "nsrs",
2928 nsr_id,
2929 db_nsr_update,
2930 )
2931
2932 for job in prometheus_jobs:
2933 self.db.set_one(
2934 "prometheus_jobs",
2935 {"job_name": job["job_name"]},
2936 job,
2937 upsert=True,
2938 fail_on_empty=False,
2939 )
2940
2941 # Check if this NS has a charm configuration
2942 descriptor_config = nsd.get("ns-configuration")
2943 if descriptor_config and descriptor_config.get("juju"):
2944 vnfd_id = None
2945 db_vnfr = None
2946 member_vnf_index = None
2947 vdu_id = None
2948 kdu_name = None
2949 kdu_index = None
2950 vdu_index = 0
2951 vdu_name = None
2952
2953 # Get additional parameters
2954 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2955 if db_nsr.get("additionalParamsForNs"):
2956 deploy_params.update(
2957 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2958 )
2959 base_folder = nsd["_admin"]["storage"]
2960 self._deploy_n2vc(
2961 logging_text=logging_text,
2962 db_nsr=db_nsr,
2963 db_vnfr=db_vnfr,
2964 nslcmop_id=nslcmop_id,
2965 nsr_id=nsr_id,
2966 nsi_id=nsi_id,
2967 vnfd_id=vnfd_id,
2968 vdu_id=vdu_id,
2969 kdu_name=kdu_name,
2970 member_vnf_index=member_vnf_index,
2971 vdu_index=vdu_index,
2972 kdu_index=kdu_index,
2973 vdu_name=vdu_name,
2974 deploy_params=deploy_params,
2975 descriptor_config=descriptor_config,
2976 base_folder=base_folder,
2977 task_instantiation_info=tasks_dict_info,
2978 stage=stage,
2979 )
2980
2981 # rest of staff will be done at finally
2982
2983 except (
2984 ROclient.ROClientException,
2985 DbException,
2986 LcmException,
2987 N2VCException,
2988 ) as e:
2989 self.logger.error(
2990 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2991 )
2992 exc = e
2993 except asyncio.CancelledError:
2994 self.logger.error(
2995 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2996 )
2997 exc = "Operation was cancelled"
2998 except Exception as e:
2999 exc = traceback.format_exc()
3000 self.logger.critical(
3001 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
3002 exc_info=True,
3003 )
3004 finally:
3005 if exc:
3006 error_list.append(str(exc))
3007 try:
3008 # wait for pending tasks
3009 if tasks_dict_info:
3010 stage[1] = "Waiting for instantiate pending tasks."
3011 self.logger.debug(logging_text + stage[1])
3012 error_list += await self._wait_for_tasks(
3013 logging_text,
3014 tasks_dict_info,
3015 timeout_ns_deploy,
3016 stage,
3017 nslcmop_id,
3018 nsr_id=nsr_id,
3019 )
3020 stage[1] = stage[2] = ""
3021 except asyncio.CancelledError:
3022 error_list.append("Cancelled")
3023 # TODO cancel all tasks
3024 except Exception as exc:
3025 error_list.append(str(exc))
3026
3027 # update operation-status
3028 db_nsr_update["operational-status"] = "running"
3029 # let's begin with VCA 'configured' status (later we can change it)
3030 db_nsr_update["config-status"] = "configured"
3031 for task, task_name in tasks_dict_info.items():
3032 if not task.done() or task.cancelled() or task.exception():
3033 if task_name.startswith(self.task_name_deploy_vca):
3034 # A N2VC task is pending
3035 db_nsr_update["config-status"] = "failed"
3036 else:
3037 # RO or KDU task is pending
3038 db_nsr_update["operational-status"] = "failed"
3039
3040 # update status at database
3041 if error_list:
3042 error_detail = ". ".join(error_list)
3043 self.logger.error(logging_text + error_detail)
3044 error_description_nslcmop = "{} Detail: {}".format(
3045 stage[0], error_detail
3046 )
3047 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3048 nslcmop_id, stage[0]
3049 )
3050
3051 db_nsr_update["detailed-status"] = (
3052 error_description_nsr + " Detail: " + error_detail
3053 )
3054 db_nslcmop_update["detailed-status"] = error_detail
3055 nslcmop_operation_state = "FAILED"
3056 ns_state = "BROKEN"
3057 else:
3058 error_detail = None
3059 error_description_nsr = error_description_nslcmop = None
3060 ns_state = "READY"
3061 db_nsr_update["detailed-status"] = "Done"
3062 db_nslcmop_update["detailed-status"] = "Done"
3063 nslcmop_operation_state = "COMPLETED"
3064 # Gather auto-healing and auto-scaling alerts for each vnfr
3065 healing_alerts = []
3066 scaling_alerts = []
3067 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3068 vnfd = next(
3069 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3070 )
3071 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3072 for alert in healing_alerts:
3073 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3074 self.db.create("alerts", alert)
3075
3076 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3077 for alert in scaling_alerts:
3078 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3079 self.db.create("alerts", alert)
3080
3081 if db_nsr:
3082 self._write_ns_status(
3083 nsr_id=nsr_id,
3084 ns_state=ns_state,
3085 current_operation="IDLE",
3086 current_operation_id=None,
3087 error_description=error_description_nsr,
3088 error_detail=error_detail,
3089 other_update=db_nsr_update,
3090 )
3091 self._write_op_status(
3092 op_id=nslcmop_id,
3093 stage="",
3094 error_message=error_description_nslcmop,
3095 operation_state=nslcmop_operation_state,
3096 other_update=db_nslcmop_update,
3097 )
3098
3099 if nslcmop_operation_state:
3100 try:
3101 await self.msg.aiowrite(
3102 "ns",
3103 "instantiated",
3104 {
3105 "nsr_id": nsr_id,
3106 "nslcmop_id": nslcmop_id,
3107 "operationState": nslcmop_operation_state,
3108 },
3109 )
3110 except Exception as e:
3111 self.logger.error(
3112 logging_text + "kafka_write notification Exception {}".format(e)
3113 )
3114
3115 self.logger.debug(logging_text + "Exit")
3116 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3117
3118 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3119 if vnfd_id not in cached_vnfds:
3120 cached_vnfds[vnfd_id] = self.db.get_one(
3121 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3122 )
3123 return cached_vnfds[vnfd_id]
3124
3125 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3126 if vnf_profile_id not in cached_vnfrs:
3127 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3128 "vnfrs",
3129 {
3130 "member-vnf-index-ref": vnf_profile_id,
3131 "nsr-id-ref": nsr_id,
3132 },
3133 )
3134 return cached_vnfrs[vnf_profile_id]
3135
3136 def _is_deployed_vca_in_relation(
3137 self, vca: DeployedVCA, relation: Relation
3138 ) -> bool:
3139 found = False
3140 for endpoint in (relation.provider, relation.requirer):
3141 if endpoint["kdu-resource-profile-id"]:
3142 continue
3143 found = (
3144 vca.vnf_profile_id == endpoint.vnf_profile_id
3145 and vca.vdu_profile_id == endpoint.vdu_profile_id
3146 and vca.execution_environment_ref == endpoint.execution_environment_ref
3147 )
3148 if found:
3149 break
3150 return found
3151
3152 def _update_ee_relation_data_with_implicit_data(
3153 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3154 ):
3155 ee_relation_data = safe_get_ee_relation(
3156 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3157 )
3158 ee_relation_level = EELevel.get_level(ee_relation_data)
3159 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3160 "execution-environment-ref"
3161 ]:
3162 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3163 vnfd_id = vnf_profile["vnfd-id"]
3164 project = nsd["_admin"]["projects_read"][0]
3165 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3166 entity_id = (
3167 vnfd_id
3168 if ee_relation_level == EELevel.VNF
3169 else ee_relation_data["vdu-profile-id"]
3170 )
3171 ee = get_juju_ee_ref(db_vnfd, entity_id)
3172 if not ee:
3173 raise Exception(
3174 f"not execution environments found for ee_relation {ee_relation_data}"
3175 )
3176 ee_relation_data["execution-environment-ref"] = ee["id"]
3177 return ee_relation_data
3178
3179 def _get_ns_relations(
3180 self,
3181 nsr_id: str,
3182 nsd: Dict[str, Any],
3183 vca: DeployedVCA,
3184 cached_vnfds: Dict[str, Any],
3185 ) -> List[Relation]:
3186 relations = []
3187 db_ns_relations = get_ns_configuration_relation_list(nsd)
3188 for r in db_ns_relations:
3189 provider_dict = None
3190 requirer_dict = None
3191 if all(key in r for key in ("provider", "requirer")):
3192 provider_dict = r["provider"]
3193 requirer_dict = r["requirer"]
3194 elif "entities" in r:
3195 provider_id = r["entities"][0]["id"]
3196 provider_dict = {
3197 "nsr-id": nsr_id,
3198 "endpoint": r["entities"][0]["endpoint"],
3199 }
3200 if provider_id != nsd["id"]:
3201 provider_dict["vnf-profile-id"] = provider_id
3202 requirer_id = r["entities"][1]["id"]
3203 requirer_dict = {
3204 "nsr-id": nsr_id,
3205 "endpoint": r["entities"][1]["endpoint"],
3206 }
3207 if requirer_id != nsd["id"]:
3208 requirer_dict["vnf-profile-id"] = requirer_id
3209 else:
3210 raise Exception(
3211 "provider/requirer or entities must be included in the relation."
3212 )
3213 relation_provider = self._update_ee_relation_data_with_implicit_data(
3214 nsr_id, nsd, provider_dict, cached_vnfds
3215 )
3216 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3217 nsr_id, nsd, requirer_dict, cached_vnfds
3218 )
3219 provider = EERelation(relation_provider)
3220 requirer = EERelation(relation_requirer)
3221 relation = Relation(r["name"], provider, requirer)
3222 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3223 if vca_in_relation:
3224 relations.append(relation)
3225 return relations
3226
3227 def _get_vnf_relations(
3228 self,
3229 nsr_id: str,
3230 nsd: Dict[str, Any],
3231 vca: DeployedVCA,
3232 cached_vnfds: Dict[str, Any],
3233 ) -> List[Relation]:
3234 relations = []
3235 if vca.target_element == "ns":
3236 self.logger.debug("VCA is a NS charm, not a VNF.")
3237 return relations
3238 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3239 vnf_profile_id = vnf_profile["id"]
3240 vnfd_id = vnf_profile["vnfd-id"]
3241 project = nsd["_admin"]["projects_read"][0]
3242 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3243 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3244 for r in db_vnf_relations:
3245 provider_dict = None
3246 requirer_dict = None
3247 if all(key in r for key in ("provider", "requirer")):
3248 provider_dict = r["provider"]
3249 requirer_dict = r["requirer"]
3250 elif "entities" in r:
3251 provider_id = r["entities"][0]["id"]
3252 provider_dict = {
3253 "nsr-id": nsr_id,
3254 "vnf-profile-id": vnf_profile_id,
3255 "endpoint": r["entities"][0]["endpoint"],
3256 }
3257 if provider_id != vnfd_id:
3258 provider_dict["vdu-profile-id"] = provider_id
3259 requirer_id = r["entities"][1]["id"]
3260 requirer_dict = {
3261 "nsr-id": nsr_id,
3262 "vnf-profile-id": vnf_profile_id,
3263 "endpoint": r["entities"][1]["endpoint"],
3264 }
3265 if requirer_id != vnfd_id:
3266 requirer_dict["vdu-profile-id"] = requirer_id
3267 else:
3268 raise Exception(
3269 "provider/requirer or entities must be included in the relation."
3270 )
3271 relation_provider = self._update_ee_relation_data_with_implicit_data(
3272 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3273 )
3274 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3275 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3276 )
3277 provider = EERelation(relation_provider)
3278 requirer = EERelation(relation_requirer)
3279 relation = Relation(r["name"], provider, requirer)
3280 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3281 if vca_in_relation:
3282 relations.append(relation)
3283 return relations
3284
3285 def _get_kdu_resource_data(
3286 self,
3287 ee_relation: EERelation,
3288 db_nsr: Dict[str, Any],
3289 cached_vnfds: Dict[str, Any],
3290 ) -> DeployedK8sResource:
3291 nsd = get_nsd(db_nsr)
3292 vnf_profiles = get_vnf_profiles(nsd)
3293 vnfd_id = find_in_list(
3294 vnf_profiles,
3295 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3296 )["vnfd-id"]
3297 project = nsd["_admin"]["projects_read"][0]
3298 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3299 kdu_resource_profile = get_kdu_resource_profile(
3300 db_vnfd, ee_relation.kdu_resource_profile_id
3301 )
3302 kdu_name = kdu_resource_profile["kdu-name"]
3303 deployed_kdu, _ = get_deployed_kdu(
3304 db_nsr.get("_admin", ()).get("deployed", ()),
3305 kdu_name,
3306 ee_relation.vnf_profile_id,
3307 )
3308 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3309 return deployed_kdu
3310
3311 def _get_deployed_component(
3312 self,
3313 ee_relation: EERelation,
3314 db_nsr: Dict[str, Any],
3315 cached_vnfds: Dict[str, Any],
3316 ) -> DeployedComponent:
3317 nsr_id = db_nsr["_id"]
3318 deployed_component = None
3319 ee_level = EELevel.get_level(ee_relation)
3320 if ee_level == EELevel.NS:
3321 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3322 if vca:
3323 deployed_component = DeployedVCA(nsr_id, vca)
3324 elif ee_level == EELevel.VNF:
3325 vca = get_deployed_vca(
3326 db_nsr,
3327 {
3328 "vdu_id": None,
3329 "member-vnf-index": ee_relation.vnf_profile_id,
3330 "ee_descriptor_id": ee_relation.execution_environment_ref,
3331 },
3332 )
3333 if vca:
3334 deployed_component = DeployedVCA(nsr_id, vca)
3335 elif ee_level == EELevel.VDU:
3336 vca = get_deployed_vca(
3337 db_nsr,
3338 {
3339 "vdu_id": ee_relation.vdu_profile_id,
3340 "member-vnf-index": ee_relation.vnf_profile_id,
3341 "ee_descriptor_id": ee_relation.execution_environment_ref,
3342 },
3343 )
3344 if vca:
3345 deployed_component = DeployedVCA(nsr_id, vca)
3346 elif ee_level == EELevel.KDU:
3347 kdu_resource_data = self._get_kdu_resource_data(
3348 ee_relation, db_nsr, cached_vnfds
3349 )
3350 if kdu_resource_data:
3351 deployed_component = DeployedK8sResource(kdu_resource_data)
3352 return deployed_component
3353
3354 async def _add_relation(
3355 self,
3356 relation: Relation,
3357 vca_type: str,
3358 db_nsr: Dict[str, Any],
3359 cached_vnfds: Dict[str, Any],
3360 cached_vnfrs: Dict[str, Any],
3361 ) -> bool:
3362 deployed_provider = self._get_deployed_component(
3363 relation.provider, db_nsr, cached_vnfds
3364 )
3365 deployed_requirer = self._get_deployed_component(
3366 relation.requirer, db_nsr, cached_vnfds
3367 )
3368 if (
3369 deployed_provider
3370 and deployed_requirer
3371 and deployed_provider.config_sw_installed
3372 and deployed_requirer.config_sw_installed
3373 ):
3374 provider_db_vnfr = (
3375 self._get_vnfr(
3376 relation.provider.nsr_id,
3377 relation.provider.vnf_profile_id,
3378 cached_vnfrs,
3379 )
3380 if relation.provider.vnf_profile_id
3381 else None
3382 )
3383 requirer_db_vnfr = (
3384 self._get_vnfr(
3385 relation.requirer.nsr_id,
3386 relation.requirer.vnf_profile_id,
3387 cached_vnfrs,
3388 )
3389 if relation.requirer.vnf_profile_id
3390 else None
3391 )
3392 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3393 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3394 provider_relation_endpoint = RelationEndpoint(
3395 deployed_provider.ee_id,
3396 provider_vca_id,
3397 relation.provider.endpoint,
3398 )
3399 requirer_relation_endpoint = RelationEndpoint(
3400 deployed_requirer.ee_id,
3401 requirer_vca_id,
3402 relation.requirer.endpoint,
3403 )
3404 try:
3405 await self.vca_map[vca_type].add_relation(
3406 provider=provider_relation_endpoint,
3407 requirer=requirer_relation_endpoint,
3408 )
3409 except N2VCException as exception:
3410 self.logger.error(exception)
3411 raise LcmException(exception)
3412 return True
3413 return False
3414
3415 async def _add_vca_relations(
3416 self,
3417 logging_text,
3418 nsr_id,
3419 vca_type: str,
3420 vca_index: int,
3421 timeout: int = 3600,
3422 ) -> bool:
3423 # steps:
3424 # 1. find all relations for this VCA
3425 # 2. wait for other peers related
3426 # 3. add relations
3427
3428 try:
3429 # STEP 1: find all relations for this VCA
3430
3431 # read nsr record
3432 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3433 nsd = get_nsd(db_nsr)
3434
3435 # this VCA data
3436 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3437 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3438
3439 cached_vnfds = {}
3440 cached_vnfrs = {}
3441 relations = []
3442 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3443 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3444
3445 # if no relations, terminate
3446 if not relations:
3447 self.logger.debug(logging_text + " No relations")
3448 return True
3449
3450 self.logger.debug(logging_text + " adding relations {}".format(relations))
3451
3452 # add all relations
3453 start = time()
3454 while True:
3455 # check timeout
3456 now = time()
3457 if now - start >= timeout:
3458 self.logger.error(logging_text + " : timeout adding relations")
3459 return False
3460
3461 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3462 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3463
3464 # for each relation, find the VCA's related
3465 for relation in relations.copy():
3466 added = await self._add_relation(
3467 relation,
3468 vca_type,
3469 db_nsr,
3470 cached_vnfds,
3471 cached_vnfrs,
3472 )
3473 if added:
3474 relations.remove(relation)
3475
3476 if not relations:
3477 self.logger.debug("Relations added")
3478 break
3479 await asyncio.sleep(5.0)
3480
3481 return True
3482
3483 except Exception as e:
3484 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3485 return False
3486
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Instantiate one KDU on its K8s cluster and record the result.

        Installs the helm chart / juju bundle, stores the kdu-instance (and,
        for juju clusters, the namespace) in the nsrs record, extracts the
        management service IP into the vnfrs record, and finally runs any
        initial-config-primitive not handled by an execution environment.

        :param nsr_id: NS record id the KDU belongs to
        :param nsr_db_path: path of this KDU inside nsrs ("_admin.deployed.K8s.<n>")
        :param vnfr_data: vnfrs record of the owning VNF
        :param kdu_index: index of this kdur inside vnfr_data["kdur"]
        :param kdud: KDU descriptor taken from the VNFD
        :param vnfd: full VNF descriptor
        :param k8s_instance_info: deployment data (cluster uuid/type, model, namespace...)
        :param k8params: instantiation parameters for the KDU
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: VCA id to use, if any
        :return: the generated (or externally provided) kdu instance name
        :raises: re-raises any installation error after storing it in the DB
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Use the descriptor-provided deployment name when present;
            # otherwise let the connector generate one.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial-config-primitives only when there is no juju execution
            # environment for this KDU (otherwise N2VC handles them).
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3683
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch an _install_kdu task for every KDU declared in the VNFRs.

        For each kdur: resolves the kdu model (helm chart or juju bundle,
        possibly a package file), resolves/initializes the target K8s cluster,
        synchronizes helm repos once per cluster, records the deployment in
        _admin.deployed.K8s and registers the install task.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: operation id used to register the tasks
        :param db_vnfrs: dict of vnfrs records of this NS
        :param db_vnfds: list of VNF descriptors of this NS
        :param task_instantiation_info: dict updated with task -> description
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # cache: cluster_type -> {k8scluster _id -> connector cluster uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Return the connector uuid of a K8s cluster, initializing helm-v3 if needed."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3955
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create one instantiate_N2VC asyncio task per execution environment.

        Walks the execution environments (juju charms / helm charts) declared in
        descriptor_config. For each one it finds or creates the matching entry in
        db_nsr._admin.deployed.VCA (persisting new entries to the database), then
        launches an instantiate_N2VC() task, registering it in self.lcm_tasks and
        annotating it in task_instantiation_info. EE items that are neither juju
        nor helm (e.g. scripts) are skipped.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Normalize descriptor_config into a flat list of execution-environment items
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                # NOTE: condition is always true in this elif branch; kept for safety
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type and vca_name from the EE descriptor
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                # "cloud: k8s" or "proxy: false" override the default proxy-charm type
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # Reuse an existing _admin.deployed.VCA entry matching this EE, if any
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # vca_index is len(VCA)-1 after the exhausted loop; +1 points past the end
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory record consistent with the database
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4120 @staticmethod
4121 def _create_nslcmop(nsr_id, operation, params):
4122 """
4123 Creates a ns-lcm-opp content to be stored at database.
4124 :param nsr_id: internal id of the instance
4125 :param operation: instantiate, terminate, scale, action, ...
4126 :param params: user parameters for the operation
4127 :return: dictionary following SOL005 format
4128 """
4129 # Raise exception if invalid arguments
4130 if not (nsr_id and operation and params):
4131 raise LcmException(
4132 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4133 )
4134 now = time()
4135 _id = str(uuid4())
4136 nslcmop = {
4137 "id": _id,
4138 "_id": _id,
4139 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4140 "operationState": "PROCESSING",
4141 "statusEnteredTime": now,
4142 "nsInstanceId": nsr_id,
4143 "lcmOperationType": operation,
4144 "startTime": now,
4145 "isAutomaticInvocation": False,
4146 "operationParams": params,
4147 "isCancelPending": False,
4148 "links": {
4149 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
4150 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
4151 },
4152 }
4153 return nslcmop
4154
4155 def _format_additional_params(self, params):
4156 params = params or {}
4157 for key, value in params.items():
4158 if str(value).startswith("!!yaml "):
4159 params[key] = yaml.safe_load(value[7:])
4160 return params
4161
4162 def _get_terminate_primitive_params(self, seq, vnf_index):
4163 primitive = seq.get("name")
4164 primitive_params = {}
4165 params = {
4166 "member_vnf_index": vnf_index,
4167 "primitive": primitive,
4168 "primitive_params": primitive_params,
4169 }
4170 desc_params = {}
4171 return self._map_primitive_params(seq, params, desc_params)
4172
4173 # sub-operations
4174
4175 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4176 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4177 if op.get("operationState") == "COMPLETED":
4178 # b. Skip sub-operation
4179 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4180 return self.SUBOPERATION_STATUS_SKIP
4181 else:
4182 # c. retry executing sub-operation
4183 # The sub-operation exists, and operationState != 'COMPLETED'
4184 # Update operationState = 'PROCESSING' to indicate a retry.
4185 operationState = "PROCESSING"
4186 detailed_status = "In progress"
4187 self._update_suboperation_status(
4188 db_nslcmop, op_index, operationState, detailed_status
4189 )
4190 # Return the sub-operation index
4191 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4192 # with arguments extracted from the sub-operation
4193 return op_index
4194
4195 # Find a sub-operation where all keys in a matching dictionary must match
4196 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4197 def _find_suboperation(self, db_nslcmop, match):
4198 if db_nslcmop and match:
4199 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4200 for i, op in enumerate(op_list):
4201 if all(op.get(k) == match[k] for k in match):
4202 return i
4203 return self.SUBOPERATION_STATUS_NOT_FOUND
4204
4205 # Update status for a sub-operation given its index
4206 def _update_suboperation_status(
4207 self, db_nslcmop, op_index, operationState, detailed_status
4208 ):
4209 # Update DB for HA tasks
4210 q_filter = {"_id": db_nslcmop["_id"]}
4211 update_dict = {
4212 "_admin.operations.{}.operationState".format(op_index): operationState,
4213 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4214 }
4215 self.db.set_one(
4216 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4217 )
4218
4219 # Add sub-operation, return the index of the added sub-operation
4220 # Optionally, set operationState, detailed-status, and operationType
4221 # Status and type are currently set for 'scale' sub-operations:
4222 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4223 # 'detailed-status' : status message
4224 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4225 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4226 def _add_suboperation(
4227 self,
4228 db_nslcmop,
4229 vnf_index,
4230 vdu_id,
4231 vdu_count_index,
4232 vdu_name,
4233 primitive,
4234 mapped_primitive_params,
4235 operationState=None,
4236 detailed_status=None,
4237 operationType=None,
4238 RO_nsr_id=None,
4239 RO_scaling_info=None,
4240 ):
4241 if not db_nslcmop:
4242 return self.SUBOPERATION_STATUS_NOT_FOUND
4243 # Get the "_admin.operations" list, if it exists
4244 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4245 op_list = db_nslcmop_admin.get("operations")
4246 # Create or append to the "_admin.operations" list
4247 new_op = {
4248 "member_vnf_index": vnf_index,
4249 "vdu_id": vdu_id,
4250 "vdu_count_index": vdu_count_index,
4251 "primitive": primitive,
4252 "primitive_params": mapped_primitive_params,
4253 }
4254 if operationState:
4255 new_op["operationState"] = operationState
4256 if detailed_status:
4257 new_op["detailed-status"] = detailed_status
4258 if operationType:
4259 new_op["lcmOperationType"] = operationType
4260 if RO_nsr_id:
4261 new_op["RO_nsr_id"] = RO_nsr_id
4262 if RO_scaling_info:
4263 new_op["RO_scaling_info"] = RO_scaling_info
4264 if not op_list:
4265 # No existing operations, create key 'operations' with current operation as first list element
4266 db_nslcmop_admin.update({"operations": [new_op]})
4267 op_list = db_nslcmop_admin.get("operations")
4268 else:
4269 # Existing operations, append operation to list
4270 op_list.append(new_op)
4271
4272 db_nslcmop_update = {"_admin.operations": op_list}
4273 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4274 op_index = len(op_list) - 1
4275 return op_index
4276
4277 # Helper methods for scale() sub-operations
4278
4279 # pre-scale/post-scale:
4280 # Check for 3 different cases:
4281 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4282 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4283 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4284 def _check_or_add_scale_suboperation(
4285 self,
4286 db_nslcmop,
4287 vnf_index,
4288 vnf_config_primitive,
4289 primitive_params,
4290 operationType,
4291 RO_nsr_id=None,
4292 RO_scaling_info=None,
4293 ):
4294 # Find this sub-operation
4295 if RO_nsr_id and RO_scaling_info:
4296 operationType = "SCALE-RO"
4297 match = {
4298 "member_vnf_index": vnf_index,
4299 "RO_nsr_id": RO_nsr_id,
4300 "RO_scaling_info": RO_scaling_info,
4301 }
4302 else:
4303 match = {
4304 "member_vnf_index": vnf_index,
4305 "primitive": vnf_config_primitive,
4306 "primitive_params": primitive_params,
4307 "lcmOperationType": operationType,
4308 }
4309 op_index = self._find_suboperation(db_nslcmop, match)
4310 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4311 # a. New sub-operation
4312 # The sub-operation does not exist, add it.
4313 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4314 # The following parameters are set to None for all kind of scaling:
4315 vdu_id = None
4316 vdu_count_index = None
4317 vdu_name = None
4318 if RO_nsr_id and RO_scaling_info:
4319 vnf_config_primitive = None
4320 primitive_params = None
4321 else:
4322 RO_nsr_id = None
4323 RO_scaling_info = None
4324 # Initial status for sub-operation
4325 operationState = "PROCESSING"
4326 detailed_status = "In progress"
4327 # Add sub-operation for pre/post-scaling (zero or more operations)
4328 self._add_suboperation(
4329 db_nslcmop,
4330 vnf_index,
4331 vdu_id,
4332 vdu_count_index,
4333 vdu_name,
4334 vnf_config_primitive,
4335 primitive_params,
4336 operationState,
4337 detailed_status,
4338 operationType,
4339 RO_nsr_id,
4340 RO_scaling_info,
4341 )
4342 return self.SUBOPERATION_STATUS_NEW
4343 else:
4344 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4345 # or op_index (operationState != 'COMPLETED')
4346 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4347
4348 # Function to return execution_environment id
4349
4350 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4351 # TODO vdu_index_count
4352 for vca in vca_deployed_list:
4353 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4354 return vca.get("ee_id")
4355
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: nslcmop database record of the current operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment here, because all of
            them will be destroyed at once later on
        :param exec_primitives: False to not execute terminate primitives, because the config is
            not completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) identifier, if any
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type kept for backward compatibility with old records
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # primitives only run when the VCA was flagged as needing termination
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4461
4462 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4463 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4464 namespace = "." + db_nsr["_id"]
4465 try:
4466 await self.n2vc.delete_namespace(
4467 namespace=namespace,
4468 total_timeout=self.timeout.charm_delete,
4469 vca_id=vca_id,
4470 )
4471 except N2VCNotFound: # already deleted. Skip
4472 pass
4473 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4474
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a NS instance: execute terminating primitives, delete the
        execution environments, KDUs and the VIM deployment, then record the
        final status.

        The operation runs in three stages (tracked in 'stage'):
        1/3 preparation, 2/3 terminating primitives, 3/3 delete all.
        The final status (nsrs/nslcmops records and the kafka 'terminated'
        event) is always written in the finally block.

        :param nsr_id: id of the NS instance record (nsrs collection)
        :param nslcmop_id: id of the operation record (nslcmops collection)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so DB writes below do not mutate the deployed info
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was instantiated; final status is written at finally
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching this VCA's scope
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) that the NS is terminated
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
                self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
                self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4805
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait until every task in created_tasks_info finishes, collecting errors.

        Progress ("done/total") and accumulated errors are reported through
        stage[1] and _write_op_status() after each batch of completed tasks;
        when nsr_id is provided, errors are also written to the nsrs record.

        :param created_tasks_info: dict mapping each asyncio task to a human
            readable description used in logs and error messages
        :param timeout: overall timeout (seconds) for the whole set of tasks
        :param stage: [stage, step, VIM-status] list; stage[1] is updated here
        :return: list of error messages (empty if everything succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the overall deadline
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled failures are logged plainly; anything
                    # else gets a full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4882
4883 @staticmethod
4884 def _map_primitive_params(primitive_desc, params, instantiation_params):
4885 """
4886 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4887 The default-value is used. If it is between < > it look for a value at instantiation_params
4888 :param primitive_desc: portion of VNFD/NSD that describes primitive
4889 :param params: Params provided by user
4890 :param instantiation_params: Instantiation params provided by user
4891 :return: a dictionary with the calculated params
4892 """
4893 calculated_params = {}
4894 for parameter in primitive_desc.get("parameter", ()):
4895 param_name = parameter["name"]
4896 if param_name in params:
4897 calculated_params[param_name] = params[param_name]
4898 elif "default-value" in parameter or "value" in parameter:
4899 if "value" in parameter:
4900 calculated_params[param_name] = parameter["value"]
4901 else:
4902 calculated_params[param_name] = parameter["default-value"]
4903 if (
4904 isinstance(calculated_params[param_name], str)
4905 and calculated_params[param_name].startswith("<")
4906 and calculated_params[param_name].endswith(">")
4907 ):
4908 if calculated_params[param_name][1:-1] in instantiation_params:
4909 calculated_params[param_name] = instantiation_params[
4910 calculated_params[param_name][1:-1]
4911 ]
4912 else:
4913 raise LcmException(
4914 "Parameter {} needed to execute primitive {} not provided".format(
4915 calculated_params[param_name], primitive_desc["name"]
4916 )
4917 )
4918 else:
4919 raise LcmException(
4920 "Parameter {} needed to execute primitive {} not provided".format(
4921 param_name, primitive_desc["name"]
4922 )
4923 )
4924
4925 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4926 calculated_params[param_name] = yaml.safe_dump(
4927 calculated_params[param_name], default_flow_style=True, width=256
4928 )
4929 elif isinstance(calculated_params[param_name], str) and calculated_params[
4930 param_name
4931 ].startswith("!!yaml "):
4932 calculated_params[param_name] = calculated_params[param_name][7:]
4933 if parameter.get("data-type") == "INTEGER":
4934 try:
4935 calculated_params[param_name] = int(calculated_params[param_name])
4936 except ValueError: # error converting string to int
4937 raise LcmException(
4938 "Parameter {} of primitive {} must be integer".format(
4939 param_name, primitive_desc["name"]
4940 )
4941 )
4942 elif parameter.get("data-type") == "BOOLEAN":
4943 calculated_params[param_name] = not (
4944 (str(calculated_params[param_name])).lower() == "false"
4945 )
4946
4947 # add always ns_config_info if primitive name is config
4948 if primitive_desc["name"] == "config":
4949 if "ns_config_info" in instantiation_params:
4950 calculated_params["ns_config_info"] = instantiation_params[
4951 "ns_config_info"
4952 ]
4953 return calculated_params
4954
4955 def _look_for_deployed_vca(
4956 self,
4957 deployed_vca,
4958 member_vnf_index,
4959 vdu_id,
4960 vdu_count_index,
4961 kdu_name=None,
4962 ee_descriptor_id=None,
4963 ):
4964 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4965 for vca in deployed_vca:
4966 if not vca:
4967 continue
4968 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4969 continue
4970 if (
4971 vdu_count_index is not None
4972 and vdu_count_index != vca["vdu_count_index"]
4973 ):
4974 continue
4975 if kdu_name and kdu_name != vca["kdu_name"]:
4976 continue
4977 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4978 continue
4979 break
4980 else:
4981 # vca_deployed not found
4982 raise LcmException(
4983 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4984 " is not deployed".format(
4985 member_vnf_index,
4986 vdu_id,
4987 vdu_count_index,
4988 kdu_name,
4989 ee_descriptor_id,
4990 )
4991 )
4992 # get ee_id
4993 ee_id = vca.get("ee_id")
4994 vca_type = vca.get(
4995 "type", "lxc_proxy_charm"
4996 ) # default value for backward compatibility - proxy charm
4997 if not ee_id:
4998 raise LcmException(
4999 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5000 "execution environment".format(
5001 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5002 )
5003 )
5004 return ee_id, vca_type
5005
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive in a VCA execution environment, with retries.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; "config" gets its params wrapped
            under a "params" key before execution
        :param primitive_params: dict of parameters for the primitive
        :param retries: number of additional attempts after a failed one
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout.primitive
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: database location where the connector stores status
        :param vca_id: id of the VCA to use
        :return: ("COMPLETED", output) on success; ("FAILED"/"FAIL", error
            text) on failure
        """
        try:
            if primitive == "config":
                # the "config" primitive expects its parameters nested under "params"
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            # retry loop: runs 1 + retries attempts at most
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout.progress_primitive,
                            total_timeout=self.timeout.primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout.primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval)
                    else:
                        # attempts exhausted: map a timeout to a readable message
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # NOTE(review): this path returns "FAIL" while the retry path
            # above returns "FAILED" — confirm whether any caller matches on
            # either literal before unifying them
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5066
5067 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5068 """
5069 Updating the vca_status with latest juju information in nsrs record
5070 :param: nsr_id: Id of the nsr
5071 :param: nslcmop_id: Id of the nslcmop
5072 :return: None
5073 """
5074
5075 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5076 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5077 vca_id = self.get_vca_id({}, db_nsr)
5078 if db_nsr["_admin"]["deployed"]["K8s"]:
5079 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5080 cluster_uuid, kdu_instance, cluster_type = (
5081 k8s["k8scluster-uuid"],
5082 k8s["kdu-instance"],
5083 k8s["k8scluster-type"],
5084 )
5085 await self._on_update_k8s_db(
5086 cluster_uuid=cluster_uuid,
5087 kdu_instance=kdu_instance,
5088 filter={"_id": nsr_id},
5089 vca_id=vca_id,
5090 cluster_type=cluster_type,
5091 )
5092 else:
5093 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5094 table, filter = "nsrs", {"_id": nsr_id}
5095 path = "_admin.deployed.VCA.{}.".format(vca_index)
5096 await self._on_update_n2vc_db(table, filter, path, {})
5097
5098 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5099 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5100
    async def action(self, nsr_id, nslcmop_id):
        """Run a configuration primitive (ns action) for this NS instance.

        Reads the operation parameters from the nslcmop record and dispatches
        the primitive either to the K8s cluster client (KDU upgrade/rollback/
        status or KDU actions) or to the deployed VCA execution environment.
        The operation result is persisted in the nslcmop/nsr records and a
        kafka "actioned" notification is emitted from the finally block.

        :param nsr_id: id of the nsrs record
        :param nslcmop_id: id of the nslcmops record
        :return: (nslcmop_operation_state, detailed_status) from the finally
            block, or None when the HA lock is owned by another worker
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            # target selectors: the primitive may address the NS, a VNF, a VDU
            # or a KDU depending on which of these params are present
            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode them
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch
            # above — for NS-level actions (no member_vnf_index) this line
            # looks like it would raise NameError; confirm intended usage
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only the built-in kdu operations may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve the additional params matching the addressed entity
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops rebind the local "primitive" (the
                # requested primitive name) to descriptor dicts; only
                # primitive_name is used afterwards, but confirm before reuse
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound inside the block above;
            # if kdu_name is set but get_configuration() returns nothing this
            # condition looks like it would raise NameError — confirm
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model may be overridden through the action params
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # any other named action: run it through the k8s client
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # non-KDU primitive: run it in the deployed VCA execution env
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist outcome and notify, whatever the execution path was
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5472
5473 async def terminate_vdus(
5474 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5475 ):
5476 """This method terminates VDUs
5477
5478 Args:
5479 db_vnfr: VNF instance record
5480 member_vnf_index: VNF index to identify the VDUs to be removed
5481 db_nsr: NS instance record
5482 update_db_nslcmops: Nslcmop update record
5483 """
5484 vca_scaling_info = []
5485 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5486 scaling_info["scaling_direction"] = "IN"
5487 scaling_info["vdu-delete"] = {}
5488 scaling_info["kdu-delete"] = {}
5489 db_vdur = db_vnfr.get("vdur")
5490 vdur_list = copy(db_vdur)
5491 count_index = 0
5492 for index, vdu in enumerate(vdur_list):
5493 vca_scaling_info.append(
5494 {
5495 "osm_vdu_id": vdu["vdu-id-ref"],
5496 "member-vnf-index": member_vnf_index,
5497 "type": "delete",
5498 "vdu_index": count_index,
5499 }
5500 )
5501 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5502 scaling_info["vdu"].append(
5503 {
5504 "name": vdu.get("name") or vdu.get("vdu-name"),
5505 "vdu_id": vdu["vdu-id-ref"],
5506 "interface": [],
5507 }
5508 )
5509 for interface in vdu["interfaces"]:
5510 scaling_info["vdu"][index]["interface"].append(
5511 {
5512 "name": interface["name"],
5513 "ip_address": interface["ip-address"],
5514 "mac_address": interface.get("mac-address"),
5515 }
5516 )
5517 self.logger.info("NS update scaling info{}".format(scaling_info))
5518 stage[2] = "Terminating VDUs"
5519 if scaling_info.get("vdu-delete"):
5520 # scale_process = "RO"
5521 if self.ro_config.ng:
5522 await self._scale_ng_ro(
5523 logging_text,
5524 db_nsr,
5525 update_db_nslcmops,
5526 db_vnfr,
5527 scaling_info,
5528 stage,
5529 )
5530
5531 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5532 """This method is to Remove VNF instances from NS.
5533
5534 Args:
5535 nsr_id: NS instance id
5536 nslcmop_id: nslcmop id of update
5537 vnf_instance_id: id of the VNF instance to be removed
5538
5539 Returns:
5540 result: (str, str) COMPLETED/FAILED, details
5541 """
5542 try:
5543 db_nsr_update = {}
5544 logging_text = "Task ns={} update ".format(nsr_id)
5545 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5546 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5547 if check_vnfr_count > 1:
5548 stage = ["", "", ""]
5549 step = "Getting nslcmop from database"
5550 self.logger.debug(
5551 step + " after having waited for previous tasks to be completed"
5552 )
5553 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5554 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5555 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5556 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5557 """ db_vnfr = self.db.get_one(
5558 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5559
5560 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5561 await self.terminate_vdus(
5562 db_vnfr,
5563 member_vnf_index,
5564 db_nsr,
5565 update_db_nslcmops,
5566 stage,
5567 logging_text,
5568 )
5569
5570 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5571 constituent_vnfr.remove(db_vnfr.get("_id"))
5572 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5573 "constituent-vnfr-ref"
5574 )
5575 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5576 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5577 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5578 return "COMPLETED", "Done"
5579 else:
5580 step = "Terminate VNF Failed with"
5581 raise LcmException(
5582 "{} Cannot terminate the last VNF in this NS.".format(
5583 vnf_instance_id
5584 )
5585 )
5586 except (LcmException, asyncio.CancelledError):
5587 raise
5588 except Exception as e:
5589 self.logger.debug("Error removing VNF {}".format(e))
5590 return "FAILED", "Error removing VNF {}".format(e)
5591
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the vnfr (connection
        points, vdur from the operation params, revision) and then asks RO
        to create the new resources.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is supplied in the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the vnfr so the scaling step sees the updated record
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is rebuilt per VDU and never
                # read after this loop — confirm whether it is dead code
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                # NOTE(review): count_index stays 0 for every VDU — confirm
                # this matches the expected "vdu_index"/"vdu-create" semantics
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5717
5718 async def _ns_charm_upgrade(
5719 self,
5720 ee_id,
5721 charm_id,
5722 charm_type,
5723 path,
5724 timeout: float = None,
5725 ) -> (str, str):
5726 """This method upgrade charms in VNF instances
5727
5728 Args:
5729 ee_id: Execution environment id
5730 path: Local path to the charm
5731 charm_id: charm-id
5732 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5733 timeout: (Float) Timeout for the ns update operation
5734
5735 Returns:
5736 result: (str, str) COMPLETED/FAILED, details
5737 """
5738 try:
5739 charm_type = charm_type or "lxc_proxy_charm"
5740 output = await self.vca_map[charm_type].upgrade_charm(
5741 ee_id=ee_id,
5742 path=path,
5743 charm_id=charm_id,
5744 charm_type=charm_type,
5745 timeout=timeout or self.timeout.ns_update,
5746 )
5747
5748 if output:
5749 return "COMPLETED", output
5750
5751 except (LcmException, asyncio.CancelledError):
5752 raise
5753
5754 except Exception as e:
5755 self.logger.debug("Error upgrading charm {}".format(path))
5756
5757 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5758
5759 async def update(self, nsr_id, nslcmop_id):
5760 """Update NS according to different update types
5761
5762 This method performs upgrade of VNF instances then updates the revision
5763 number in VNF record
5764
5765 Args:
5766 nsr_id: Network service will be updated
5767 nslcmop_id: ns lcm operation id
5768
5769 Returns:
5770 It may raise DbException, LcmException, N2VCException, K8sException
5771
5772 """
5773 # Try to lock HA task here
5774 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5775 if not task_is_locked_by_me:
5776 return
5777
5778 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5779 self.logger.debug(logging_text + "Enter")
5780
5781 # Set the required variables to be filled up later
5782 db_nsr = None
5783 db_nslcmop_update = {}
5784 vnfr_update = {}
5785 nslcmop_operation_state = None
5786 db_nsr_update = {}
5787 error_description_nslcmop = ""
5788 exc = None
5789 change_type = "updated"
5790 detailed_status = ""
5791 member_vnf_index = None
5792
5793 try:
5794 # wait for any previous tasks in process
5795 step = "Waiting for previous operations to terminate"
5796 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5797 self._write_ns_status(
5798 nsr_id=nsr_id,
5799 ns_state=None,
5800 current_operation="UPDATING",
5801 current_operation_id=nslcmop_id,
5802 )
5803
5804 step = "Getting nslcmop from database"
5805 db_nslcmop = self.db.get_one(
5806 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5807 )
5808 update_type = db_nslcmop["operationParams"]["updateType"]
5809
5810 step = "Getting nsr from database"
5811 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5812 old_operational_status = db_nsr["operational-status"]
5813 db_nsr_update["operational-status"] = "updating"
5814 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5815 nsr_deployed = db_nsr["_admin"].get("deployed")
5816
5817 if update_type == "CHANGE_VNFPKG":
5818 # Get the input parameters given through update request
5819 vnf_instance_id = db_nslcmop["operationParams"][
5820 "changeVnfPackageData"
5821 ].get("vnfInstanceId")
5822
5823 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5824 "vnfdId"
5825 )
5826 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5827
5828 step = "Getting vnfr from database"
5829 db_vnfr = self.db.get_one(
5830 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5831 )
5832
5833 step = "Getting vnfds from database"
5834 # Latest VNFD
5835 latest_vnfd = self.db.get_one(
5836 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5837 )
5838 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5839
5840 # Current VNFD
5841 current_vnf_revision = db_vnfr.get("revision", 1)
5842 current_vnfd = self.db.get_one(
5843 "vnfds_revisions",
5844 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5845 fail_on_empty=False,
5846 )
5847 # Charm artifact paths will be filled up later
5848 (
5849 current_charm_artifact_path,
5850 target_charm_artifact_path,
5851 charm_artifact_paths,
5852 helm_artifacts,
5853 ) = ([], [], [], [])
5854
5855 step = "Checking if revision has changed in VNFD"
5856 if current_vnf_revision != latest_vnfd_revision:
5857 change_type = "policy_updated"
5858
5859 # There is new revision of VNFD, update operation is required
5860 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5861 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5862
5863 step = "Removing the VNFD packages if they exist in the local path"
5864 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5865 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5866
5867 step = "Get the VNFD packages from FSMongo"
5868 self.fs.sync(from_path=latest_vnfd_path)
5869 self.fs.sync(from_path=current_vnfd_path)
5870
5871 step = (
5872 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5873 )
5874 current_base_folder = current_vnfd["_admin"]["storage"]
5875 latest_base_folder = latest_vnfd["_admin"]["storage"]
5876
5877 for vca_index, vca_deployed in enumerate(
5878 get_iterable(nsr_deployed, "VCA")
5879 ):
5880 vnf_index = db_vnfr.get("member-vnf-index-ref")
5881
5882 # Getting charm-id and charm-type
5883 if vca_deployed.get("member-vnf-index") == vnf_index:
5884 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5885 vca_type = vca_deployed.get("type")
5886 vdu_count_index = vca_deployed.get("vdu_count_index")
5887
5888 # Getting ee-id
5889 ee_id = vca_deployed.get("ee_id")
5890
5891 step = "Getting descriptor config"
5892 if current_vnfd.get("kdu"):
5893 search_key = "kdu_name"
5894 else:
5895 search_key = "vnfd_id"
5896
5897 entity_id = vca_deployed.get(search_key)
5898
5899 descriptor_config = get_configuration(
5900 current_vnfd, entity_id
5901 )
5902
5903 if "execution-environment-list" in descriptor_config:
5904 ee_list = descriptor_config.get(
5905 "execution-environment-list", []
5906 )
5907 else:
5908 ee_list = []
5909
5910 # There could be several charm used in the same VNF
5911 for ee_item in ee_list:
5912 if ee_item.get("juju"):
5913 step = "Getting charm name"
5914 charm_name = ee_item["juju"].get("charm")
5915
5916 step = "Setting Charm artifact paths"
5917 current_charm_artifact_path.append(
5918 get_charm_artifact_path(
5919 current_base_folder,
5920 charm_name,
5921 vca_type,
5922 current_vnf_revision,
5923 )
5924 )
5925 target_charm_artifact_path.append(
5926 get_charm_artifact_path(
5927 latest_base_folder,
5928 charm_name,
5929 vca_type,
5930 latest_vnfd_revision,
5931 )
5932 )
5933 elif ee_item.get("helm-chart"):
5934 # add chart to list and all parameters
5935 step = "Getting helm chart name"
5936 chart_name = ee_item.get("helm-chart")
5937 if (
5938 ee_item.get("helm-version")
5939 and ee_item.get("helm-version") == "v2"
5940 ):
5941 vca_type = "helm"
5942 else:
5943 vca_type = "helm-v3"
5944 step = "Setting Helm chart artifact paths"
5945
5946 helm_artifacts.append(
5947 {
5948 "current_artifact_path": get_charm_artifact_path(
5949 current_base_folder,
5950 chart_name,
5951 vca_type,
5952 current_vnf_revision,
5953 ),
5954 "target_artifact_path": get_charm_artifact_path(
5955 latest_base_folder,
5956 chart_name,
5957 vca_type,
5958 latest_vnfd_revision,
5959 ),
5960 "ee_id": ee_id,
5961 "vca_index": vca_index,
5962 "vdu_index": vdu_count_index,
5963 }
5964 )
5965
5966 charm_artifact_paths = zip(
5967 current_charm_artifact_path, target_charm_artifact_path
5968 )
5969
5970 step = "Checking if software version has changed in VNFD"
5971 if find_software_version(current_vnfd) != find_software_version(
5972 latest_vnfd
5973 ):
5974 step = "Checking if existing VNF has charm"
5975 for current_charm_path, target_charm_path in list(
5976 charm_artifact_paths
5977 ):
5978 if current_charm_path:
5979 raise LcmException(
5980 "Software version change is not supported as VNF instance {} has charm.".format(
5981 vnf_instance_id
5982 )
5983 )
5984
5985 # There is no change in the charm package, then redeploy the VNF
5986 # based on new descriptor
5987 step = "Redeploying VNF"
5988 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5989 (result, detailed_status) = await self._ns_redeploy_vnf(
5990 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5991 )
5992 if result == "FAILED":
5993 nslcmop_operation_state = result
5994 error_description_nslcmop = detailed_status
5995 db_nslcmop_update["detailed-status"] = detailed_status
5996 self.logger.debug(
5997 logging_text
5998 + " step {} Done with result {} {}".format(
5999 step, nslcmop_operation_state, detailed_status
6000 )
6001 )
6002
6003 else:
6004 step = "Checking if any charm package has changed or not"
6005 for current_charm_path, target_charm_path in list(
6006 charm_artifact_paths
6007 ):
6008 if (
6009 current_charm_path
6010 and target_charm_path
6011 and self.check_charm_hash_changed(
6012 current_charm_path, target_charm_path
6013 )
6014 ):
6015 step = "Checking whether VNF uses juju bundle"
6016 if check_juju_bundle_existence(current_vnfd):
6017 raise LcmException(
6018 "Charm upgrade is not supported for the instance which"
6019 " uses juju-bundle: {}".format(
6020 check_juju_bundle_existence(current_vnfd)
6021 )
6022 )
6023
6024 step = "Upgrading Charm"
6025 (
6026 result,
6027 detailed_status,
6028 ) = await self._ns_charm_upgrade(
6029 ee_id=ee_id,
6030 charm_id=vca_id,
6031 charm_type=vca_type,
6032 path=self.fs.path + target_charm_path,
6033 timeout=timeout_seconds,
6034 )
6035
6036 if result == "FAILED":
6037 nslcmop_operation_state = result
6038 error_description_nslcmop = detailed_status
6039
6040 db_nslcmop_update["detailed-status"] = detailed_status
6041 self.logger.debug(
6042 logging_text
6043 + " step {} Done with result {} {}".format(
6044 step, nslcmop_operation_state, detailed_status
6045 )
6046 )
6047
6048 step = "Updating policies"
6049 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6050 result = "COMPLETED"
6051 detailed_status = "Done"
6052 db_nslcmop_update["detailed-status"] = "Done"
6053
6054 # helm base EE
6055 for item in helm_artifacts:
6056 if not (
6057 item["current_artifact_path"]
6058 and item["target_artifact_path"]
6059 and self.check_charm_hash_changed(
6060 item["current_artifact_path"],
6061 item["target_artifact_path"],
6062 )
6063 ):
6064 continue
6065 db_update_entry = "_admin.deployed.VCA.{}.".format(
6066 item["vca_index"]
6067 )
6068 vnfr_id = db_vnfr["_id"]
6069 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6070 db_dict = {
6071 "collection": "nsrs",
6072 "filter": {"_id": nsr_id},
6073 "path": db_update_entry,
6074 }
6075 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6076 await self.vca_map[vca_type].upgrade_execution_environment(
6077 namespace=namespace,
6078 helm_id=helm_id,
6079 db_dict=db_dict,
6080 config=osm_config,
6081 artifact_path=item["target_artifact_path"],
6082 vca_type=vca_type,
6083 )
6084 vnf_id = db_vnfr.get("vnfd-ref")
6085 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6086 self.logger.debug("get ssh key block")
6087 rw_mgmt_ip = None
6088 if deep_get(
6089 config_descriptor,
6090 ("config-access", "ssh-access", "required"),
6091 ):
6092 # Needed to inject an ssh key
6093 user = deep_get(
6094 config_descriptor,
6095 ("config-access", "ssh-access", "default-user"),
6096 )
6097 step = (
6098 "Install configuration Software, getting public ssh key"
6099 )
6100 pub_key = await self.vca_map[
6101 vca_type
6102 ].get_ee_ssh_public__key(
6103 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6104 )
6105
6106 step = (
6107 "Insert public key into VM user={} ssh_key={}".format(
6108 user, pub_key
6109 )
6110 )
6111 self.logger.debug(logging_text + step)
6112
6113 # wait for RO to report the VM ip-address, then insert pub_key into the VM
6114 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6115 logging_text,
6116 nsr_id,
6117 vnfr_id,
6118 None,
6119 item["vdu_index"],
6120 user=user,
6121 pub_key=pub_key,
6122 )
6123
6124 initial_config_primitive_list = config_descriptor.get(
6125 "initial-config-primitive"
6126 )
6127 config_primitive = next(
6128 (
6129 p
6130 for p in initial_config_primitive_list
6131 if p["name"] == "config"
6132 ),
6133 None,
6134 )
6135 if not config_primitive:
6136 continue
6137
6138 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6139 if rw_mgmt_ip:
6140 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6141 if db_vnfr.get("additionalParamsForVnf"):
6142 deploy_params.update(
6143 parse_yaml_strings(
6144 db_vnfr["additionalParamsForVnf"].copy()
6145 )
6146 )
6147 primitive_params_ = self._map_primitive_params(
6148 config_primitive, {}, deploy_params
6149 )
6150
6151 step = "execute primitive '{}' params '{}'".format(
6152 config_primitive["name"], primitive_params_
6153 )
6154 self.logger.debug(logging_text + step)
6155 await self.vca_map[vca_type].exec_primitive(
6156 ee_id=ee_id,
6157 primitive_name=config_primitive["name"],
6158 params_dict=primitive_params_,
6159 db_dict=db_dict,
6160 vca_id=vca_id,
6161 vca_type=vca_type,
6162 )
6163
6164 step = "Updating policies"
6165 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6166 detailed_status = "Done"
6167 db_nslcmop_update["detailed-status"] = "Done"
6168
6169 # If nslcmop_operation_state is None, no operation has failed.
6170 if not nslcmop_operation_state:
6171 nslcmop_operation_state = "COMPLETED"
6172
6173 # If update CHANGE_VNFPKG nslcmop_operation is successful
6174 # vnf revision need to be updated
6175 vnfr_update["revision"] = latest_vnfd_revision
6176 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6177
6178 self.logger.debug(
6179 logging_text
6180 + " task Done with result {} {}".format(
6181 nslcmop_operation_state, detailed_status
6182 )
6183 )
6184 elif update_type == "REMOVE_VNF":
6185 # This part is included in https://osm.etsi.org/gerrit/11876
6186 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6187 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6188 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6189 step = "Removing VNF"
6190 (result, detailed_status) = await self.remove_vnf(
6191 nsr_id, nslcmop_id, vnf_instance_id
6192 )
6193 if result == "FAILED":
6194 nslcmop_operation_state = result
6195 error_description_nslcmop = detailed_status
6196 db_nslcmop_update["detailed-status"] = detailed_status
6197 change_type = "vnf_terminated"
6198 if not nslcmop_operation_state:
6199 nslcmop_operation_state = "COMPLETED"
6200 self.logger.debug(
6201 logging_text
6202 + " task Done with result {} {}".format(
6203 nslcmop_operation_state, detailed_status
6204 )
6205 )
6206
6207 elif update_type == "OPERATE_VNF":
6208 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6209 "vnfInstanceId"
6210 ]
6211 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6212 "changeStateTo"
6213 ]
6214 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6215 "additionalParam"
6216 ]
6217 (result, detailed_status) = await self.rebuild_start_stop(
6218 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6219 )
6220 if result == "FAILED":
6221 nslcmop_operation_state = result
6222 error_description_nslcmop = detailed_status
6223 db_nslcmop_update["detailed-status"] = detailed_status
6224 if not nslcmop_operation_state:
6225 nslcmop_operation_state = "COMPLETED"
6226 self.logger.debug(
6227 logging_text
6228 + " task Done with result {} {}".format(
6229 nslcmop_operation_state, detailed_status
6230 )
6231 )
6232
6233 # If nslcmop_operation_state is None, no operation has failed.
6234 # All operations completed successfully overall.
6235 if not nslcmop_operation_state:
6236 nslcmop_operation_state = "COMPLETED"
6237 db_nsr_update["operational-status"] = old_operational_status
6238
6239 except (DbException, LcmException, N2VCException, K8sException) as e:
6240 self.logger.error(logging_text + "Exit Exception {}".format(e))
6241 exc = e
6242 except asyncio.CancelledError:
6243 self.logger.error(
6244 logging_text + "Cancelled Exception while '{}'".format(step)
6245 )
6246 exc = "Operation was cancelled"
6247 except asyncio.TimeoutError:
6248 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6249 exc = "Timeout"
6250 except Exception as e:
6251 exc = traceback.format_exc()
6252 self.logger.critical(
6253 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6254 exc_info=True,
6255 )
6256 finally:
6257 if exc:
6258 db_nslcmop_update[
6259 "detailed-status"
6260 ] = (
6261 detailed_status
6262 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6263 nslcmop_operation_state = "FAILED"
6264 db_nsr_update["operational-status"] = old_operational_status
6265 if db_nsr:
6266 self._write_ns_status(
6267 nsr_id=nsr_id,
6268 ns_state=db_nsr["nsState"],
6269 current_operation="IDLE",
6270 current_operation_id=None,
6271 other_update=db_nsr_update,
6272 )
6273
6274 self._write_op_status(
6275 op_id=nslcmop_id,
6276 stage="",
6277 error_message=error_description_nslcmop,
6278 operation_state=nslcmop_operation_state,
6279 other_update=db_nslcmop_update,
6280 )
6281
6282 if nslcmop_operation_state:
6283 try:
6284 msg = {
6285 "nsr_id": nsr_id,
6286 "nslcmop_id": nslcmop_id,
6287 "operationState": nslcmop_operation_state,
6288 }
6289 if (
6290 change_type in ("vnf_terminated", "policy_updated")
6291 and member_vnf_index
6292 ):
6293 msg.update({"vnf_member_index": member_vnf_index})
6294 await self.msg.aiowrite("ns", change_type, msg)
6295 except Exception as e:
6296 self.logger.error(
6297 logging_text + "kafka_write notification Exception {}".format(e)
6298 )
6299 self.logger.debug(logging_text + "Exit")
6300 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6301 return nslcmop_operation_state, detailed_status
6302
6303 async def scale(self, nsr_id, nslcmop_id):
6304 # Try to lock HA task here
6305 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6306 if not task_is_locked_by_me:
6307 return
6308
6309 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6310 stage = ["", "", ""]
6311 tasks_dict_info = {}
6312 # ^ stage, step, VIM progress
6313 self.logger.debug(logging_text + "Enter")
6314 # get all needed from database
6315 db_nsr = None
6316 db_nslcmop_update = {}
6317 db_nsr_update = {}
6318 exc = None
6319 # in case of error, indicates what part of scale was failed to put nsr at error status
6320 scale_process = None
6321 old_operational_status = ""
6322 old_config_status = ""
6323 nsi_id = None
6324 try:
6325 # wait for any previous tasks in process
6326 step = "Waiting for previous operations to terminate"
6327 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6328 self._write_ns_status(
6329 nsr_id=nsr_id,
6330 ns_state=None,
6331 current_operation="SCALING",
6332 current_operation_id=nslcmop_id,
6333 )
6334
6335 step = "Getting nslcmop from database"
6336 self.logger.debug(
6337 step + " after having waited for previous tasks to be completed"
6338 )
6339 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6340
6341 step = "Getting nsr from database"
6342 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6343 old_operational_status = db_nsr["operational-status"]
6344 old_config_status = db_nsr["config-status"]
6345
6346 step = "Parsing scaling parameters"
6347 db_nsr_update["operational-status"] = "scaling"
6348 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6349 nsr_deployed = db_nsr["_admin"].get("deployed")
6350
6351 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6352 "scaleByStepData"
6353 ]["member-vnf-index"]
6354 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6355 "scaleByStepData"
6356 ]["scaling-group-descriptor"]
6357 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6358 # for backward compatibility
6359 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6360 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6361 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6362 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6363
6364 step = "Getting vnfr from database"
6365 db_vnfr = self.db.get_one(
6366 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6367 )
6368
6369 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6370
6371 step = "Getting vnfd from database"
6372 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6373
6374 base_folder = db_vnfd["_admin"]["storage"]
6375
6376 step = "Getting scaling-group-descriptor"
6377 scaling_descriptor = find_in_list(
6378 get_scaling_aspect(db_vnfd),
6379 lambda scale_desc: scale_desc["name"] == scaling_group,
6380 )
6381 if not scaling_descriptor:
6382 raise LcmException(
6383 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6384 "at vnfd:scaling-group-descriptor".format(scaling_group)
6385 )
6386
6387 step = "Sending scale order to VIM"
6388 # TODO check if ns is in a proper status
6389 nb_scale_op = 0
6390 if not db_nsr["_admin"].get("scaling-group"):
6391 self.update_db_2(
6392 "nsrs",
6393 nsr_id,
6394 {
6395 "_admin.scaling-group": [
6396 {"name": scaling_group, "nb-scale-op": 0}
6397 ]
6398 },
6399 )
6400 admin_scale_index = 0
6401 else:
6402 for admin_scale_index, admin_scale_info in enumerate(
6403 db_nsr["_admin"]["scaling-group"]
6404 ):
6405 if admin_scale_info["name"] == scaling_group:
6406 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6407 break
6408 else: # not found, set index one plus last element and add new entry with the name
6409 admin_scale_index += 1
6410 db_nsr_update[
6411 "_admin.scaling-group.{}.name".format(admin_scale_index)
6412 ] = scaling_group
6413
6414 vca_scaling_info = []
6415 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6416 if scaling_type == "SCALE_OUT":
6417 if "aspect-delta-details" not in scaling_descriptor:
6418 raise LcmException(
6419 "Aspect delta details not fount in scaling descriptor {}".format(
6420 scaling_descriptor["name"]
6421 )
6422 )
6423 # count if max-instance-count is reached
6424 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6425
6426 scaling_info["scaling_direction"] = "OUT"
6427 scaling_info["vdu-create"] = {}
6428 scaling_info["kdu-create"] = {}
6429 for delta in deltas:
6430 for vdu_delta in delta.get("vdu-delta", {}):
6431 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6432 # vdu_index also provides the number of instances of the targeted vdu
6433 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6434 cloud_init_text = self._get_vdu_cloud_init_content(
6435 vdud, db_vnfd
6436 )
6437 if cloud_init_text:
6438 additional_params = (
6439 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6440 or {}
6441 )
6442 cloud_init_list = []
6443
6444 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6445 max_instance_count = 10
6446 if vdu_profile and "max-number-of-instances" in vdu_profile:
6447 max_instance_count = vdu_profile.get(
6448 "max-number-of-instances", 10
6449 )
6450
6451 default_instance_num = get_number_of_instances(
6452 db_vnfd, vdud["id"]
6453 )
6454 instances_number = vdu_delta.get("number-of-instances", 1)
6455 nb_scale_op += instances_number
6456
6457 new_instance_count = nb_scale_op + default_instance_num
6458 # Check if the new count is over max while the vdu count is less than max.
6459 # If so, assign the new instance count
6460 if new_instance_count > max_instance_count > vdu_count:
6461 instances_number = new_instance_count - max_instance_count
6462 else:
6463 instances_number = instances_number
6464
6465 if new_instance_count > max_instance_count:
6466 raise LcmException(
6467 "reached the limit of {} (max-instance-count) "
6468 "scaling-out operations for the "
6469 "scaling-group-descriptor '{}'".format(
6470 nb_scale_op, scaling_group
6471 )
6472 )
6473 for x in range(vdu_delta.get("number-of-instances", 1)):
6474 if cloud_init_text:
6475 # TODO Information of its own ip is not available because db_vnfr is not updated.
6476 additional_params["OSM"] = get_osm_params(
6477 db_vnfr, vdu_delta["id"], vdu_index + x
6478 )
6479 cloud_init_list.append(
6480 self._parse_cloud_init(
6481 cloud_init_text,
6482 additional_params,
6483 db_vnfd["id"],
6484 vdud["id"],
6485 )
6486 )
6487 vca_scaling_info.append(
6488 {
6489 "osm_vdu_id": vdu_delta["id"],
6490 "member-vnf-index": vnf_index,
6491 "type": "create",
6492 "vdu_index": vdu_index + x,
6493 }
6494 )
6495 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6496 for kdu_delta in delta.get("kdu-resource-delta", {}):
6497 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6498 kdu_name = kdu_profile["kdu-name"]
6499 resource_name = kdu_profile.get("resource-name", "")
6500
6501 # Might have different kdus in the same delta
6502 # Should have list for each kdu
6503 if not scaling_info["kdu-create"].get(kdu_name, None):
6504 scaling_info["kdu-create"][kdu_name] = []
6505
6506 kdur = get_kdur(db_vnfr, kdu_name)
6507 if kdur.get("helm-chart"):
6508 k8s_cluster_type = "helm-chart-v3"
6509 self.logger.debug("kdur: {}".format(kdur))
6510 if (
6511 kdur.get("helm-version")
6512 and kdur.get("helm-version") == "v2"
6513 ):
6514 k8s_cluster_type = "helm-chart"
6515 elif kdur.get("juju-bundle"):
6516 k8s_cluster_type = "juju-bundle"
6517 else:
6518 raise LcmException(
6519 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6520 "juju-bundle. Maybe an old NBI version is running".format(
6521 db_vnfr["member-vnf-index-ref"], kdu_name
6522 )
6523 )
6524
6525 max_instance_count = 10
6526 if kdu_profile and "max-number-of-instances" in kdu_profile:
6527 max_instance_count = kdu_profile.get(
6528 "max-number-of-instances", 10
6529 )
6530
6531 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6532 deployed_kdu, _ = get_deployed_kdu(
6533 nsr_deployed, kdu_name, vnf_index
6534 )
6535 if deployed_kdu is None:
6536 raise LcmException(
6537 "KDU '{}' for vnf '{}' not deployed".format(
6538 kdu_name, vnf_index
6539 )
6540 )
6541 kdu_instance = deployed_kdu.get("kdu-instance")
6542 instance_num = await self.k8scluster_map[
6543 k8s_cluster_type
6544 ].get_scale_count(
6545 resource_name,
6546 kdu_instance,
6547 vca_id=vca_id,
6548 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6549 kdu_model=deployed_kdu.get("kdu-model"),
6550 )
6551 kdu_replica_count = instance_num + kdu_delta.get(
6552 "number-of-instances", 1
6553 )
6554
6555 # Check if the new count is over max while instance_num is less than max.
6556 # If so, assign max instance number to kdu replica count
6557 if kdu_replica_count > max_instance_count > instance_num:
6558 kdu_replica_count = max_instance_count
6559 if kdu_replica_count > max_instance_count:
6560 raise LcmException(
6561 "reached the limit of {} (max-instance-count) "
6562 "scaling-out operations for the "
6563 "scaling-group-descriptor '{}'".format(
6564 instance_num, scaling_group
6565 )
6566 )
6567
6568 for x in range(kdu_delta.get("number-of-instances", 1)):
6569 vca_scaling_info.append(
6570 {
6571 "osm_kdu_id": kdu_name,
6572 "member-vnf-index": vnf_index,
6573 "type": "create",
6574 "kdu_index": instance_num + x - 1,
6575 }
6576 )
6577 scaling_info["kdu-create"][kdu_name].append(
6578 {
6579 "member-vnf-index": vnf_index,
6580 "type": "create",
6581 "k8s-cluster-type": k8s_cluster_type,
6582 "resource-name": resource_name,
6583 "scale": kdu_replica_count,
6584 }
6585 )
6586 elif scaling_type == "SCALE_IN":
6587 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6588
6589 scaling_info["scaling_direction"] = "IN"
6590 scaling_info["vdu-delete"] = {}
6591 scaling_info["kdu-delete"] = {}
6592
6593 for delta in deltas:
6594 for vdu_delta in delta.get("vdu-delta", {}):
6595 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6596 min_instance_count = 0
6597 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6598 if vdu_profile and "min-number-of-instances" in vdu_profile:
6599 min_instance_count = vdu_profile["min-number-of-instances"]
6600
6601 default_instance_num = get_number_of_instances(
6602 db_vnfd, vdu_delta["id"]
6603 )
6604 instance_num = vdu_delta.get("number-of-instances", 1)
6605 nb_scale_op -= instance_num
6606
6607 new_instance_count = nb_scale_op + default_instance_num
6608
6609 if new_instance_count < min_instance_count < vdu_count:
6610 instances_number = min_instance_count - new_instance_count
6611 else:
6612 instances_number = instance_num
6613
6614 if new_instance_count < min_instance_count:
6615 raise LcmException(
6616 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6617 "scaling-group-descriptor '{}'".format(
6618 nb_scale_op, scaling_group
6619 )
6620 )
6621 for x in range(vdu_delta.get("number-of-instances", 1)):
6622 vca_scaling_info.append(
6623 {
6624 "osm_vdu_id": vdu_delta["id"],
6625 "member-vnf-index": vnf_index,
6626 "type": "delete",
6627 "vdu_index": vdu_index - 1 - x,
6628 }
6629 )
6630 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6631 for kdu_delta in delta.get("kdu-resource-delta", {}):
6632 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6633 kdu_name = kdu_profile["kdu-name"]
6634 resource_name = kdu_profile.get("resource-name", "")
6635
6636 if not scaling_info["kdu-delete"].get(kdu_name, None):
6637 scaling_info["kdu-delete"][kdu_name] = []
6638
6639 kdur = get_kdur(db_vnfr, kdu_name)
6640 if kdur.get("helm-chart"):
6641 k8s_cluster_type = "helm-chart-v3"
6642 self.logger.debug("kdur: {}".format(kdur))
6643 if (
6644 kdur.get("helm-version")
6645 and kdur.get("helm-version") == "v2"
6646 ):
6647 k8s_cluster_type = "helm-chart"
6648 elif kdur.get("juju-bundle"):
6649 k8s_cluster_type = "juju-bundle"
6650 else:
6651 raise LcmException(
6652 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6653 "juju-bundle. Maybe an old NBI version is running".format(
6654 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6655 )
6656 )
6657
6658 min_instance_count = 0
6659 if kdu_profile and "min-number-of-instances" in kdu_profile:
6660 min_instance_count = kdu_profile["min-number-of-instances"]
6661
6662 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6663 deployed_kdu, _ = get_deployed_kdu(
6664 nsr_deployed, kdu_name, vnf_index
6665 )
6666 if deployed_kdu is None:
6667 raise LcmException(
6668 "KDU '{}' for vnf '{}' not deployed".format(
6669 kdu_name, vnf_index
6670 )
6671 )
6672 kdu_instance = deployed_kdu.get("kdu-instance")
6673 instance_num = await self.k8scluster_map[
6674 k8s_cluster_type
6675 ].get_scale_count(
6676 resource_name,
6677 kdu_instance,
6678 vca_id=vca_id,
6679 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6680 kdu_model=deployed_kdu.get("kdu-model"),
6681 )
6682 kdu_replica_count = instance_num - kdu_delta.get(
6683 "number-of-instances", 1
6684 )
6685
6686 if kdu_replica_count < min_instance_count < instance_num:
6687 kdu_replica_count = min_instance_count
6688 if kdu_replica_count < min_instance_count:
6689 raise LcmException(
6690 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6691 "scaling-group-descriptor '{}'".format(
6692 instance_num, scaling_group
6693 )
6694 )
6695
6696 for x in range(kdu_delta.get("number-of-instances", 1)):
6697 vca_scaling_info.append(
6698 {
6699 "osm_kdu_id": kdu_name,
6700 "member-vnf-index": vnf_index,
6701 "type": "delete",
6702 "kdu_index": instance_num - x - 1,
6703 }
6704 )
6705 scaling_info["kdu-delete"][kdu_name].append(
6706 {
6707 "member-vnf-index": vnf_index,
6708 "type": "delete",
6709 "k8s-cluster-type": k8s_cluster_type,
6710 "resource-name": resource_name,
6711 "scale": kdu_replica_count,
6712 }
6713 )
6714
6715 # update VDU_SCALING_INFO with the ip addresses of the VDUs to be deleted
6716 vdu_delete = copy(scaling_info.get("vdu-delete"))
6717 if scaling_info["scaling_direction"] == "IN":
6718 for vdur in reversed(db_vnfr["vdur"]):
6719 if vdu_delete.get(vdur["vdu-id-ref"]):
6720 vdu_delete[vdur["vdu-id-ref"]] -= 1
6721 scaling_info["vdu"].append(
6722 {
6723 "name": vdur.get("name") or vdur.get("vdu-name"),
6724 "vdu_id": vdur["vdu-id-ref"],
6725 "interface": [],
6726 }
6727 )
6728 for interface in vdur["interfaces"]:
6729 scaling_info["vdu"][-1]["interface"].append(
6730 {
6731 "name": interface["name"],
6732 "ip_address": interface["ip-address"],
6733 "mac_address": interface.get("mac-address"),
6734 }
6735 )
6736 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6737
6738 # PRE-SCALE BEGIN
6739 step = "Executing pre-scale vnf-config-primitive"
6740 if scaling_descriptor.get("scaling-config-action"):
6741 for scaling_config_action in scaling_descriptor[
6742 "scaling-config-action"
6743 ]:
6744 if (
6745 scaling_config_action.get("trigger") == "pre-scale-in"
6746 and scaling_type == "SCALE_IN"
6747 ) or (
6748 scaling_config_action.get("trigger") == "pre-scale-out"
6749 and scaling_type == "SCALE_OUT"
6750 ):
6751 vnf_config_primitive = scaling_config_action[
6752 "vnf-config-primitive-name-ref"
6753 ]
6754 step = db_nslcmop_update[
6755 "detailed-status"
6756 ] = "executing pre-scale scaling-config-action '{}'".format(
6757 vnf_config_primitive
6758 )
6759
6760 # look for primitive
6761 for config_primitive in (
6762 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6763 ).get("config-primitive", ()):
6764 if config_primitive["name"] == vnf_config_primitive:
6765 break
6766 else:
6767 raise LcmException(
6768 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6769 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6770 "primitive".format(scaling_group, vnf_config_primitive)
6771 )
6772
6773 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6774 if db_vnfr.get("additionalParamsForVnf"):
6775 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6776
6777 scale_process = "VCA"
6778 db_nsr_update["config-status"] = "configuring pre-scaling"
6779 primitive_params = self._map_primitive_params(
6780 config_primitive, {}, vnfr_params
6781 )
6782
6783 # Pre-scale retry check: Check if this sub-operation has been executed before
6784 op_index = self._check_or_add_scale_suboperation(
6785 db_nslcmop,
6786 vnf_index,
6787 vnf_config_primitive,
6788 primitive_params,
6789 "PRE-SCALE",
6790 )
6791 if op_index == self.SUBOPERATION_STATUS_SKIP:
6792 # Skip sub-operation
6793 result = "COMPLETED"
6794 result_detail = "Done"
6795 self.logger.debug(
6796 logging_text
6797 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6798 vnf_config_primitive, result, result_detail
6799 )
6800 )
6801 else:
6802 if op_index == self.SUBOPERATION_STATUS_NEW:
6803 # New sub-operation: Get index of this sub-operation
6804 op_index = (
6805 len(db_nslcmop.get("_admin", {}).get("operations"))
6806 - 1
6807 )
6808 self.logger.debug(
6809 logging_text
6810 + "vnf_config_primitive={} New sub-operation".format(
6811 vnf_config_primitive
6812 )
6813 )
6814 else:
6815 # retry: Get registered params for this existing sub-operation
6816 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6817 op_index
6818 ]
6819 vnf_index = op.get("member_vnf_index")
6820 vnf_config_primitive = op.get("primitive")
6821 primitive_params = op.get("primitive_params")
6822 self.logger.debug(
6823 logging_text
6824 + "vnf_config_primitive={} Sub-operation retry".format(
6825 vnf_config_primitive
6826 )
6827 )
6828 # Execute the primitive, either with new (first-time) or registered (retry) args
6829 ee_descriptor_id = config_primitive.get(
6830 "execution-environment-ref"
6831 )
6832 primitive_name = config_primitive.get(
6833 "execution-environment-primitive", vnf_config_primitive
6834 )
6835 ee_id, vca_type = self._look_for_deployed_vca(
6836 nsr_deployed["VCA"],
6837 member_vnf_index=vnf_index,
6838 vdu_id=None,
6839 vdu_count_index=None,
6840 ee_descriptor_id=ee_descriptor_id,
6841 )
6842 result, result_detail = await self._ns_execute_primitive(
6843 ee_id,
6844 primitive_name,
6845 primitive_params,
6846 vca_type=vca_type,
6847 vca_id=vca_id,
6848 )
6849 self.logger.debug(
6850 logging_text
6851 + "vnf_config_primitive={} Done with result {} {}".format(
6852 vnf_config_primitive, result, result_detail
6853 )
6854 )
6855 # Update operationState = COMPLETED | FAILED
6856 self._update_suboperation_status(
6857 db_nslcmop, op_index, result, result_detail
6858 )
6859
6860 if result == "FAILED":
6861 raise LcmException(result_detail)
6862 db_nsr_update["config-status"] = old_config_status
6863 scale_process = None
6864 # PRE-SCALE END
6865
6866 db_nsr_update[
6867 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6868 ] = nb_scale_op
6869 db_nsr_update[
6870 "_admin.scaling-group.{}.time".format(admin_scale_index)
6871 ] = time()
6872
6873 # SCALE-IN VCA - BEGIN
6874 if vca_scaling_info:
6875 step = db_nslcmop_update[
6876 "detailed-status"
6877 ] = "Deleting the execution environments"
6878 scale_process = "VCA"
6879 for vca_info in vca_scaling_info:
6880 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6881 member_vnf_index = str(vca_info["member-vnf-index"])
6882 self.logger.debug(
6883 logging_text + "vdu info: {}".format(vca_info)
6884 )
6885 if vca_info.get("osm_vdu_id"):
6886 vdu_id = vca_info["osm_vdu_id"]
6887 vdu_index = int(vca_info["vdu_index"])
6888 stage[
6889 1
6890 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6891 member_vnf_index, vdu_id, vdu_index
6892 )
6893 stage[2] = step = "Scaling in VCA"
6894 self._write_op_status(op_id=nslcmop_id, stage=stage)
6895 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6896 config_update = db_nsr["configurationStatus"]
6897 for vca_index, vca in enumerate(vca_update):
6898 if (
6899 (vca or vca.get("ee_id"))
6900 and vca["member-vnf-index"] == member_vnf_index
6901 and vca["vdu_count_index"] == vdu_index
6902 ):
6903 if vca.get("vdu_id"):
6904 config_descriptor = get_configuration(
6905 db_vnfd, vca.get("vdu_id")
6906 )
6907 elif vca.get("kdu_name"):
6908 config_descriptor = get_configuration(
6909 db_vnfd, vca.get("kdu_name")
6910 )
6911 else:
6912 config_descriptor = get_configuration(
6913 db_vnfd, db_vnfd["id"]
6914 )
6915 operation_params = (
6916 db_nslcmop.get("operationParams") or {}
6917 )
6918 exec_terminate_primitives = not operation_params.get(
6919 "skip_terminate_primitives"
6920 ) and vca.get("needed_terminate")
6921 task = asyncio.ensure_future(
6922 asyncio.wait_for(
6923 self.destroy_N2VC(
6924 logging_text,
6925 db_nslcmop,
6926 vca,
6927 config_descriptor,
6928 vca_index,
6929 destroy_ee=True,
6930 exec_primitives=exec_terminate_primitives,
6931 scaling_in=True,
6932 vca_id=vca_id,
6933 ),
6934 timeout=self.timeout.charm_delete,
6935 )
6936 )
6937 tasks_dict_info[task] = "Terminating VCA {}".format(
6938 vca.get("ee_id")
6939 )
6940 del vca_update[vca_index]
6941 del config_update[vca_index]
6942 # wait for pending tasks of terminate primitives
6943 if tasks_dict_info:
6944 self.logger.debug(
6945 logging_text
6946 + "Waiting for tasks {}".format(
6947 list(tasks_dict_info.keys())
6948 )
6949 )
6950 error_list = await self._wait_for_tasks(
6951 logging_text,
6952 tasks_dict_info,
6953 min(
6954 self.timeout.charm_delete, self.timeout.ns_terminate
6955 ),
6956 stage,
6957 nslcmop_id,
6958 )
6959 tasks_dict_info.clear()
6960 if error_list:
6961 raise LcmException("; ".join(error_list))
6962
6963 db_vca_and_config_update = {
6964 "_admin.deployed.VCA": vca_update,
6965 "configurationStatus": config_update,
6966 }
6967 self.update_db_2(
6968 "nsrs", db_nsr["_id"], db_vca_and_config_update
6969 )
6970 scale_process = None
6971 # SCALE-IN VCA - END
6972
6973 # SCALE RO - BEGIN
6974 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6975 scale_process = "RO"
6976 if self.ro_config.ng:
6977 await self._scale_ng_ro(
6978 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6979 )
6980 scaling_info.pop("vdu-create", None)
6981 scaling_info.pop("vdu-delete", None)
6982
6983 scale_process = None
6984 # SCALE RO - END
6985
6986 # SCALE KDU - BEGIN
6987 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6988 scale_process = "KDU"
6989 await self._scale_kdu(
6990 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6991 )
6992 scaling_info.pop("kdu-create", None)
6993 scaling_info.pop("kdu-delete", None)
6994
6995 scale_process = None
6996 # SCALE KDU - END
6997
6998 if db_nsr_update:
6999 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7000
7001 # SCALE-UP VCA - BEGIN
7002 if vca_scaling_info:
7003 step = db_nslcmop_update[
7004 "detailed-status"
7005 ] = "Creating new execution environments"
7006 scale_process = "VCA"
7007 for vca_info in vca_scaling_info:
7008 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7009 member_vnf_index = str(vca_info["member-vnf-index"])
7010 self.logger.debug(
7011 logging_text + "vdu info: {}".format(vca_info)
7012 )
7013 vnfd_id = db_vnfr["vnfd-ref"]
7014 if vca_info.get("osm_vdu_id"):
7015 vdu_index = int(vca_info["vdu_index"])
7016 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7017 if db_vnfr.get("additionalParamsForVnf"):
7018 deploy_params.update(
7019 parse_yaml_strings(
7020 db_vnfr["additionalParamsForVnf"].copy()
7021 )
7022 )
7023 descriptor_config = get_configuration(
7024 db_vnfd, db_vnfd["id"]
7025 )
7026 if descriptor_config:
7027 vdu_id = None
7028 vdu_name = None
7029 kdu_name = None
7030 kdu_index = None
7031 self._deploy_n2vc(
7032 logging_text=logging_text
7033 + "member_vnf_index={} ".format(member_vnf_index),
7034 db_nsr=db_nsr,
7035 db_vnfr=db_vnfr,
7036 nslcmop_id=nslcmop_id,
7037 nsr_id=nsr_id,
7038 nsi_id=nsi_id,
7039 vnfd_id=vnfd_id,
7040 vdu_id=vdu_id,
7041 kdu_name=kdu_name,
7042 kdu_index=kdu_index,
7043 member_vnf_index=member_vnf_index,
7044 vdu_index=vdu_index,
7045 vdu_name=vdu_name,
7046 deploy_params=deploy_params,
7047 descriptor_config=descriptor_config,
7048 base_folder=base_folder,
7049 task_instantiation_info=tasks_dict_info,
7050 stage=stage,
7051 )
7052 vdu_id = vca_info["osm_vdu_id"]
7053 vdur = find_in_list(
7054 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7055 )
7056 descriptor_config = get_configuration(db_vnfd, vdu_id)
7057 if vdur.get("additionalParams"):
7058 deploy_params_vdu = parse_yaml_strings(
7059 vdur["additionalParams"]
7060 )
7061 else:
7062 deploy_params_vdu = deploy_params
7063 deploy_params_vdu["OSM"] = get_osm_params(
7064 db_vnfr, vdu_id, vdu_count_index=vdu_index
7065 )
7066 if descriptor_config:
7067 vdu_name = None
7068 kdu_name = None
7069 kdu_index = None
7070 stage[
7071 1
7072 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7073 member_vnf_index, vdu_id, vdu_index
7074 )
7075 stage[2] = step = "Scaling out VCA"
7076 self._write_op_status(op_id=nslcmop_id, stage=stage)
7077 self._deploy_n2vc(
7078 logging_text=logging_text
7079 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7080 member_vnf_index, vdu_id, vdu_index
7081 ),
7082 db_nsr=db_nsr,
7083 db_vnfr=db_vnfr,
7084 nslcmop_id=nslcmop_id,
7085 nsr_id=nsr_id,
7086 nsi_id=nsi_id,
7087 vnfd_id=vnfd_id,
7088 vdu_id=vdu_id,
7089 kdu_name=kdu_name,
7090 member_vnf_index=member_vnf_index,
7091 vdu_index=vdu_index,
7092 kdu_index=kdu_index,
7093 vdu_name=vdu_name,
7094 deploy_params=deploy_params_vdu,
7095 descriptor_config=descriptor_config,
7096 base_folder=base_folder,
7097 task_instantiation_info=tasks_dict_info,
7098 stage=stage,
7099 )
7100 # SCALE-UP VCA - END
7101 scale_process = None
7102
7103 # POST-SCALE BEGIN
7104 # execute primitive service POST-SCALING
7105 step = "Executing post-scale vnf-config-primitive"
7106 if scaling_descriptor.get("scaling-config-action"):
7107 for scaling_config_action in scaling_descriptor[
7108 "scaling-config-action"
7109 ]:
7110 if (
7111 scaling_config_action.get("trigger") == "post-scale-in"
7112 and scaling_type == "SCALE_IN"
7113 ) or (
7114 scaling_config_action.get("trigger") == "post-scale-out"
7115 and scaling_type == "SCALE_OUT"
7116 ):
7117 vnf_config_primitive = scaling_config_action[
7118 "vnf-config-primitive-name-ref"
7119 ]
7120 step = db_nslcmop_update[
7121 "detailed-status"
7122 ] = "executing post-scale scaling-config-action '{}'".format(
7123 vnf_config_primitive
7124 )
7125
7126 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7127 if db_vnfr.get("additionalParamsForVnf"):
7128 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7129
7130 # look for primitive
7131 for config_primitive in (
7132 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7133 ).get("config-primitive", ()):
7134 if config_primitive["name"] == vnf_config_primitive:
7135 break
7136 else:
7137 raise LcmException(
7138 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7139 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7140 "config-primitive".format(
7141 scaling_group, vnf_config_primitive
7142 )
7143 )
7144 scale_process = "VCA"
7145 db_nsr_update["config-status"] = "configuring post-scaling"
7146 primitive_params = self._map_primitive_params(
7147 config_primitive, {}, vnfr_params
7148 )
7149
7150 # Post-scale retry check: Check if this sub-operation has been executed before
7151 op_index = self._check_or_add_scale_suboperation(
7152 db_nslcmop,
7153 vnf_index,
7154 vnf_config_primitive,
7155 primitive_params,
7156 "POST-SCALE",
7157 )
7158 if op_index == self.SUBOPERATION_STATUS_SKIP:
7159 # Skip sub-operation
7160 result = "COMPLETED"
7161 result_detail = "Done"
7162 self.logger.debug(
7163 logging_text
7164 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7165 vnf_config_primitive, result, result_detail
7166 )
7167 )
7168 else:
7169 if op_index == self.SUBOPERATION_STATUS_NEW:
7170 # New sub-operation: Get index of this sub-operation
7171 op_index = (
7172 len(db_nslcmop.get("_admin", {}).get("operations"))
7173 - 1
7174 )
7175 self.logger.debug(
7176 logging_text
7177 + "vnf_config_primitive={} New sub-operation".format(
7178 vnf_config_primitive
7179 )
7180 )
7181 else:
7182 # retry: Get registered params for this existing sub-operation
7183 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7184 op_index
7185 ]
7186 vnf_index = op.get("member_vnf_index")
7187 vnf_config_primitive = op.get("primitive")
7188 primitive_params = op.get("primitive_params")
7189 self.logger.debug(
7190 logging_text
7191 + "vnf_config_primitive={} Sub-operation retry".format(
7192 vnf_config_primitive
7193 )
7194 )
7195 # Execute the primitive, either with new (first-time) or registered (reintent) args
7196 ee_descriptor_id = config_primitive.get(
7197 "execution-environment-ref"
7198 )
7199 primitive_name = config_primitive.get(
7200 "execution-environment-primitive", vnf_config_primitive
7201 )
7202 ee_id, vca_type = self._look_for_deployed_vca(
7203 nsr_deployed["VCA"],
7204 member_vnf_index=vnf_index,
7205 vdu_id=None,
7206 vdu_count_index=None,
7207 ee_descriptor_id=ee_descriptor_id,
7208 )
7209 result, result_detail = await self._ns_execute_primitive(
7210 ee_id,
7211 primitive_name,
7212 primitive_params,
7213 vca_type=vca_type,
7214 vca_id=vca_id,
7215 )
7216 self.logger.debug(
7217 logging_text
7218 + "vnf_config_primitive={} Done with result {} {}".format(
7219 vnf_config_primitive, result, result_detail
7220 )
7221 )
7222 # Update operationState = COMPLETED | FAILED
7223 self._update_suboperation_status(
7224 db_nslcmop, op_index, result, result_detail
7225 )
7226
7227 if result == "FAILED":
7228 raise LcmException(result_detail)
7229 db_nsr_update["config-status"] = old_config_status
7230 scale_process = None
7231 # POST-SCALE END
7232
7233 db_nsr_update[
7234 "detailed-status"
7235 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7236 db_nsr_update["operational-status"] = (
7237 "running"
7238 if old_operational_status == "failed"
7239 else old_operational_status
7240 )
7241 db_nsr_update["config-status"] = old_config_status
7242 return
7243 except (
7244 ROclient.ROClientException,
7245 DbException,
7246 LcmException,
7247 NgRoException,
7248 ) as e:
7249 self.logger.error(logging_text + "Exit Exception {}".format(e))
7250 exc = e
7251 except asyncio.CancelledError:
7252 self.logger.error(
7253 logging_text + "Cancelled Exception while '{}'".format(step)
7254 )
7255 exc = "Operation was cancelled"
7256 except Exception as e:
7257 exc = traceback.format_exc()
7258 self.logger.critical(
7259 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7260 exc_info=True,
7261 )
7262 finally:
7263 self._write_ns_status(
7264 nsr_id=nsr_id,
7265 ns_state=None,
7266 current_operation="IDLE",
7267 current_operation_id=None,
7268 )
7269 if tasks_dict_info:
7270 stage[1] = "Waiting for instantiate pending tasks."
7271 self.logger.debug(logging_text + stage[1])
7272 exc = await self._wait_for_tasks(
7273 logging_text,
7274 tasks_dict_info,
7275 self.timeout.ns_deploy,
7276 stage,
7277 nslcmop_id,
7278 nsr_id=nsr_id,
7279 )
7280 if exc:
7281 db_nslcmop_update[
7282 "detailed-status"
7283 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7284 nslcmop_operation_state = "FAILED"
7285 if db_nsr:
7286 db_nsr_update["operational-status"] = old_operational_status
7287 db_nsr_update["config-status"] = old_config_status
7288 db_nsr_update["detailed-status"] = ""
7289 if scale_process:
7290 if "VCA" in scale_process:
7291 db_nsr_update["config-status"] = "failed"
7292 if "RO" in scale_process:
7293 db_nsr_update["operational-status"] = "failed"
7294 db_nsr_update[
7295 "detailed-status"
7296 ] = "FAILED scaling nslcmop={} {}: {}".format(
7297 nslcmop_id, step, exc
7298 )
7299 else:
7300 error_description_nslcmop = None
7301 nslcmop_operation_state = "COMPLETED"
7302 db_nslcmop_update["detailed-status"] = "Done"
7303
7304 self._write_op_status(
7305 op_id=nslcmop_id,
7306 stage="",
7307 error_message=error_description_nslcmop,
7308 operation_state=nslcmop_operation_state,
7309 other_update=db_nslcmop_update,
7310 )
7311 if db_nsr:
7312 self._write_ns_status(
7313 nsr_id=nsr_id,
7314 ns_state=None,
7315 current_operation="IDLE",
7316 current_operation_id=None,
7317 other_update=db_nsr_update,
7318 )
7319
7320 if nslcmop_operation_state:
7321 try:
7322 msg = {
7323 "nsr_id": nsr_id,
7324 "nslcmop_id": nslcmop_id,
7325 "operationState": nslcmop_operation_state,
7326 }
7327 await self.msg.aiowrite("ns", "scaled", msg)
7328 except Exception as e:
7329 self.logger.error(
7330 logging_text + "kafka_write notification Exception {}".format(e)
7331 )
7332 self.logger.debug(logging_text + "Exit")
7333 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7334
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs of a NS as requested by ``scaling_info``.

        For every KDU instance entry it runs, in order:
        1. the sorted terminate-config-primitives (only for "delete" scaling and
           only when the KDU has no juju execution environment of its own),
        2. the scale operation itself on the matching K8s cluster client, and
        3. the sorted initial-config-primitives (only for "create" scaling and,
           again, only when there is no juju execution environment).

        :param logging_text: prefix for all log messages of this task
        :param nsr_id: NS record id, used to address the "nsrs" DB document
        :param nsr_deployed: content of nsr "_admin.deployed", used to locate
            the deployed KDU (cluster uuid, kdu-instance, kdu-model)
        :param db_vnfd: VNF descriptor of the VNF that owns the KDUs
        :param vca_id: VCA id forwarded to the K8s client calls
        :param scaling_info: dict with a "kdu-create" or "kdu-delete" key
            mapping kdu_name -> list of per-instance scaling details
        :raises asyncio.TimeoutError: when a primitive or the scale operation
            exceeds its outer timeout
        """
        # only one of the two keys is expected per operation; "create" wins if both
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # DB location where the K8s client reports operation progress
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run terminate primitives here only when no juju EE is in
                    # charge of them for this KDU
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # execute in the descriptor-defined sequence order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung client call;
                            # outer timeout = inner total_timeout * outer factor
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # the scale itself is executed for both "create" and "delete"
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run initial primitives here only when no juju EE is in
                    # charge of them for this KDU
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # execute in the descriptor-defined sequence order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): fixed 600 s timeout here, unlike the
                            # configurable one used for terminate primitives
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7444
7445 async def _scale_ng_ro(
7446 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7447 ):
7448 nsr_id = db_nslcmop["nsInstanceId"]
7449 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7450 db_vnfrs = {}
7451
7452 # read from db: vnfd's for every vnf
7453 db_vnfds = []
7454
7455 # for each vnf in ns, read vnfd
7456 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7457 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7458 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7459 # if we haven't this vnfd, read it from db
7460 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7461 # read from db
7462 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7463 db_vnfds.append(vnfd)
7464 n2vc_key = self.n2vc.get_public_key()
7465 n2vc_key_list = [n2vc_key]
7466 self.scale_vnfr(
7467 db_vnfr,
7468 vdu_scaling_info.get("vdu-create"),
7469 vdu_scaling_info.get("vdu-delete"),
7470 mark_delete=True,
7471 )
7472 # db_vnfr has been updated, update db_vnfrs to use it
7473 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7474 await self._instantiate_ng_ro(
7475 logging_text,
7476 nsr_id,
7477 db_nsd,
7478 db_nsr,
7479 db_nslcmop,
7480 db_vnfrs,
7481 db_vnfds,
7482 n2vc_key_list,
7483 stage=stage,
7484 start_deploy=time(),
7485 timeout_ns_deploy=self.timeout.ns_deploy,
7486 )
7487 if vdu_scaling_info.get("vdu-delete"):
7488 self.scale_vnfr(
7489 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7490 )
7491
7492 async def extract_prometheus_scrape_jobs(
7493 self,
7494 ee_id: str,
7495 artifact_path: str,
7496 ee_config_descriptor: dict,
7497 vnfr_id: str,
7498 nsr_id: str,
7499 target_ip: str,
7500 element_type: str,
7501 vnf_member_index: str = "",
7502 vdu_id: str = "",
7503 vdu_index: int = None,
7504 kdu_name: str = "",
7505 kdu_index: int = None,
7506 ) -> dict:
7507 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7508 This method will wait until the corresponding VDU or KDU is fully instantiated
7509
7510 Args:
7511 ee_id (str): Execution Environment ID
7512 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7513 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7514 vnfr_id (str): VNFR ID where this EE applies
7515 nsr_id (str): NSR ID where this EE applies
7516 target_ip (str): VDU/KDU instance IP address
7517 element_type (str): NS or VNF or VDU or KDU
7518 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7519 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7520 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7521 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7522 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7523
7524 Raises:
7525 LcmException: When the VDU or KDU instance was not found in an hour
7526
7527 Returns:
7528 _type_: Prometheus jobs
7529 """
7530 # default the vdur and kdur names to an empty string, to avoid any later
7531 # problem with Prometheus when the element type is not VDU or KDU
7532 vdur_name = ""
7533 kdur_name = ""
7534
7535 # look if exist a file called 'prometheus*.j2' and
7536 artifact_content = self.fs.dir_ls(artifact_path)
7537 job_file = next(
7538 (
7539 f
7540 for f in artifact_content
7541 if f.startswith("prometheus") and f.endswith(".j2")
7542 ),
7543 None,
7544 )
7545 if not job_file:
7546 return
7547 self.logger.debug("Artifact path{}".format(artifact_path))
7548 self.logger.debug("job file{}".format(job_file))
7549 with self.fs.file_open((artifact_path, job_file), "r") as f:
7550 job_data = f.read()
7551
7552 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7553 if element_type in ("VDU", "KDU"):
7554 for _ in range(360):
7555 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7556 if vdu_id and vdu_index is not None:
7557 vdur = next(
7558 (
7559 x
7560 for x in get_iterable(db_vnfr, "vdur")
7561 if (
7562 x.get("vdu-id-ref") == vdu_id
7563 and x.get("count-index") == vdu_index
7564 )
7565 ),
7566 {},
7567 )
7568 if vdur.get("name"):
7569 vdur_name = vdur.get("name")
7570 break
7571 if kdu_name and kdu_index is not None:
7572 kdur = next(
7573 (
7574 x
7575 for x in get_iterable(db_vnfr, "kdur")
7576 if (
7577 x.get("kdu-name") == kdu_name
7578 and x.get("count-index") == kdu_index
7579 )
7580 ),
7581 {},
7582 )
7583 if kdur.get("name"):
7584 kdur_name = kdur.get("name")
7585 break
7586
7587 await asyncio.sleep(10)
7588 else:
7589 if vdu_id and vdu_index is not None:
7590 raise LcmException(
7591 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7592 )
7593 if kdu_name and kdu_index is not None:
7594 raise LcmException(
7595 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7596 )
7597
7598 # TODO get_service
7599 if ee_id is not None:
7600 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7601 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7602 host_port = "80"
7603 vnfr_id = vnfr_id.replace("-", "")
7604 variables = {
7605 "JOB_NAME": vnfr_id,
7606 "TARGET_IP": target_ip,
7607 "EXPORTER_POD_IP": host_name,
7608 "EXPORTER_POD_PORT": host_port,
7609 "NSR_ID": nsr_id,
7610 "VNF_MEMBER_INDEX": vnf_member_index,
7611 "VDUR_NAME": vdur_name,
7612 "KDUR_NAME": kdur_name,
7613 "ELEMENT_TYPE": element_type,
7614 }
7615 else:
7616 metric_path = ee_config_descriptor["metric-path"]
7617 target_port = ee_config_descriptor["metric-port"]
7618 vnfr_id = vnfr_id.replace("-", "")
7619 variables = {
7620 "JOB_NAME": vnfr_id,
7621 "TARGET_IP": target_ip,
7622 "TARGET_PORT": target_port,
7623 "METRIC_PATH": metric_path,
7624 }
7625
7626 job_list = parse_job(job_data, variables)
7627 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7628 for job in job_list:
7629 if (
7630 not isinstance(job.get("job_name"), str)
7631 or vnfr_id not in job["job_name"]
7632 ):
7633 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7634 job["nsr_id"] = nsr_id
7635 job["vnfr_id"] = vnfr_id
7636 return job_list
7637
7638 async def rebuild_start_stop(
7639 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7640 ):
7641 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7642 self.logger.info(logging_text + "Enter")
7643 stage = ["Preparing the environment", ""]
7644 # database nsrs record
7645 db_nsr_update = {}
7646 vdu_vim_name = None
7647 vim_vm_id = None
7648 # in case of error, indicates what part of scale was failed to put nsr at error status
7649 start_deploy = time()
7650 try:
7651 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7652 vim_account_id = db_vnfr.get("vim-account-id")
7653 vim_info_key = "vim:" + vim_account_id
7654 vdu_id = additional_param["vdu_id"]
7655 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7656 vdur = find_in_list(
7657 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7658 )
7659 if vdur:
7660 vdu_vim_name = vdur["name"]
7661 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7662 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7663 else:
7664 raise LcmException("Target vdu is not found")
7665 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7666 # wait for any previous tasks in process
7667 stage[1] = "Waiting for previous operations to terminate"
7668 self.logger.info(stage[1])
7669 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7670
7671 stage[1] = "Reading from database."
7672 self.logger.info(stage[1])
7673 self._write_ns_status(
7674 nsr_id=nsr_id,
7675 ns_state=None,
7676 current_operation=operation_type.upper(),
7677 current_operation_id=nslcmop_id,
7678 )
7679 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7680
7681 # read from db: ns
7682 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7683 db_nsr_update["operational-status"] = operation_type
7684 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7685 # Payload for RO
7686 desc = {
7687 operation_type: {
7688 "vim_vm_id": vim_vm_id,
7689 "vnf_id": vnf_id,
7690 "vdu_index": additional_param["count-index"],
7691 "vdu_id": vdur["id"],
7692 "target_vim": target_vim,
7693 "vim_account_id": vim_account_id,
7694 }
7695 }
7696 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7697 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7698 self.logger.info("ro nsr id: {}".format(nsr_id))
7699 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7700 self.logger.info("response from RO: {}".format(result_dict))
7701 action_id = result_dict["action_id"]
7702 await self._wait_ng_ro(
7703 nsr_id,
7704 action_id,
7705 nslcmop_id,
7706 start_deploy,
7707 self.timeout.operate,
7708 None,
7709 "start_stop_rebuild",
7710 )
7711 return "COMPLETED", "Done"
7712 except (ROclient.ROClientException, DbException, LcmException) as e:
7713 self.logger.error("Exit Exception {}".format(e))
7714 exc = e
7715 except asyncio.CancelledError:
7716 self.logger.error("Cancelled Exception while '{}'".format(stage))
7717 exc = "Operation was cancelled"
7718 except Exception as e:
7719 exc = traceback.format_exc()
7720 self.logger.critical(
7721 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7722 )
7723 return "FAILED", "Error in operate VNF {}".format(exc)
7724
7725 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7726 """
7727 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7728
7729 :param: vim_account_id: VIM Account ID
7730
7731 :return: (cloud_name, cloud_credential)
7732 """
7733 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7734 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7735
7736 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7737 """
7738 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7739
7740 :param: vim_account_id: VIM Account ID
7741
7742 :return: (cloud_name, cloud_credential)
7743 """
7744 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7745 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7746
7747 async def migrate(self, nsr_id, nslcmop_id):
7748 """
7749 Migrate VNFs and VDUs instances in a NS
7750
7751 :param: nsr_id: NS Instance ID
7752 :param: nslcmop_id: nslcmop ID of migrate
7753
7754 """
7755 # Try to lock HA task here
7756 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7757 if not task_is_locked_by_me:
7758 return
7759 logging_text = "Task ns={} migrate ".format(nsr_id)
7760 self.logger.debug(logging_text + "Enter")
7761 # get all needed from database
7762 db_nslcmop = None
7763 db_nslcmop_update = {}
7764 nslcmop_operation_state = None
7765 db_nsr_update = {}
7766 target = {}
7767 exc = None
7768 # in case of error, indicates what part of scale was failed to put nsr at error status
7769 start_deploy = time()
7770
7771 try:
7772 # wait for any previous tasks in process
7773 step = "Waiting for previous operations to terminate"
7774 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7775
7776 self._write_ns_status(
7777 nsr_id=nsr_id,
7778 ns_state=None,
7779 current_operation="MIGRATING",
7780 current_operation_id=nslcmop_id,
7781 )
7782 step = "Getting nslcmop from database"
7783 self.logger.debug(
7784 step + " after having waited for previous tasks to be completed"
7785 )
7786 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7787 migrate_params = db_nslcmop.get("operationParams")
7788
7789 target = {}
7790 target.update(migrate_params)
7791 desc = await self.RO.migrate(nsr_id, target)
7792 self.logger.debug("RO return > {}".format(desc))
7793 action_id = desc["action_id"]
7794 await self._wait_ng_ro(
7795 nsr_id,
7796 action_id,
7797 nslcmop_id,
7798 start_deploy,
7799 self.timeout.migrate,
7800 operation="migrate",
7801 )
7802 except (ROclient.ROClientException, DbException, LcmException) as e:
7803 self.logger.error("Exit Exception {}".format(e))
7804 exc = e
7805 except asyncio.CancelledError:
7806 self.logger.error("Cancelled Exception while '{}'".format(step))
7807 exc = "Operation was cancelled"
7808 except Exception as e:
7809 exc = traceback.format_exc()
7810 self.logger.critical(
7811 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7812 )
7813 finally:
7814 self._write_ns_status(
7815 nsr_id=nsr_id,
7816 ns_state=None,
7817 current_operation="IDLE",
7818 current_operation_id=None,
7819 )
7820 if exc:
7821 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7822 nslcmop_operation_state = "FAILED"
7823 else:
7824 nslcmop_operation_state = "COMPLETED"
7825 db_nslcmop_update["detailed-status"] = "Done"
7826 db_nsr_update["detailed-status"] = "Done"
7827
7828 self._write_op_status(
7829 op_id=nslcmop_id,
7830 stage="",
7831 error_message="",
7832 operation_state=nslcmop_operation_state,
7833 other_update=db_nslcmop_update,
7834 )
7835 if nslcmop_operation_state:
7836 try:
7837 msg = {
7838 "nsr_id": nsr_id,
7839 "nslcmop_id": nslcmop_id,
7840 "operationState": nslcmop_operation_state,
7841 }
7842 await self.msg.aiowrite("ns", "migrated", msg)
7843 except Exception as e:
7844 self.logger.error(
7845 logging_text + "kafka_write notification Exception {}".format(e)
7846 )
7847 self.logger.debug(logging_text + "Exit")
7848 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7849
7850 async def heal(self, nsr_id, nslcmop_id):
7851 """
7852 Heal NS
7853
7854 :param nsr_id: ns instance to heal
7855 :param nslcmop_id: operation to run
7856 :return:
7857 """
7858
7859 # Try to lock HA task here
7860 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7861 if not task_is_locked_by_me:
7862 return
7863
7864 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7865 stage = ["", "", ""]
7866 tasks_dict_info = {}
7867 # ^ stage, step, VIM progress
7868 self.logger.debug(logging_text + "Enter")
7869 # get all needed from database
7870 db_nsr = None
7871 db_nslcmop_update = {}
7872 db_nsr_update = {}
7873 db_vnfrs = {} # vnf's info indexed by _id
7874 exc = None
7875 old_operational_status = ""
7876 old_config_status = ""
7877 nsi_id = None
7878 try:
7879 # wait for any previous tasks in process
7880 step = "Waiting for previous operations to terminate"
7881 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7882 self._write_ns_status(
7883 nsr_id=nsr_id,
7884 ns_state=None,
7885 current_operation="HEALING",
7886 current_operation_id=nslcmop_id,
7887 )
7888
7889 step = "Getting nslcmop from database"
7890 self.logger.debug(
7891 step + " after having waited for previous tasks to be completed"
7892 )
7893 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7894
7895 step = "Getting nsr from database"
7896 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7897 old_operational_status = db_nsr["operational-status"]
7898 old_config_status = db_nsr["config-status"]
7899
7900 db_nsr_update = {
7901 "_admin.deployed.RO.operational-status": "healing",
7902 }
7903 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7904
7905 step = "Sending heal order to VIM"
7906 await self.heal_RO(
7907 logging_text=logging_text,
7908 nsr_id=nsr_id,
7909 db_nslcmop=db_nslcmop,
7910 stage=stage,
7911 )
7912 # VCA tasks
7913 # read from db: nsd
7914 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7915 self.logger.debug(logging_text + stage[1])
7916 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7917 self.fs.sync(db_nsr["nsd-id"])
7918 db_nsr["nsd"] = nsd
7919 # read from db: vnfr's of this ns
7920 step = "Getting vnfrs from db"
7921 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7922 for vnfr in db_vnfrs_list:
7923 db_vnfrs[vnfr["_id"]] = vnfr
7924 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7925
7926 # Check for each target VNF
7927 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7928 for target_vnf in target_list:
7929 # Find this VNF in the list from DB
7930 vnfr_id = target_vnf.get("vnfInstanceId", None)
7931 if vnfr_id:
7932 db_vnfr = db_vnfrs[vnfr_id]
7933 vnfd_id = db_vnfr.get("vnfd-id")
7934 vnfd_ref = db_vnfr.get("vnfd-ref")
7935 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7936 base_folder = vnfd["_admin"]["storage"]
7937 vdu_id = None
7938 vdu_index = 0
7939 vdu_name = None
7940 kdu_name = None
7941 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7942 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7943
7944 # Check each target VDU and deploy N2VC
7945 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7946 "vdu", []
7947 )
7948 if not target_vdu_list:
7949 # Codigo nuevo para crear diccionario
7950 target_vdu_list = []
7951 for existing_vdu in db_vnfr.get("vdur"):
7952 vdu_name = existing_vdu.get("vdu-name", None)
7953 vdu_index = existing_vdu.get("count-index", 0)
7954 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7955 "run-day1", False
7956 )
7957 vdu_to_be_healed = {
7958 "vdu-id": vdu_name,
7959 "count-index": vdu_index,
7960 "run-day1": vdu_run_day1,
7961 }
7962 target_vdu_list.append(vdu_to_be_healed)
7963 for target_vdu in target_vdu_list:
7964 deploy_params_vdu = target_vdu
7965 # Set run-day1 vnf level value if not vdu level value exists
7966 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
7967 "additionalParams", {}
7968 ).get("run-day1"):
7969 deploy_params_vdu["run-day1"] = target_vnf[
7970 "additionalParams"
7971 ].get("run-day1")
7972 vdu_name = target_vdu.get("vdu-id", None)
7973 # TODO: Get vdu_id from vdud.
7974 vdu_id = vdu_name
7975 # For multi instance VDU count-index is mandatory
7976 # For single session VDU count-indes is 0
7977 vdu_index = target_vdu.get("count-index", 0)
7978
7979 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7980 stage[1] = "Deploying Execution Environments."
7981 self.logger.debug(logging_text + stage[1])
7982
7983 # VNF Level charm. Normal case when proxy charms.
7984 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7985 descriptor_config = get_configuration(vnfd, vnfd_ref)
7986 if descriptor_config:
7987 # Continue if healed machine is management machine
7988 vnf_ip_address = db_vnfr.get("ip-address")
7989 target_instance = None
7990 for instance in db_vnfr.get("vdur", None):
7991 if (
7992 instance["vdu-name"] == vdu_name
7993 and instance["count-index"] == vdu_index
7994 ):
7995 target_instance = instance
7996 break
7997 if vnf_ip_address == target_instance.get("ip-address"):
7998 self._heal_n2vc(
7999 logging_text=logging_text
8000 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8001 member_vnf_index, vdu_name, vdu_index
8002 ),
8003 db_nsr=db_nsr,
8004 db_vnfr=db_vnfr,
8005 nslcmop_id=nslcmop_id,
8006 nsr_id=nsr_id,
8007 nsi_id=nsi_id,
8008 vnfd_id=vnfd_ref,
8009 vdu_id=None,
8010 kdu_name=None,
8011 member_vnf_index=member_vnf_index,
8012 vdu_index=0,
8013 vdu_name=None,
8014 deploy_params=deploy_params_vdu,
8015 descriptor_config=descriptor_config,
8016 base_folder=base_folder,
8017 task_instantiation_info=tasks_dict_info,
8018 stage=stage,
8019 )
8020
8021 # VDU Level charm. Normal case with native charms.
8022 descriptor_config = get_configuration(vnfd, vdu_name)
8023 if descriptor_config:
8024 self._heal_n2vc(
8025 logging_text=logging_text
8026 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8027 member_vnf_index, vdu_name, vdu_index
8028 ),
8029 db_nsr=db_nsr,
8030 db_vnfr=db_vnfr,
8031 nslcmop_id=nslcmop_id,
8032 nsr_id=nsr_id,
8033 nsi_id=nsi_id,
8034 vnfd_id=vnfd_ref,
8035 vdu_id=vdu_id,
8036 kdu_name=kdu_name,
8037 member_vnf_index=member_vnf_index,
8038 vdu_index=vdu_index,
8039 vdu_name=vdu_name,
8040 deploy_params=deploy_params_vdu,
8041 descriptor_config=descriptor_config,
8042 base_folder=base_folder,
8043 task_instantiation_info=tasks_dict_info,
8044 stage=stage,
8045 )
8046
8047 except (
8048 ROclient.ROClientException,
8049 DbException,
8050 LcmException,
8051 NgRoException,
8052 ) as e:
8053 self.logger.error(logging_text + "Exit Exception {}".format(e))
8054 exc = e
8055 except asyncio.CancelledError:
8056 self.logger.error(
8057 logging_text + "Cancelled Exception while '{}'".format(step)
8058 )
8059 exc = "Operation was cancelled"
8060 except Exception as e:
8061 exc = traceback.format_exc()
8062 self.logger.critical(
8063 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8064 exc_info=True,
8065 )
8066 finally:
8067 if tasks_dict_info:
8068 stage[1] = "Waiting for healing pending tasks."
8069 self.logger.debug(logging_text + stage[1])
8070 exc = await self._wait_for_tasks(
8071 logging_text,
8072 tasks_dict_info,
8073 self.timeout.ns_deploy,
8074 stage,
8075 nslcmop_id,
8076 nsr_id=nsr_id,
8077 )
8078 if exc:
8079 db_nslcmop_update[
8080 "detailed-status"
8081 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
8082 nslcmop_operation_state = "FAILED"
8083 if db_nsr:
8084 db_nsr_update["operational-status"] = old_operational_status
8085 db_nsr_update["config-status"] = old_config_status
8086 db_nsr_update[
8087 "detailed-status"
8088 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
8089 for task, task_name in tasks_dict_info.items():
8090 if not task.done() or task.cancelled() or task.exception():
8091 if task_name.startswith(self.task_name_deploy_vca):
8092 # A N2VC task is pending
8093 db_nsr_update["config-status"] = "failed"
8094 else:
8095 # RO task is pending
8096 db_nsr_update["operational-status"] = "failed"
8097 else:
8098 error_description_nslcmop = None
8099 nslcmop_operation_state = "COMPLETED"
8100 db_nslcmop_update["detailed-status"] = "Done"
8101 db_nsr_update["detailed-status"] = "Done"
8102 db_nsr_update["operational-status"] = "running"
8103 db_nsr_update["config-status"] = "configured"
8104
8105 self._write_op_status(
8106 op_id=nslcmop_id,
8107 stage="",
8108 error_message=error_description_nslcmop,
8109 operation_state=nslcmop_operation_state,
8110 other_update=db_nslcmop_update,
8111 )
8112 if db_nsr:
8113 self._write_ns_status(
8114 nsr_id=nsr_id,
8115 ns_state=None,
8116 current_operation="IDLE",
8117 current_operation_id=None,
8118 other_update=db_nsr_update,
8119 )
8120
8121 if nslcmop_operation_state:
8122 try:
8123 msg = {
8124 "nsr_id": nsr_id,
8125 "nslcmop_id": nslcmop_id,
8126 "operationState": nslcmop_operation_state,
8127 }
8128 await self.msg.aiowrite("ns", "healed", msg)
8129 except Exception as e:
8130 self.logger.error(
8131 logging_text + "kafka_write notification Exception {}".format(e)
8132 )
8133 self.logger.debug(logging_text + "Exit")
8134 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8135
8136 async def heal_RO(
8137 self,
8138 logging_text,
8139 nsr_id,
8140 db_nslcmop,
8141 stage,
8142 ):
8143 """
8144 Heal at RO
8145 :param logging_text: preffix text to use at logging
8146 :param nsr_id: nsr identity
8147 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8148 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8149 :return: None or exception
8150 """
8151
8152 def get_vim_account(vim_account_id):
8153 nonlocal db_vims
8154 if vim_account_id in db_vims:
8155 return db_vims[vim_account_id]
8156 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8157 db_vims[vim_account_id] = db_vim
8158 return db_vim
8159
8160 try:
8161 start_heal = time()
8162 ns_params = db_nslcmop.get("operationParams")
8163 if ns_params and ns_params.get("timeout_ns_heal"):
8164 timeout_ns_heal = ns_params["timeout_ns_heal"]
8165 else:
8166 timeout_ns_heal = self.timeout.ns_heal
8167
8168 db_vims = {}
8169
8170 nslcmop_id = db_nslcmop["_id"]
8171 target = {
8172 "action_id": nslcmop_id,
8173 }
8174 self.logger.warning(
8175 "db_nslcmop={} and timeout_ns_heal={}".format(
8176 db_nslcmop, timeout_ns_heal
8177 )
8178 )
8179 target.update(db_nslcmop.get("operationParams", {}))
8180
8181 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8182 desc = await self.RO.recreate(nsr_id, target)
8183 self.logger.debug("RO return > {}".format(desc))
8184 action_id = desc["action_id"]
8185 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8186 await self._wait_ng_ro(
8187 nsr_id,
8188 action_id,
8189 nslcmop_id,
8190 start_heal,
8191 timeout_ns_heal,
8192 stage,
8193 operation="healing",
8194 )
8195
8196 # Updating NSR
8197 db_nsr_update = {
8198 "_admin.deployed.RO.operational-status": "running",
8199 "detailed-status": " ".join(stage),
8200 }
8201 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8202 self._write_op_status(nslcmop_id, stage)
8203 self.logger.debug(
8204 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8205 )
8206
8207 except Exception as e:
8208 stage[2] = "ERROR healing at VIM"
8209 # self.set_vnfr_at_error(db_vnfrs, str(e))
8210 self.logger.error(
8211 "Error healing at VIM {}".format(e),
8212 exc_info=not isinstance(
8213 e,
8214 (
8215 ROclient.ROClientException,
8216 LcmException,
8217 DbException,
8218 NgRoException,
8219 ),
8220 ),
8221 )
8222 raise
8223
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Launch heal_N2VC as an asyncio task for each execution environment found in
        descriptor_config and register the task object.

        Looks for the charm information at database <nsrs>._admin.deployed.VCA;
        if not found, creates one entry there (filling
        db_nsr._admin.deployed.VCA.<index>) and updates the database.

        :param logging_text: prefix text to use at logging
        :param db_nsr: nsr database record; its _admin.deployed.VCA list is read and may be extended
        :param db_vnfr: vnfr database record of the VNF being healed (None for NS-level charms)
        :param nslcmop_id: id of the heal operation
        :param nsr_id: nsr identity
        :param nsi_id: nsi identity if this nsr belongs to a NSI, else None
        :param vnfd_id: vnfd reference used to fill the VCA record
        :param vdu_id: vdu identity (None for VNF/NS level charms)
        :param kdu_name: kdu name (None when not a KDU)
        :param member_vnf_index: member-vnf-index-ref of the VNF (None for NS charms)
        :param vdu_index: count-index of the VDU instance
        :param vdu_name: vdu name used to fill the VCA record
        :param deploy_params: parameters passed to heal_N2VC (day-1 primitives, run-day1 flag, ...)
        :param descriptor_config: configuration section of the descriptor (vnf, vdu or ns level)
        :param base_folder: descriptor storage info used to locate the charm artifact
        :param task_instantiation_info: dict task -> task name, updated with each created task
        :param stage: 3-item stage list shared with the caller for progress reporting
        """

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Normalize the descriptor into a list of execution-environment items
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Determine vca_type/vca_name from the ee item: juju charm or helm chart
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Find an existing VCA record matching this element; for/else creates one if absent
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # vca_index is -1 when the VCA list was empty, so the new entry gets index 0
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            # Register so the task can be cancelled/awaited from the operation bookkeeping
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8385
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Recover the N2VC side of a healed element: re-register the execution
        environment (native charms only), reinstall the configuration software,
        re-inject the ssh key for proxy charms and optionally re-run the day-1
        initial config primitives when deploy_params["run-day1"] is set.

        :param logging_text: prefix text to use at logging
        :param vca_index: index of this VCA inside db_nsr._admin.deployed.VCA
        :param nsi_id: nsi identity if this nsr belongs to a NSI, else None
        :param db_nsr: nsr database record
        :param db_vnfr: vnfr database record (None for NS-level configuration)
        :param vdu_id: vdu identity (None for VNF/NS level)
        :param kdu_name: kdu name (None when not a KDU)
        :param vdu_index: count-index of the VDU instance
        :param config_descriptor: configuration section of the descriptor for this element
        :param deploy_params: primitive parameters; also receives "rw_mgmt_ip"
        :param base_folder: descriptor storage info used to build the artifact path
        :param nslcmop_id: id of the heal operation
        :param stage: 3-item stage list used for progress reporting
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/helm/helm-v3
        :param vca_name: charm or helm-chart name
        :param ee_config_descriptor: execution-environment item from the descriptor
        :raises LcmException: wrapping any failure, after setting the config status to BROKEN
        """
        nsr_id = db_nsr["_id"]
        # Dot-path prefix for all updates of this VCA entry in the nsrs collection
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace from NS down to VNF/VDU/KDU as applicable
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                # SOL004-style package without pkg-dir: artifacts live under Scripts/
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # user/pub_key None: at this point only the management IP is needed
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # The special "config" primitive becomes the charm's install-time config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8794
8795 async def _wait_heal_ro(
8796 self,
8797 nsr_id,
8798 timeout=600,
8799 ):
8800 start_time = time()
8801 while time() <= start_time + timeout:
8802 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8803 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8804 "operational-status"
8805 ]
8806 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8807 if operational_status_ro != "healing":
8808 break
8809 await asyncio.sleep(15)
8810 else: # timeout_ns_deploy
8811 raise NgRoException("Timeout waiting ns to deploy")
8812
8813 async def vertical_scale(self, nsr_id, nslcmop_id):
8814 """
8815 Vertical Scale the VDUs in a NS
8816
8817 :param: nsr_id: NS Instance ID
8818 :param: nslcmop_id: nslcmop ID of migrate
8819
8820 """
8821 # Try to lock HA task here
8822 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8823 if not task_is_locked_by_me:
8824 return
8825 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8826 self.logger.debug(logging_text + "Enter")
8827 # get all needed from database
8828 db_nslcmop = None
8829 db_nslcmop_update = {}
8830 nslcmop_operation_state = None
8831 db_nsr_update = {}
8832 target = {}
8833 exc = None
8834 # in case of error, indicates what part of scale was failed to put nsr at error status
8835 start_deploy = time()
8836
8837 try:
8838 # wait for any previous tasks in process
8839 step = "Waiting for previous operations to terminate"
8840 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8841
8842 self._write_ns_status(
8843 nsr_id=nsr_id,
8844 ns_state=None,
8845 current_operation="VerticalScale",
8846 current_operation_id=nslcmop_id,
8847 )
8848 step = "Getting nslcmop from database"
8849 self.logger.debug(
8850 step + " after having waited for previous tasks to be completed"
8851 )
8852 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8853 operationParams = db_nslcmop.get("operationParams")
8854 target = {}
8855 target.update(operationParams)
8856 desc = await self.RO.vertical_scale(nsr_id, target)
8857 self.logger.debug("RO return > {}".format(desc))
8858 action_id = desc["action_id"]
8859 await self._wait_ng_ro(
8860 nsr_id,
8861 action_id,
8862 nslcmop_id,
8863 start_deploy,
8864 self.timeout.verticalscale,
8865 operation="verticalscale",
8866 )
8867 except (ROclient.ROClientException, DbException, LcmException) as e:
8868 self.logger.error("Exit Exception {}".format(e))
8869 exc = e
8870 except asyncio.CancelledError:
8871 self.logger.error("Cancelled Exception while '{}'".format(step))
8872 exc = "Operation was cancelled"
8873 except Exception as e:
8874 exc = traceback.format_exc()
8875 self.logger.critical(
8876 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8877 )
8878 finally:
8879 self._write_ns_status(
8880 nsr_id=nsr_id,
8881 ns_state=None,
8882 current_operation="IDLE",
8883 current_operation_id=None,
8884 )
8885 if exc:
8886 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8887 nslcmop_operation_state = "FAILED"
8888 else:
8889 nslcmop_operation_state = "COMPLETED"
8890 db_nslcmop_update["detailed-status"] = "Done"
8891 db_nsr_update["detailed-status"] = "Done"
8892
8893 self._write_op_status(
8894 op_id=nslcmop_id,
8895 stage="",
8896 error_message="",
8897 operation_state=nslcmop_operation_state,
8898 other_update=db_nslcmop_update,
8899 )
8900 if nslcmop_operation_state:
8901 try:
8902 msg = {
8903 "nsr_id": nsr_id,
8904 "nslcmop_id": nslcmop_id,
8905 "operationState": nslcmop_operation_state,
8906 }
8907 await self.msg.aiowrite("ns", "verticalscaled", msg)
8908 except Exception as e:
8909 self.logger.error(
8910 logging_text + "kafka_write notification Exception {}".format(e)
8911 )
8912 self.logger.debug(logging_text + "Exit")
8913 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")