[osm/LCM.git] / osm_lcm / ns.py
# -*- coding: utf-8 -*-

##
# Copyright 2018 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

import asyncio
import shutil
from typing import Any, Dict, List
import yaml
import logging
import logging.handlers
import traceback
import json
from jinja2 import (
    Environment,
    TemplateError,
    TemplateNotFound,
    StrictUndefined,
    UndefinedError,
    select_autoescape,
)

from osm_lcm import ROclient
from osm_lcm.data_utils.lcm_config import LcmCfg
from osm_lcm.data_utils.nsr import (
    get_deployed_kdu,
    get_deployed_vca,
    get_deployed_vca_list,
    get_nsd,
)
from osm_lcm.data_utils.vca import (
    DeployedComponent,
    DeployedK8sResource,
    DeployedVCA,
    EELevel,
    Relation,
    EERelation,
    safe_get_ee_relation,
)
from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import (
    LcmException,
    LcmExceptionNoMgmtIP,
    LcmBase,
    deep_get,
    get_iterable,
    populate_dict,
    check_juju_bundle_existence,
    get_charm_artifact_path,
    get_ee_id_parts,
    vld_to_ro_ip_profile,
)
from osm_lcm.data_utils.nsd import (
    get_ns_configuration_relation_list,
    get_vnf_profile,
    get_vnf_profiles,
)
from osm_lcm.data_utils.vnfd import (
    get_kdu,
    get_kdu_services,
    get_relation_list,
    get_vdu_list,
    get_vdu_profile,
    get_ee_sorted_initial_config_primitive_list,
    get_ee_sorted_terminate_config_primitive_list,
    get_kdu_list,
    get_virtual_link_profiles,
    get_vdu,
    get_configuration,
    get_vdu_index,
    get_scaling_aspect,
    get_number_of_instances,
    get_juju_ee_ref,
    get_kdu_resource_profile,
    find_software_version,
    check_helm_ee_in_ns,
)
from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.data_utils.vnfr import (
    get_osm_params,
    get_vdur_index,
    get_kdur,
    get_volumes_from_instantiation_params,
)
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.definitions import RelationEndpoint
from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector

from osm_common.dbbase import DbException
from osm_common.fsbase import FsException

from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
from osm_lcm.data_utils.wim import (
    get_sdn_ports,
    get_target_wim_attrs,
    select_feasible_wim_account,
)

from n2vc.n2vc_juju_conn import N2VCJujuConnector
from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException

from osm_lcm.lcm_helm_conn import LCMHelmConn
from osm_lcm.osm_config import OsmConfigBuilder
from osm_lcm.prometheus import parse_job

from copy import copy, deepcopy
from time import time
from uuid import uuid4

from random import SystemRandom

__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"


class NsLcm(LcmBase):
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"

    def __init__(self, msg, lcm_tasks, config: LcmCfg):
139 """
140 Init, Connect to database, filesystem storage, and messaging
141 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
142 :return: None
143 """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

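        # map KDU deployment type (as it appears in the descriptor) to the k8s connector handling it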
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

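        # map execution environment type (from the config descriptor) to the VCA connector managing it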
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(**self.ro_config.to_dict())

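        # map LCM operation type to the RO coroutine used by _wait_ng_ro to poll its status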
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }

    @staticmethod
    def increment_ip_mac(ip_mac, vm_index=1):
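        """Return ip_mac with its last numeric group increased by vm_index.

        A brief illustration of the logic below (IPv4 tails are incremented in
        decimal; MAC/IPv6 groups after the last colon in hexadecimal, keeping
        the field width):

            increment_ip_mac("192.168.0.10", 2)      -> "192.168.0.12"
            increment_ip_mac("52:54:00:00:00:0f", 1) -> "52:54:00:00:00:10"

        Non-string input is returned unchanged; None is returned when the
        tail cannot be parsed.
        """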
        if not isinstance(ip_mac, str):
            return ip_mac
        try:
            # try with IPv4: look for the last dot
            i = ip_mac.rfind(".")
            if i > 0:
                i += 1
                return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
            # try with IPv6 or MAC: look for the last colon and operate in hex
            i = ip_mac.rfind(":")
            if i > 0:
                i += 1
                # format in hex; the field length can be 2 for a MAC or 4 for IPv6
                return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
                    ip_mac[:i], int(ip_mac[i:], 16) + vm_index
                )
        except Exception:
            pass
        return None

    def _on_update_ro_db(self, nsrs_id, ro_descriptor):
        # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))

        try:
            # TODO filter RO descriptor fields...

            # write to database
            db_dict = dict()
            # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
            db_dict["deploymentStatus"] = ro_descriptor
            self.update_db_2("nsrs", nsrs_id, db_dict)

        except Exception as e:
            self.logger.warn(
                "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
            )

    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))

    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
364 """
365 Updating vca status in NSR record
366 :param cluster_uuid: UUID of a k8s cluster
367 :param kdu_instance: The unique name of the KDU instance
368 :param filter: To get nsr_id
369 :cluster_type: The cluster type (juju, k8s)
370 :return: none
371 """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))

    @staticmethod
    def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
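        """Render the cloud-init Jinja2 template with the VDU/VNF additional params.

        Variables are strict: e.g. a template line such as
        ``password: {{ admin_password }}`` requires ``admin_password`` to be
        present in additional_params (the variable name here is illustrative
        only); otherwise UndefinedError is raised and turned into LcmException.
        """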
        try:
            env = Environment(
                undefined=StrictUndefined,
                autoescape=select_autoescape(default_for_string=True, default=True),
            )
            template = env.from_string(cloud_init_text)
            return template.render(additional_params or {})
        except UndefinedError as e:
            raise LcmException(
                "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
                "file, must be provided in the instantiation parameters inside the "
                "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
            )
        except (TemplateError, TemplateNotFound) as e:
            raise LcmException(
                "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                    vnfd_id, vdu_id, e
                )
            )

    def _get_vdu_cloud_init_content(self, vdu, vnfd):
        cloud_init_content = cloud_init_file = None
        try:
            if vdu.get("cloud-init-file"):
                base_folder = vnfd["_admin"]["storage"]
                if base_folder["pkg-dir"]:
                    cloud_init_file = "{}/{}/cloud_init/{}".format(
                        base_folder["folder"],
                        base_folder["pkg-dir"],
                        vdu["cloud-init-file"],
                    )
                else:
                    cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                        base_folder["folder"],
                        vdu["cloud-init-file"],
                    )
                with self.fs.file_open(cloud_init_file, "r") as ci_file:
                    cloud_init_content = ci_file.read()
            elif vdu.get("cloud-init"):
                cloud_init_content = vdu["cloud-init"]

            return cloud_init_content
        except FsException as e:
            raise LcmException(
                "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                    vnfd["id"], vdu["id"], cloud_init_file, e
                )
            )

    def _get_vdu_additional_params(self, db_vnfr, vdu_id):
        vdur = next(
            (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
        )
        additional_params = vdur.get("additionalParams")
        return parse_yaml_strings(additional_params)

    def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
460 """
461 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
462 :param vnfd: input vnfd
463 :param new_id: overrides vnf id if provided
464 :param additionalParams: Instantiation params for VNFs provided
465 :param nsrId: Id of the NSR
466 :return: copy of vnfd
467 """
        vnfd_RO = deepcopy(vnfd)
        # remove keys unused by RO: configuration, monitoring, scaling and internal ones
        vnfd_RO.pop("_id", None)
        vnfd_RO.pop("_admin", None)
        vnfd_RO.pop("monitoring-param", None)
        vnfd_RO.pop("scaling-group-descriptor", None)
        vnfd_RO.pop("kdu", None)
        vnfd_RO.pop("k8s-cluster", None)
        if new_id:
            vnfd_RO["id"] = new_id

        # cloud-init and cloud-init-file are rendered by LCM, not RO: remove them
        for vdu in get_iterable(vnfd_RO, "vdu"):
            vdu.pop("cloud-init-file", None)
            vdu.pop("cloud-init", None)
        return vnfd_RO

    @staticmethod
    def ip_profile_2_RO(ip_profile):
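        """Translate an OSM IM ip-profile into the RO ip-profile format.

        For example (illustrative values), the input
        {"ip-version": "ipv4", "dns-server": [{"address": "8.8.8.8"}]}
        becomes {"ip-version": "IPv4", "dns-address": ["8.8.8.8"]}.
        """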
        RO_ip_profile = deepcopy(ip_profile)
        if "dns-server" in RO_ip_profile:
            if isinstance(RO_ip_profile["dns-server"], list):
                RO_ip_profile["dns-address"] = []
                for ds in RO_ip_profile.pop("dns-server"):
                    RO_ip_profile["dns-address"].append(ds["address"])
            else:
                RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
        if RO_ip_profile.get("ip-version") == "ipv4":
            RO_ip_profile["ip-version"] = "IPv4"
        if RO_ip_profile.get("ip-version") == "ipv6":
            RO_ip_profile["ip-version"] = "IPv6"
        if "dhcp-params" in RO_ip_profile:
            RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
        return RO_ip_profile

    def _get_ro_vim_id_for_vim_account(self, vim_account):
        db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
        if db_vim["_admin"]["operationalState"] != "ENABLED":
            raise LcmException(
                "VIM={} is not available. operationalState={}".format(
                    vim_account, db_vim["_admin"]["operationalState"]
                )
            )
        RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
        return RO_vim_id

    def get_ro_wim_id_for_wim_account(self, wim_account):
        if isinstance(wim_account, str):
            db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
            if db_wim["_admin"]["operationalState"] != "ENABLED":
                raise LcmException(
                    "WIM={} is not available. operationalState={}".format(
                        wim_account, db_wim["_admin"]["operationalState"]
                    )
                )
            RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
            return RO_wim_id
        else:
            return wim_account

    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
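        """Add or remove vdur entries of the VNFR when scaling out/in.

        :param db_vnfr: VNFR content; its "vdur" list is refreshed from the database before returning
        :param vdu_create: dict of vdu-id-ref: number of instances to add
        :param vdu_delete: dict of vdu-id-ref: number of instances to remove
        :param mark_delete: only mark the vdur entries as DELETING instead of removing them
        """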
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only the first vdu can be the management of the vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]

    def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
        """
        Updates database nsr with the RO info for the created vld
        :param ns_update_nsr: dictionary to be filled with the updated info
        :param db_nsr: content of db_nsr. This is also modified
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """

        for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
            for net_RO in get_iterable(nsr_desc_RO, "nets"):
                if vld["id"] != net_RO.get("ns_net_osm_id"):
                    continue
                vld["vim-id"] = net_RO.get("vim_net_id")
                vld["name"] = net_RO.get("vim_name")
                vld["status"] = net_RO.get("status")
                vld["status-detailed"] = net_RO.get("error_msg")
                ns_update_nsr["vld.{}".format(vld_index)] = vld
                break
            else:
                raise LcmException(
                    "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
                )

    def set_vnfr_at_error(self, db_vnfrs, error_text):
        try:
            for db_vnfr in db_vnfrs.values():
                vnfr_update = {"status": "ERROR"}
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    if "status" not in vdur:
                        vdur["status"] = "ERROR"
                        vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                        if error_text:
                            vdur["status-detailed"] = str(error_text)
                            vnfr_update[
                                "vdur.{}.status-detailed".format(vdu_index)
                            ] = "ERROR"
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        except DbException as e:
            self.logger.error("Cannot update vnf. {}".format(e))

    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if there are no VDUs, there is no ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )

    def _get_ns_config_info(self, nsr_id):
        """
        Generates a mapping between vnf,vdu elements and the N2VC id
        :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
        :return: a dictionary with {osm-config-mapping: {}} where its element contains:
            "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
            "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
        """
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        mapping = {}
        ns_config_info = {"osm-config-mapping": mapping}
        for vca in vca_deployed_list:
            if not vca["member-vnf-index"]:
                continue
            if not vca["vdu_id"]:
                mapping[vca["member-vnf-index"]] = vca["application"]
            else:
                mapping[
                    "{}.{}.{}".format(
                        vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
                    )
                ] = vca["application"]
        return ns_config_info

    async def _instantiate_ng_ro(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
        start_deploy,
        timeout_ns_deploy,
    ):
        db_vims = {}

        def get_vim_account(vim_account_id):
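            # per-call cache of vim_accounts documents, keyed by _id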
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        # modify target_vld info with instantiation parameters
        def parse_vld_instantiation_params(
            target_vim, target_vld, vld_params, target_sdn
        ):
            if vld_params.get("ip-profile"):
                target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
                    vld_params["ip-profile"]
                )
            if vld_params.get("provider-network"):
                target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
                    "provider-network"
                ]
                if "sdn-ports" in vld_params["provider-network"] and target_sdn:
                    target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
                        "provider-network"
                    ]["sdn-ports"]

            # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
            # if wim_account_id is specified in vld_params, validate if it is feasible.
            wim_account_id, db_wim = select_feasible_wim_account(
                db_nsr, db_vnfrs, target_vld, vld_params, self.logger
            )

            if wim_account_id:
                # WIM is needed and a feasible one was found, populate WIM target and SDN ports
                self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
                # update vld_params with correct WIM account Id
                vld_params["wimAccountId"] = wim_account_id

                target_wim = "wim:{}".format(wim_account_id)
                target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
                sdn_ports = get_sdn_ports(vld_params, db_wim)
                if len(sdn_ports) > 0:
                    target_vld["vim_info"][target_wim] = target_wim_attrs
                    target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports

                self.logger.debug(
                    "Target VLD with WIM data: {:s}".format(str(target_vld))
                )

            for param in ("vim-network-name", "vim-network-id"):
                if vld_params.get(param):
                    if isinstance(vld_params[param], dict):
                        for vim, vim_net in vld_params[param].items():
                            other_target_vim = "vim:" + vim
                            populate_dict(
                                target_vld["vim_info"],
                                (other_target_vim, param.replace("-", "_")),
                                vim_net,
                            )
                    else:  # isinstance str
                        target_vld["vim_info"][target_vim][
                            param.replace("-", "_")
                        ] = vld_params[param]
            if vld_params.get("common_id"):
                target_vld["common_id"] = vld_params.get("common_id")

        # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
        def update_ns_vld_target(target, ns_params):
            for vnf_params in ns_params.get("vnf", ()):
                if vnf_params.get("vimAccountId"):
                    target_vnf = next(
                        (
                            vnfr
                            for vnfr in db_vnfrs.values()
                            if vnf_params["member-vnf-index"]
                            == vnfr["member-vnf-index-ref"]
                        ),
                        None,
                    )
                    vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
                    if not vdur:
                        return
                    for a_index, a_vld in enumerate(target["ns"]["vld"]):
                        target_vld = find_in_list(
                            get_iterable(vdur, "interfaces"),
                            lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                        )

                        vld_params = find_in_list(
                            get_iterable(ns_params, "vld"),
                            lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
                        )
                        if target_vld:
                            if vnf_params.get("vimAccountId") not in a_vld.get(
                                "vim_info", {}
                            ):
                                target_vim_network_list = [
                                    v for _, v in a_vld.get("vim_info").items()
                                ]
                                target_vim_network_name = next(
                                    (
                                        item.get("vim_network_name", "")
                                        for item in target_vim_network_list
                                    ),
                                    "",
                                )

                                target["ns"]["vld"][a_index].get("vim_info").update(
                                    {
                                        "vim:{}".format(vnf_params["vimAccountId"]): {
                                            "vim_network_name": target_vim_network_name,
                                        }
                                    }
                                )

                                if vld_params:
                                    for param in ("vim-network-name", "vim-network-id"):
                                        if vld_params.get(param) and isinstance(
                                            vld_params[param], dict
                                        ):
                                            for vim, vim_net in vld_params[
                                                param
                                            ].items():
                                                other_target_vim = "vim:" + vim
                                                populate_dict(
                                                    target["ns"]["vld"][a_index].get(
                                                        "vim_info"
                                                    ),
                                                    (
                                                        other_target_vim,
                                                        param.replace("-", "_"),
                                                    ),
                                                    vim_net,
                                                )

        nslcmop_id = db_nslcmop["_id"]
        target = {
            "name": db_nsr["name"],
            "ns": {"vld": []},
            "vnf": [],
            "image": deepcopy(db_nsr["image"]),
            "flavor": deepcopy(db_nsr["flavor"]),
            "action_id": nslcmop_id,
            "cloud_init_content": {},
        }
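        # "target" is the NG-RO deployment request built below: ns-level VLDs,
        # per-VNF vdur/vld records, and image/flavor/affinity lookup tables,
        # each annotated with per-VIM "vim_info" entries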
        for image in target["image"]:
            image["vim_info"] = {}
        for flavor in target["flavor"]:
            flavor["vim_info"] = {}
        if db_nsr.get("shared-volumes"):
            target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
            for shared_volumes in target["shared-volumes"]:
                shared_volumes["vim_info"] = {}
        if db_nsr.get("affinity-or-anti-affinity-group"):
            target["affinity-or-anti-affinity-group"] = deepcopy(
                db_nsr["affinity-or-anti-affinity-group"]
            )
            for affinity_or_anti_affinity_group in target[
                "affinity-or-anti-affinity-group"
            ]:
                affinity_or_anti_affinity_group["vim_info"] = {}

        if db_nslcmop.get("lcmOperationType") != "instantiate":
            # get parameters of instantiation:
            db_nslcmop_instantiate = self.db.get_list(
                "nslcmops",
                {
                    "nsInstanceId": db_nslcmop["nsInstanceId"],
                    "lcmOperationType": "instantiate",
                },
            )[-1]
            ns_params = db_nslcmop_instantiate.get("operationParams")
        else:
            ns_params = db_nslcmop.get("operationParams")
        ssh_keys_instantiation = ns_params.get("ssh_keys") or []
        ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])

        cp2target = {}
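        # maps "member_vnf:<member-vnf-index>.<cpd-id>" connection points to their
        # ns vld target string; filled while walking the NSD vnf-profiles below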
        for vld_index, vld in enumerate(db_nsr.get("vld")):
            target_vim = "vim:{}".format(ns_params["vimAccountId"])
            target_vld = {
                "id": vld["id"],
                "name": vld["name"],
                "mgmt-network": vld.get("mgmt-network", False),
                "type": vld.get("type"),
                "vim_info": {
                    target_vim: {
                        "vim_network_name": vld.get("vim-network-name"),
                        "vim_account_id": ns_params["vimAccountId"],
                    }
                },
            }
            # check if this network needs SDN assist
            if vld.get("pci-interfaces"):
                db_vim = get_vim_account(ns_params["vimAccountId"])
                if vim_config := db_vim.get("config"):
                    if sdnc_id := vim_config.get("sdn-controller"):
                        sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        target_vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

            nsd_vnf_profiles = get_vnf_profiles(nsd)
            for nsd_vnf_profile in nsd_vnf_profiles:
                for cp in nsd_vnf_profile["virtual-link-connectivity"]:
                    if cp["virtual-link-profile-id"] == vld["id"]:
                        cp2target[
                            "member_vnf:{}.{}".format(
                                cp["constituent-cpd-id"][0][
                                    "constituent-base-element-id"
                                ],
                                cp["constituent-cpd-id"][0]["constituent-cpd-id"],
                            )
                        ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)

            # check at nsd descriptor, if there is an ip-profile
            vld_params = {}
            nsd_vlp = find_in_list(
                get_virtual_link_profiles(nsd),
                lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
                == vld["id"],
            )
            if (
                nsd_vlp
                and nsd_vlp.get("virtual-link-protocol-data")
                and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
            ):
                vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
                    "l3-protocol-data"
                ]

            # update vld_params with instantiation params
            vld_instantiation_params = find_in_list(
                get_iterable(ns_params, "vld"),
                lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
            )
            if vld_instantiation_params:
                vld_params.update(vld_instantiation_params)
            parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
            target["ns"]["vld"].append(target_vld)
        # Update the target ns_vld if the vnf vim_account is overridden by instantiation params
        update_ns_vld_target(target, ns_params)

        for vnfr in db_vnfrs.values():
            vnfd = find_in_list(
                db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
            )
            vnf_params = find_in_list(
                get_iterable(ns_params, "vnf"),
                lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
            )
            target_vnf = deepcopy(vnfr)
            target_vim = "vim:{}".format(vnfr["vim-account-id"])
            for vld in target_vnf.get("vld", ()):
                # check if connected to a ns.vld, to fill target
                vnf_cp = find_in_list(
                    vnfd.get("int-virtual-link-desc", ()),
                    lambda cpd: cpd.get("id") == vld["id"],
                )
                if vnf_cp:
                    ns_cp = "member_vnf:{}.{}".format(
                        vnfr["member-vnf-index-ref"], vnf_cp["id"]
                    )
                    if cp2target.get(ns_cp):
                        vld["target"] = cp2target[ns_cp]

                vld["vim_info"] = {
                    target_vim: {"vim_network_name": vld.get("vim-network-name")}
                }
                # check if this network needs SDN assist
                target_sdn = None
                if vld.get("pci-interfaces"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    sdnc_id = db_vim["config"].get("sdn-controller")
                    if sdnc_id:
                        sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        vld["vim_info"][target_sdn] = {
                            "sdn": True,
                            "target_vim": target_vim,
                            "vlds": [sdn_vld],
                            "type": vld.get("type"),
                        }

                # check at vnfd descriptor, if there is an ip-profile
                vld_params = {}
                vnfd_vlp = find_in_list(
                    get_virtual_link_profiles(vnfd),
                    lambda a_link_profile: a_link_profile["id"] == vld["id"],
                )
                if (
                    vnfd_vlp
                    and vnfd_vlp.get("virtual-link-protocol-data")
                    and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
                ):
                    vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
                        "l3-protocol-data"
                    ]
                # update vld_params with instantiation params
                if vnf_params:
                    vld_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "internal-vld"),
                        lambda i_vld: i_vld["name"] == vld["id"],
                    )
                    if vld_instantiation_params:
                        vld_params.update(vld_instantiation_params)
                parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)

            vdur_list = []
            for vdur in target_vnf.get("vdur", ()):
                if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
                    continue  # This vdu must not be created
                vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}

                self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))

                if ssh_keys_all:
                    vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
                    vnf_configuration = get_configuration(vnfd, vnfd["id"])
                    if (
                        vdu_configuration
                        and vdu_configuration.get("config-access")
                        and vdu_configuration.get("config-access").get("ssh-access")
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vdu_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif (
                        vnf_configuration
                        and vnf_configuration.get("config-access")
                        and vnf_configuration.get("config-access").get("ssh-access")
                        and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
                    ):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vnf_configuration[
                            "config-access"
                        ]["ssh-access"]["required"]
                    elif ssh_keys_instantiation and find_in_list(
                        vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
                    ):
                        vdur["ssh-keys"] = ssh_keys_instantiation

                self.logger.debug("NS > vdur > {}".format(vdur))

                vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
                # cloud-init
                if vdud.get("cloud-init-file"):
                    vdur["cloud-init"] = "{}:file:{}".format(
                        vnfd["_id"], vdud.get("cloud-init-file")
                    )
                    # read the file and put its content at target.cloud_init_content, so ng_ro does not need the shared package system
                    if vdur["cloud-init"] not in target["cloud_init_content"]:
                        base_folder = vnfd["_admin"]["storage"]
                        if base_folder["pkg-dir"]:
                            cloud_init_file = "{}/{}/cloud_init/{}".format(
                                base_folder["folder"],
                                base_folder["pkg-dir"],
                                vdud.get("cloud-init-file"),
                            )
                        else:
                            cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                                base_folder["folder"],
                                vdud.get("cloud-init-file"),
                            )
                        with self.fs.file_open(cloud_init_file, "r") as ci_file:
                            target["cloud_init_content"][
                                vdur["cloud-init"]
                            ] = ci_file.read()
                elif vdud.get("cloud-init"):
                    vdur["cloud-init"] = "{}:vdu:{}".format(
                        vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
                    )
                    # put the content at target.cloud_init_content, so ng_ro does not need to read the vnfd descriptor
                    target["cloud_init_content"][vdur["cloud-init"]] = vdud[
                        "cloud-init"
                    ]
                vdur["additionalParams"] = vdur.get("additionalParams") or {}
                deploy_params_vdu = self._format_additional_params(
                    vdur.get("additionalParams") or {}
                )
                deploy_params_vdu["OSM"] = get_osm_params(
                    vnfr, vdur["vdu-id-ref"], vdur["count-index"]
                )
                vdur["additionalParams"] = deploy_params_vdu

                # flavor
                ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                if target_vim not in ns_flavor["vim_info"]:
                    ns_flavor["vim_info"][target_vim] = {}

                # deal with images
                # in case alternative images are provided, check whether one matches
                # this vim_type and, if so, use it instead of the default image
                ns_image_id = int(vdur["ns-image-id"])
                if vdur.get("alt-image-ids"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    vim_type = db_vim["vim_type"]
                    for alt_image_id in vdur.get("alt-image-ids"):
                        ns_alt_image = target["image"][int(alt_image_id)]
                        if vim_type == ns_alt_image.get("vim-type"):
                            # must use alternative image
                            self.logger.debug(
                                "use alternative image id: {}".format(alt_image_id)
                            )
                            ns_image_id = alt_image_id
                            vdur["ns-image-id"] = ns_image_id
                            break
                ns_image = target["image"][int(ns_image_id)]
                if target_vim not in ns_image["vim_info"]:
                    ns_image["vim_info"][target_vim] = {}

                # Affinity groups
                if vdur.get("affinity-or-anti-affinity-group-id"):
                    for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
                        ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
                        if target_vim not in ns_ags["vim_info"]:
                            ns_ags["vim_info"][target_vim] = {}

                # shared-volumes
                if vdur.get("shared-volumes-id"):
                    for sv_id in vdur["shared-volumes-id"]:
                        ns_sv = find_in_list(
                            target["shared-volumes"], lambda sv: sv_id in sv["id"]
                        )
                        if ns_sv:
                            ns_sv["vim_info"][target_vim] = {}

                vdur["vim_info"] = {target_vim: {}}
                # instantiation parameters
                if vnf_params:
                    vdu_instantiation_params = find_in_list(
                        get_iterable(vnf_params, "vdu"),
                        lambda i_vdu: i_vdu["id"] == vdud["id"],
                    )
                    if vdu_instantiation_params:
                        # Parse the vdu_volumes from the instantiation params
                        vdu_volumes = get_volumes_from_instantiation_params(
                            vdu_instantiation_params, vdud
                        )
                        vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
                        vdur["additionalParams"]["OSM"][
                            "vim_flavor_id"
                        ] = vdu_instantiation_params.get("vim-flavor-id")
                vdur_list.append(vdur)
            target_vnf["vdur"] = vdur_list
            target["vnf"].append(target_vnf)

        self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
        desc = await self.RO.deploy(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id,
            action_id,
            nslcmop_id,
            start_deploy,
            timeout_ns_deploy,
            stage,
            operation="instantiation",
        )

        # Updating NSR
        db_nsr_update = {
            "_admin.deployed.RO.operational-status": "running",
            "detailed-status": " ".join(stage),
        }
        # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(
            logging_text + "ns deployed at RO. RO_id={}".format(action_id)
        )
        return

    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
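        """Poll NG-RO every 15 seconds until the given action reaches DONE or FAILED,
        updating stage/detailed-status in the database while it progresses.
        Raises NgRoException on failure or when the timeout expires.
        """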
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "RO status check returns unknown {}".format(
                    desc_status["status"]
                )
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15)
        else:  # timeout_ns_deploy
            raise NgRoException("Timeout waiting for ns to deploy")

    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return

    async def instantiate_RO(
        self,
        logging_text,
        nsr_id,
        nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage,
    ):
1420 """
1421 Instantiate at RO
1422 :param logging_text: preffix text to use at logging
1423 :param nsr_id: nsr identity
1424 :param nsd: database content of ns descriptor
1425 :param db_nsr: database content of ns record
1426 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1427 :param db_vnfrs:
1428 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1429 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1430 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1431 :return: None or exception
1432 """
        try:
            start_deploy = time()
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.ns_deploy

            # Check for and optionally request placement optimization. Database will be updated if placement activated
            stage[2] = "Waiting for Placement."
            if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
                # in case of placement, set ns_params["vimAccountId"] if it does not match any vnfr
                for vnfr in db_vnfrs.values():
                    if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
                        break
                else:
                    ns_params["vimAccountId"] = vnfr["vim-account-id"]

            return await self._instantiate_ng_ro(
                logging_text,
                nsr_id,
                nsd,
                db_nsr,
                db_nslcmop,
                db_vnfrs,
                db_vnfds,
                n2vc_key_list,
                stage,
                start_deploy,
                timeout_ns_deploy,
            )
        except Exception as e:
            stage[2] = "ERROR deploying at VIM"
            self.set_vnfr_at_error(db_vnfrs, str(e))
            self.logger.error(
                "Error deploying at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise

    async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
        """
        Wait for kdu to be up, get ip address
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param kdu_name:
        :return: IP address, K8s services
        """

        # self.logger.debug(logging_text + "Starting wait_kdu_up")
        nb_tries = 0

        while nb_tries < 360:
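            # poll every 10 seconds, up to 360 tries (~1 hour)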
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
            kdur = next(
                (
                    x
                    for x in get_iterable(db_vnfr, "kdur")
                    if x.get("kdu-name") == kdu_name
                ),
                None,
            )
            if not kdur:
                raise LcmException(
                    "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
                )
            if kdur.get("status"):
                if kdur["status"] in ("READY", "ENABLED"):
                    return kdur.get("ip-address"), kdur.get("services")
                else:
                    raise LcmException(
                        "target KDU={} is in error state".format(kdu_name)
                    )

            await asyncio.sleep(10)
            nb_tries += 1
        raise LcmException(
            "Timeout waiting for KDU={} to be instantiated".format(kdu_name)
        )

    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id:
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        target_vdu_id = None
        ro_retries = 0

        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    self.logger.error(logging_text + "Cannot inject ssh-key to a PDU")
                    return ip_address
                try:
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                break

        return ip_address

    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have finished. The NS charm waits for VNF and VDU charms; VNF charms wait for VDU charms.
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        timeout = 300
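        # "timeout" counts tries, decremented once per 10-second sleep (~50 minutes)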
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")

    def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
        vca_id = None
        if db_vnfr:
            vca_id = deep_get(db_vnfr, ("vca-id",))
        elif db_nsr:
            vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
            vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
        return vca_id

    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        kdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        chart_model=vca_name,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
1852 step = "Waiting to VM being up and getting IP address"
1853 self.logger.debug(logging_text + step)
1854 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1855 logging_text,
1856 nsr_id,
1857 vnfr_id,
1858 vdu_id,
1859 vdu_index,
1860 user=None,
1861 pub_key=None,
1862 )
1863 credentials = {"hostname": rw_mgmt_ip}
1864 # get username
1865 username = deep_get(
1866 config_descriptor, ("config-access", "ssh-access", "default-user")
1867 )
1868 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
1869 # merged. Meanwhile, get the username from the initial-config-primitive
1870 if not username and initial_config_primitive_list:
1871 for config_primitive in initial_config_primitive_list:
1872 for param in config_primitive.get("parameter", ()):
1873 if param["name"] == "ssh-username":
1874 username = param["value"]
1875 break
1876 if not username:
1877 raise LcmException(
1878 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1879 "'config-access.ssh-access.default-user'"
1880 )
1881 credentials["username"] = username
1882 # n2vc_redesign STEP 3.2
1883
1884 self._write_configuration_status(
1885 nsr_id=nsr_id,
1886 vca_index=vca_index,
1887 status="REGISTERING",
1888 element_under_configuration=element_under_configuration,
1889 element_type=element_type,
1890 )
1891
1892 step = "register execution environment {}".format(credentials)
1893 self.logger.debug(logging_text + step)
1894 ee_id = await self.vca_map[vca_type].register_execution_environment(
1895 credentials=credentials,
1896 namespace=namespace,
1897 db_dict=db_dict,
1898 vca_id=vca_id,
1899 )
1900
1901 # for compatibility with MON/POL modules, which need the model and application name in the database
1902 # TODO ask MON/POL whether it is still needed to assume the format "model_name.application_name"
1903 ee_id_parts = ee_id.split(".")
1904 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1905 if len(ee_id_parts) >= 2:
1906 model_name = ee_id_parts[0]
1907 application_name = ee_id_parts[1]
1908 db_nsr_update[db_update_entry + "model"] = model_name
1909 db_nsr_update[db_update_entry + "application"] = application_name
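# Illustrative (hypothetical values): an ee_id such as
# "d5c1a567.app-vnf-11f2c4d6-z0.0" is stored as model="d5c1a567" and
# application="app-vnf-11f2c4d6-z0"; any further parts (e.g. the machine id of
# juju-based EEs) are not persisted here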
1910
1911 # n2vc_redesign STEP 3.3
1912 step = "Install configuration Software"
1913
1914 self._write_configuration_status(
1915 nsr_id=nsr_id,
1916 vca_index=vca_index,
1917 status="INSTALLING SW",
1918 element_under_configuration=element_under_configuration,
1919 element_type=element_type,
1920 other_update=db_nsr_update,
1921 )
1922
1923 # TODO check if already done
1924 self.logger.debug(logging_text + step)
1925 config = None
1926 if vca_type == "native_charm":
1927 config_primitive = next(
1928 (p for p in initial_config_primitive_list if p["name"] == "config"),
1929 None,
1930 )
1931 if config_primitive:
1932 config = self._map_primitive_params(
1933 config_primitive, {}, deploy_params
1934 )
1935 num_units = 1
1936 if vca_type == "lxc_proxy_charm":
1937 if element_type == "NS":
1938 num_units = db_nsr.get("config-units") or 1
1939 elif element_type == "VNF":
1940 num_units = db_vnfr.get("config-units") or 1
1941 elif element_type == "VDU":
1942 for v in db_vnfr["vdur"]:
1943 if vdu_id == v["vdu-id-ref"]:
1944 num_units = v.get("config-units") or 1
1945 break
1946 if vca_type != "k8s_proxy_charm":
1947 await self.vca_map[vca_type].install_configuration_sw(
1948 ee_id=ee_id,
1949 artifact_path=artifact_path,
1950 db_dict=db_dict,
1951 config=config,
1952 num_units=num_units,
1953 vca_id=vca_id,
1954 vca_type=vca_type,
1955 )
1956
1957 # write in db flag of configuration_sw already installed
1958 self.update_db_2(
1959 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1960 )
1961
1962 # add relations for this VCA (wait for other peers related with this VCA)
1963 is_relation_added = await self._add_vca_relations(
1964 logging_text=logging_text,
1965 nsr_id=nsr_id,
1966 vca_type=vca_type,
1967 vca_index=vca_index,
1968 )
1969
1970 if not is_relation_added:
1971 raise LcmException("Relations could not be added to VCA.")
1972
1973 # if SSH access is required, then get the execution environment SSH public key
1974 # (for native charms we have already waited for the VM to be up)
1975 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1976 pub_key = None
1977 user = None
1978 # self.logger.debug("get ssh key block")
1979 if deep_get(
1980 config_descriptor, ("config-access", "ssh-access", "required")
1981 ):
1982 # self.logger.debug("ssh key needed")
1983 # Needed to inject a ssh key
1984 user = deep_get(
1985 config_descriptor,
1986 ("config-access", "ssh-access", "default-user"),
1987 )
1988 step = "Install configuration Software, getting public ssh key"
1989 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1990 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1991 )
1992
1993 step = "Insert public key into VM user={} ssh_key={}".format(
1994 user, pub_key
1995 )
1996 else:
1997 # self.logger.debug("no need to get ssh key")
1998 step = "Waiting to VM being up and getting IP address"
1999 self.logger.debug(logging_text + step)
2000
2001 # default rw_mgmt_ip to None, to avoid the variable being undefined
2002 rw_mgmt_ip = None
2003
2004 # n2vc_redesign STEP 5.1
2005 # wait for RO (ip-address) Insert pub_key into VM
2006 if vnfr_id:
2007 if kdu_name:
2008 rw_mgmt_ip, services = await self.wait_kdu_up(
2009 logging_text, nsr_id, vnfr_id, kdu_name
2010 )
2011 vnfd = self.db.get_one(
2012 "vnfds_revisions",
2013 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2014 )
2015 kdu = get_kdu(vnfd, kdu_name)
2016 kdu_services = [
2017 service["name"] for service in get_kdu_services(kdu)
2018 ]
2019 exposed_services = []
2020 for service in services:
2021 if any(s in service["name"] for s in kdu_services):
2022 exposed_services.append(service)
2023 await self.vca_map[vca_type].exec_primitive(
2024 ee_id=ee_id,
2025 primitive_name="config",
2026 params_dict={
2027 "osm-config": json.dumps(
2028 OsmConfigBuilder(
2029 k8s={"services": exposed_services}
2030 ).build()
2031 )
2032 },
2033 vca_id=vca_id,
2034 )
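# Illustrative only: the "config" primitive above receives a single "osm-config"
# parameter whose value is a JSON string built by OsmConfigBuilder from the
# exposed KDU services, e.g. (hypothetical shape)
#   {"osm-config": '{"k8s": {"services": [{"name": "ldap-svc", ...}]}}'}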
2035
2036 # This verification is needed to avoid trying to add a public key to a VM
2037 # when the VNF is a KNF (in the edge case where the user creates a VCA for a
2038 # KNF and not for its KDUs, the previous check evaluates to False and the code
2039 # reaches this block, so it must be verified whether the VNF is actually a VNF
2040 # or a KNF)
2041 elif db_vnfr.get("vdur"):
2042 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2043 logging_text,
2044 nsr_id,
2045 vnfr_id,
2046 vdu_id,
2047 vdu_index,
2048 user=user,
2049 pub_key=pub_key,
2050 )
2051
2052 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2053
2054 # store rw_mgmt_ip in deploy params for later replacement
2055 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2056
2057 # n2vc_redesign STEP 6 Execute initial config primitive
2058 step = "execute initial config primitive"
2059
2060 # wait for dependent primitives execution (NS -> VNF -> VDU)
2061 if initial_config_primitive_list:
2062 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2063
2064 # set the stage depending on the element type: vdu, kdu, vnf or ns
2065 my_vca = vca_deployed_list[vca_index]
2066 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2067 # VDU or KDU
2068 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2069 elif my_vca.get("member-vnf-index"):
2070 # VNF
2071 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2072 else:
2073 # NS
2074 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2075
2076 self._write_configuration_status(
2077 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2078 )
2079
2080 self._write_op_status(op_id=nslcmop_id, stage=stage)
2081
2082 check_if_terminated_needed = True
2083 for initial_config_primitive in initial_config_primitive_list:
2084 # add ns_config_info to the deploy params if this is an NS execution environment
2085 if not vca_deployed["member-vnf-index"]:
2086 deploy_params["ns_config_info"] = json.dumps(
2087 self._get_ns_config_info(nsr_id)
2088 )
2089 # TODO check if already done
2090 primitive_params_ = self._map_primitive_params(
2091 initial_config_primitive, {}, deploy_params
2092 )
2093
2094 step = "execute primitive '{}' params '{}'".format(
2095 initial_config_primitive["name"], primitive_params_
2096 )
2097 self.logger.debug(logging_text + step)
2098 await self.vca_map[vca_type].exec_primitive(
2099 ee_id=ee_id,
2100 primitive_name=initial_config_primitive["name"],
2101 params_dict=primitive_params_,
2102 db_dict=db_dict,
2103 vca_id=vca_id,
2104 vca_type=vca_type,
2105 )
2106 # Once some primitive has been executed, check and record at db whether terminate primitives need to be executed
2107 if check_if_terminated_needed:
2108 if config_descriptor.get("terminate-config-primitive"):
2109 self.update_db_2(
2110 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2111 )
2112 check_if_terminated_needed = False
2113
2114 # TODO register in database that primitive is done
2115
2116 # STEP 7 Configure metrics
2117 if vca_type == "helm" or vca_type == "helm-v3":
2118 # TODO: review for those cases where the helm chart is a reference and
2119 # is not part of the NF package
2120 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2121 ee_id=ee_id,
2122 artifact_path=artifact_path,
2123 ee_config_descriptor=ee_config_descriptor,
2124 vnfr_id=vnfr_id,
2125 nsr_id=nsr_id,
2126 target_ip=rw_mgmt_ip,
2127 element_type=element_type,
2128 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2129 vdu_id=vdu_id,
2130 vdu_index=vdu_index,
2131 kdu_name=kdu_name,
2132 kdu_index=kdu_index,
2133 )
2134 if prometheus_jobs:
2135 self.update_db_2(
2136 "nsrs",
2137 nsr_id,
2138 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2139 )
2140
2141 for job in prometheus_jobs:
2142 self.db.set_one(
2143 "prometheus_jobs",
2144 {"job_name": job["job_name"]},
2145 job,
2146 upsert=True,
2147 fail_on_empty=False,
2148 )
2149
2150 step = "instantiated at VCA"
2151 self.logger.debug(logging_text + step)
2152
2153 self._write_configuration_status(
2154 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2155 )
2156
2157 except Exception as e: # TODO do not use Exception but N2VC exception
2158 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2159 if not isinstance(
2160 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2161 ):
2162 self.logger.error(
2163 "Exception while {} : {}".format(step, e), exc_info=True
2164 )
2165 self._write_configuration_status(
2166 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2167 )
2168 raise LcmException("{}. {}".format(step, e)) from e
2169
2170 def _write_ns_status(
2171 self,
2172 nsr_id: str,
2173 ns_state: str,
2174 current_operation: str,
2175 current_operation_id: str,
2176 error_description: str = None,
2177 error_detail: str = None,
2178 other_update: dict = None,
2179 ):
2180 """
2181 Update db_nsr fields.
2182 :param nsr_id:
2183 :param ns_state:
2184 :param current_operation:
2185 :param current_operation_id:
2186 :param error_description:
2187 :param error_detail:
2188 :param other_update: Other required changes at the database, if provided (note: the dict is modified in place)
2189 :return:
2190 """
2191 try:
2192 db_dict = other_update or {}
2193 db_dict[
2194 "_admin.nslcmop"
2195 ] = current_operation_id # for backward compatibility
2196 db_dict["_admin.current-operation"] = current_operation_id
2197 db_dict["_admin.operation-type"] = (
2198 current_operation if current_operation != "IDLE" else None
2199 )
2200 db_dict["currentOperation"] = current_operation
2201 db_dict["currentOperationID"] = current_operation_id
2202 db_dict["errorDescription"] = error_description
2203 db_dict["errorDetail"] = error_detail
2204
2205 if ns_state:
2206 db_dict["nsState"] = ns_state
2207 self.update_db_2("nsrs", nsr_id, db_dict)
2208 except DbException as e:
2209 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2210
2211 def _write_op_status(
2212 self,
2213 op_id: str,
2214 stage: list = None,
2215 error_message: str = None,
2216 queuePosition: int = 0,
2217 operation_state: str = None,
2218 other_update: dict = None,
2219 ):
2220 try:
2221 db_dict = other_update or {}
2222 db_dict["queuePosition"] = queuePosition
2223 if isinstance(stage, list):
2224 db_dict["stage"] = stage[0]
2225 db_dict["detailed-status"] = " ".join(stage)
2226 elif stage is not None:
2227 db_dict["stage"] = str(stage)
2228
2229 if error_message is not None:
2230 db_dict["errorMessage"] = error_message
2231 if operation_state is not None:
2232 db_dict["operationState"] = operation_state
2233 db_dict["statusEnteredTime"] = time()
2234 self.update_db_2("nslcmops", op_id, db_dict)
2235 except DbException as e:
2236 self.logger.warn(
2237 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2238 )
2239
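# Illustrative usage (hypothetical values): with
#   stage = ["Stage 2/5: deployment.", "Deploying NS at VIM.", ""]
# _write_op_status stores stage[0] under "stage" and " ".join(stage) under
# "detailed-status" in the nslcmops record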
2240 def _write_all_config_status(self, db_nsr: dict, status: str):
2241 try:
2242 nsr_id = db_nsr["_id"]
2243 # configurationStatus
2244 config_status = db_nsr.get("configurationStatus")
2245 if config_status:
2246 db_nsr_update = {
2247 "configurationStatus.{}.status".format(index): status
2248 for index, v in enumerate(config_status)
2249 if v
2250 }
2251 # update status
2252 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2253
2254 except DbException as e:
2255 self.logger.warn(
2256 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2257 )
2258
2259 def _write_configuration_status(
2260 self,
2261 nsr_id: str,
2262 vca_index: int,
2263 status: str = None,
2264 element_under_configuration: str = None,
2265 element_type: str = None,
2266 other_update: dict = None,
2267 ):
2268 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2269 # .format(vca_index, status))
2270
2271 try:
2272 db_path = "configurationStatus.{}.".format(vca_index)
2273 db_dict = other_update or {}
2274 if status:
2275 db_dict[db_path + "status"] = status
2276 if element_under_configuration:
2277 db_dict[
2278 db_path + "elementUnderConfiguration"
2279 ] = element_under_configuration
2280 if element_type:
2281 db_dict[db_path + "elementType"] = element_type
2282 self.update_db_2("nsrs", nsr_id, db_dict)
2283 except DbException as e:
2284 self.logger.warn(
2285 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2286 status, nsr_id, vca_index, e
2287 )
2288 )
2289
2290 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2291 """
2292 Checks and computes the placement (vim account where to deploy). If it is decided by an external tool, it
2293 sends the request via kafka and waits until the result is written at the database (nslcmops _admin.pla).
2294 Database is used because the result can be obtained from a different LCM worker in case of HA.
2295 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2296 :param db_nslcmop: database content of nslcmop
2297 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2298 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfrs with the
2299 computed 'vim-account-id'
2300 """
2301 modified = False
2302 nslcmop_id = db_nslcmop["_id"]
2303 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2304 if placement_engine == "PLA":
2305 self.logger.debug(
2306 logging_text + "Invoke and wait for placement optimization"
2307 )
2308 await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
2309 db_poll_interval = 5
2310 wait = db_poll_interval * 10
2311 pla_result = None
2312 while not pla_result and wait >= 0:
2313 await asyncio.sleep(db_poll_interval)
2314 wait -= db_poll_interval
2315 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2316 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2317
2318 if not pla_result:
2319 raise LcmException(
2320 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2321 )
2322
2323 for pla_vnf in pla_result["vnf"]:
2324 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2325 if not pla_vnf.get("vimAccountId") or not vnfr:
2326 continue
2327 modified = True
2328 self.db.set_one(
2329 "vnfrs",
2330 {"_id": vnfr["_id"]},
2331 {"vim-account-id": pla_vnf["vimAccountId"]},
2332 )
2333 # Modifies db_vnfrs
2334 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2335 return modified
2336
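# Illustrative (hypothetical ids): the placement result polled from the nslcmops
# record at _admin.pla looks like
#   {"vnf": [{"member-vnf-index": "1", "vimAccountId": "c7f8-..."}]}
# and each entry overwrites the "vim-account-id" of the corresponding vnfr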
2337 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2338 alerts = []
2339 nsr_id = vnfr["nsr-id-ref"]
2340 df = vnfd.get("df", [{}])[0]
2341 # Checking for auto-healing configuration
2342 if "healing-aspect" in df:
2343 healing_aspects = df["healing-aspect"]
2344 for healing in healing_aspects:
2345 for healing_policy in healing.get("healing-policy", ()):
2346 vdu_id = healing_policy["vdu-id"]
2347 vdur = next(
2348 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2349 {},
2350 )
2351 if not vdur:
2352 continue
2353 metric_name = "vm_status"
2354 vdu_name = vdur.get("name")
2355 vnf_member_index = vnfr["member-vnf-index-ref"]
2356 uuid = str(uuid4())
2357 name = f"healing_{uuid}"
2358 action = healing_policy
2359 # action_on_recovery = healing.get("action-on-recovery")
2360 # cooldown_time = healing.get("cooldown-time")
2361 # day1 = healing.get("day1")
2362 alert = {
2363 "uuid": uuid,
2364 "name": name,
2365 "metric": metric_name,
2366 "tags": {
2367 "ns_id": nsr_id,
2368 "vnf_member_index": vnf_member_index,
2369 "vdu_name": vdu_name,
2370 },
2371 "alarm_status": "ok",
2372 "action_type": "healing",
2373 "action": action,
2374 }
2375 alerts.append(alert)
2376 return alerts
2377
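# Illustrative (hypothetical values): each healing alert built above watches the
# "vm_status" metric of one vdur, e.g.
#   {"uuid": "...", "name": "healing_<uuid>", "metric": "vm_status",
#    "tags": {"ns_id": "...", "vnf_member_index": "1", "vdu_name": "mgmtVM-0"},
#    "alarm_status": "ok", "action_type": "healing", "action": <healing-policy>}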
2378 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2379 alerts = []
2380 nsr_id = vnfr["nsr-id-ref"]
2381 df = vnfd.get("df", [{}])[0]
2382 # Checking for auto-scaling configuration
2383 if "scaling-aspect" in df:
2384 rel_operation_types = {
2385 "GE": ">=",
2386 "LE": "<=",
2387 "GT": ">",
2388 "LT": "<",
2389 "EQ": "==",
2390 "NE": "!=",
2391 }
2392 scaling_aspects = df["scaling-aspect"]
2393 all_vnfd_monitoring_params = {}
2394 for ivld in vnfd.get("int-virtual-link-desc", ()):
2395 for mp in ivld.get("monitoring-parameters", ()):
2396 all_vnfd_monitoring_params[mp.get("id")] = mp
2397 for vdu in vnfd.get("vdu", ()):
2398 for mp in vdu.get("monitoring-parameter", ()):
2399 all_vnfd_monitoring_params[mp.get("id")] = mp
2400 for df in vnfd.get("df", ()):
2401 for mp in df.get("monitoring-parameter", ()):
2402 all_vnfd_monitoring_params[mp.get("id")] = mp
2403 for scaling_aspect in scaling_aspects:
2404 scaling_group_name = scaling_aspect.get("name", "")
2405 # Get monitored VDUs
2406 all_monitored_vdus = set()
2407 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2408 "deltas", ()
2409 ):
2410 for vdu_delta in delta.get("vdu-delta", ()):
2411 all_monitored_vdus.add(vdu_delta.get("id"))
2412 monitored_vdurs = list(
2413 filter(
2414 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2415 vnfr["vdur"],
2416 )
2417 )
2418 if not monitored_vdurs:
2419 self.logger.error(
2420 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2421 )
2422 continue
2423 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2424 if scaling_policy["scaling-type"] != "automatic":
2425 continue
2426 threshold_time = scaling_policy.get("threshold-time", "1")
2427 cooldown_time = scaling_policy.get("cooldown-time", "0")
2428 for scaling_criteria in scaling_policy["scaling-criteria"]:
2429 monitoring_param_ref = scaling_criteria.get(
2430 "vnf-monitoring-param-ref"
2431 )
2432 vnf_monitoring_param = all_vnfd_monitoring_params[
2433 monitoring_param_ref
2434 ]
2435 for vdur in monitored_vdurs:
2436 vdu_id = vdur["vdu-id-ref"]
2437 metric_name = vnf_monitoring_param.get("performance-metric")
2438 metric_name = f"osm_{metric_name}"
2439 vnf_member_index = vnfr["member-vnf-index-ref"]
2440 scalein_threshold = scaling_criteria.get(
2441 "scale-in-threshold"
2442 )
2443 scaleout_threshold = scaling_criteria.get(
2444 "scale-out-threshold"
2445 )
2446 # Looking for min/max-number-of-instances
2447 instances_min_number = 1
2448 instances_max_number = 1
2449 vdu_profile = df["vdu-profile"]
2450 if vdu_profile:
2451 profile = next(
2452 item for item in vdu_profile if item["id"] == vdu_id
2453 )
2454 instances_min_number = profile.get(
2455 "min-number-of-instances", 1
2456 )
2457 instances_max_number = profile.get(
2458 "max-number-of-instances", 1
2459 )
2460
2461 if scalein_threshold:
2462 uuid = str(uuid4())
2463 name = f"scalein_{uuid}"
2464 operation = scaling_criteria[
2465 "scale-in-relational-operation"
2466 ]
2467 rel_operator = rel_operation_types.get(operation, "<=")
2468 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2469 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2470 labels = {
2471 "ns_id": nsr_id,
2472 "vnf_member_index": vnf_member_index,
2473 "vdu_id": vdu_id,
2474 }
2475 prom_cfg = {
2476 "alert": name,
2477 "expr": expression,
2478 "for": str(threshold_time) + "m",
2479 "labels": labels,
2480 }
2482 action = {
2483 "scaling-group": scaling_group_name,
2484 "cooldown-time": cooldown_time,
2485 }
2486 alert = {
2487 "uuid": uuid,
2488 "name": name,
2489 "metric": metric_name,
2490 "tags": {
2491 "ns_id": nsr_id,
2492 "vnf_member_index": vnf_member_index,
2493 "vdu_id": vdu_id,
2494 },
2495 "alarm_status": "ok",
2496 "action_type": "scale_in",
2497 "action": action,
2498 "prometheus_config": prom_cfg,
2499 }
2500 alerts.append(alert)
2501
2502 if scaleout_threshold:
2503 uuid = str(uuid4())
2504 name = f"scaleout_{uuid}"
2505 operation = scaling_criteria[
2506 "scale-out-relational-operation"
2507 ]
2508 rel_operator = rel_operation_types.get(operation, ">=")  # default for scale-out
2509 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2510 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2511 labels = {
2512 "ns_id": nsr_id,
2513 "vnf_member_index": vnf_member_index,
2514 "vdu_id": vdu_id,
2515 }
2516 prom_cfg = {
2517 "alert": name,
2518 "expr": expression,
2519 "for": str(threshold_time) + "m",
2520 "labels": labels,
2521 }
2523 action = {
2524 "scaling-group": scaling_group_name,
2525 "cooldown-time": cooldown_time,
2526 }
2527 alert = {
2528 "uuid": uuid,
2529 "name": name,
2530 "metric": metric_name,
2531 "tags": {
2532 "ns_id": nsr_id,
2533 "vnf_member_index": vnf_member_index,
2534 "vdu_id": vdu_id,
2535 },
2536 "alarm_status": "ok",
2537 "action_type": "scale_out",
2538 "action": action,
2539 "prometheus_config": prom_cfg,
2540 }
2541 alerts.append(alert)
2542 return alerts
2543
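# Illustrative (hypothetical values): for performance-metric "cpu_utilization",
# scale-out-threshold 80, relational operation GT and max 3 instances, the
# expression built above is
#   (count (osm_cpu_utilization{ns_id="...", vnf_member_index="1", vdu_id="mgmtVM"}) < 3)
#   and (avg(osm_cpu_utilization{...}) > 80)
# held for "threshold-time" minutes before the alert fires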
2544 def update_nsrs_with_pla_result(self, params):
2545 try:
2546 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2547 self.update_db_2(
2548 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2549 )
2550 except Exception as e:
2551 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2552
2553 async def instantiate(self, nsr_id, nslcmop_id):
2554 """
2555
2556 :param nsr_id: ns instance to deploy
2557 :param nslcmop_id: operation to run
2558 :return:
2559 """
2560
2561 # Try to lock HA task here
2562 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2563 if not task_is_locked_by_me:
2564 self.logger.debug(
2565 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2566 )
2567 return
2568
2569 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2570 self.logger.debug(logging_text + "Enter")
2571
2572 # get all needed from database
2573
2574 # database nsrs record
2575 db_nsr = None
2576
2577 # database nslcmops record
2578 db_nslcmop = None
2579
2580 # update operation on nsrs
2581 db_nsr_update = {}
2582 # update operation on nslcmops
2583 db_nslcmop_update = {}
2584
2585 timeout_ns_deploy = self.timeout.ns_deploy
2586
2587 nslcmop_operation_state = None
2588 db_vnfrs = {} # vnf's info indexed by member-index
2589 # n2vc_info = {}
2590 tasks_dict_info = {} # from task to info text
2591 exc = None
2592 error_list = []
2593 stage = [
2594 "Stage 1/5: preparation of the environment.",
2595 "Waiting for previous operations to terminate.",
2596 "",
2597 ]
2598 # ^ stage, step, VIM progress
2599 try:
2600 # wait for any previous tasks in process
2601 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2602
2603 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2604 stage[1] = "Reading from database."
2605 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2606 db_nsr_update["detailed-status"] = "creating"
2607 db_nsr_update["operational-status"] = "init"
2608 self._write_ns_status(
2609 nsr_id=nsr_id,
2610 ns_state="BUILDING",
2611 current_operation="INSTANTIATING",
2612 current_operation_id=nslcmop_id,
2613 other_update=db_nsr_update,
2614 )
2615 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2616
2617 # read from db: operation
2618 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2619 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2620 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2621 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2622 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2623 )
2624 ns_params = db_nslcmop.get("operationParams")
2625 if ns_params and ns_params.get("timeout_ns_deploy"):
2626 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2627
2628 # read from db: ns
2629 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2630 self.logger.debug(logging_text + stage[1])
2631 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2632 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2633 self.logger.debug(logging_text + stage[1])
2634 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2635 self.fs.sync(db_nsr["nsd-id"])
2636 db_nsr["nsd"] = nsd
2637 # nsr_name = db_nsr["name"] # TODO short-name??
2638
2639 # read from db: vnf's of this ns
2640 stage[1] = "Getting vnfrs from db."
2641 self.logger.debug(logging_text + stage[1])
2642 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2643
2644 # read from db: vnfd's for every vnf
2645 db_vnfds = [] # every vnfd data
2646
2647 # for each vnf in ns, read vnfd
2648 for vnfr in db_vnfrs_list:
2649 if vnfr.get("kdur"):
2650 kdur_list = []
2651 for kdur in vnfr["kdur"]:
2652 if kdur.get("additionalParams"):
2653 kdur["additionalParams"] = json.loads(
2654 kdur["additionalParams"]
2655 )
2656 kdur_list.append(kdur)
2657 vnfr["kdur"] = kdur_list
2658
2659 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2660 vnfd_id = vnfr["vnfd-id"]
2661 vnfd_ref = vnfr["vnfd-ref"]
2662 self.fs.sync(vnfd_id)
2663
2664 # if we do not have this vnfd yet, read it from db
2665 if all(db_vnfd["_id"] != vnfd_id for db_vnfd in db_vnfds):
2666 # read from db
2667 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2668 vnfd_id, vnfd_ref
2669 )
2670 self.logger.debug(logging_text + stage[1])
2671 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2672
2673 # store vnfd
2674 db_vnfds.append(vnfd)
2675
2676 # Get or generate the _admin.deployed.VCA list
2677 vca_deployed_list = None
2678 if db_nsr["_admin"].get("deployed"):
2679 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2680 if vca_deployed_list is None:
2681 vca_deployed_list = []
2682 configuration_status_list = []
2683 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2684 db_nsr_update["configurationStatus"] = configuration_status_list
2685 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2686 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2687 elif isinstance(vca_deployed_list, dict):
2688 # maintain backward compatibility. Change a dict to list at database
2689 vca_deployed_list = list(vca_deployed_list.values())
2690 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2691 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2692
2693 if not isinstance(
2694 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2695 ):
2696 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2697 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2698
2699 # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2700 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2701 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2702 self.db.set_list(
2703 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2704 )
2705
2706 # n2vc_redesign STEP 2 Deploy Network Scenario
2707 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2708 self._write_op_status(op_id=nslcmop_id, stage=stage)
2709
2710 stage[1] = "Deploying KDUs."
2711 # self.logger.debug(logging_text + "Before deploy_kdus")
2712 # Call deploy_kdus in case the "vdu:kdu" param exists
2713 await self.deploy_kdus(
2714 logging_text=logging_text,
2715 nsr_id=nsr_id,
2716 nslcmop_id=nslcmop_id,
2717 db_vnfrs=db_vnfrs,
2718 db_vnfds=db_vnfds,
2719 task_instantiation_info=tasks_dict_info,
2720 )
2721
2722 stage[1] = "Getting VCA public key."
2723 # n2vc_redesign STEP 1 Get VCA public ssh-key
2724 # feature 1429. Add n2vc public key to needed VMs
2725 n2vc_key = self.n2vc.get_public_key()
2726 n2vc_key_list = [n2vc_key]
2727 if self.vca_config.public_key:
2728 n2vc_key_list.append(self.vca_config.public_key)
2729
2730 stage[1] = "Deploying NS at VIM."
2731 task_ro = asyncio.ensure_future(
2732 self.instantiate_RO(
2733 logging_text=logging_text,
2734 nsr_id=nsr_id,
2735 nsd=nsd,
2736 db_nsr=db_nsr,
2737 db_nslcmop=db_nslcmop,
2738 db_vnfrs=db_vnfrs,
2739 db_vnfds=db_vnfds,
2740 n2vc_key_list=n2vc_key_list,
2741 stage=stage,
2742 )
2743 )
2744 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2745 tasks_dict_info[task_ro] = "Deploying at VIM"
2746
2747 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2748 stage[1] = "Deploying Execution Environments."
2749 self.logger.debug(logging_text + stage[1])
2750
2751 # create namespace and certificate if any helm based EE is present in the NS
2752 if check_helm_ee_in_ns(db_vnfds):
2753 # TODO: create EE namespace
2754 # create TLS certificates
2755 await self.vca_map["helm-v3"].create_tls_certificate(
2756 secret_name="ee-tls-{}".format(nsr_id),
2757 dns_prefix="*",
2758 nsr_id=nsr_id,
2759 usage="server auth",
2760 )
2761
2762 nsi_id = None # TODO put nsi_id when this nsr belongs to an NSI
2763 for vnf_profile in get_vnf_profiles(nsd):
2764 vnfd_id = vnf_profile["vnfd-id"]
2765 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2766 member_vnf_index = str(vnf_profile["id"])
2767 db_vnfr = db_vnfrs[member_vnf_index]
2768 base_folder = vnfd["_admin"]["storage"]
2769 vdu_id = None
2770 vdu_index = 0
2771 vdu_name = None
2772 kdu_name = None
2773 kdu_index = None
2774
2775 # Get additional parameters
2776 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2777 if db_vnfr.get("additionalParamsForVnf"):
2778 deploy_params.update(
2779 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2780 )
2781
2782 descriptor_config = get_configuration(vnfd, vnfd["id"])
2783 if descriptor_config:
2784 self._deploy_n2vc(
2785 logging_text=logging_text
2786 + "member_vnf_index={} ".format(member_vnf_index),
2787 db_nsr=db_nsr,
2788 db_vnfr=db_vnfr,
2789 nslcmop_id=nslcmop_id,
2790 nsr_id=nsr_id,
2791 nsi_id=nsi_id,
2792 vnfd_id=vnfd_id,
2793 vdu_id=vdu_id,
2794 kdu_name=kdu_name,
2795 member_vnf_index=member_vnf_index,
2796 vdu_index=vdu_index,
2797 kdu_index=kdu_index,
2798 vdu_name=vdu_name,
2799 deploy_params=deploy_params,
2800 descriptor_config=descriptor_config,
2801 base_folder=base_folder,
2802 task_instantiation_info=tasks_dict_info,
2803 stage=stage,
2804 )
2805
2806 # Deploy charms for each VDU that supports one.
2807 for vdud in get_vdu_list(vnfd):
2808 vdu_id = vdud["id"]
2809 descriptor_config = get_configuration(vnfd, vdu_id)
2810 vdur = find_in_list(
2811 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2812 )
2813
2814 if vdur.get("additionalParams"):
2815 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2816 else:
2817 deploy_params_vdu = deploy_params
2818 deploy_params_vdu["OSM"] = get_osm_params(
2819 db_vnfr, vdu_id, vdu_count_index=0
2820 )
2821 vdud_count = get_number_of_instances(vnfd, vdu_id)
2822
2823 self.logger.debug("VDUD > {}".format(vdud))
2824 self.logger.debug(
2825 "Descriptor config > {}".format(descriptor_config)
2826 )
2827 if descriptor_config:
2828 vdu_name = None
2829 kdu_name = None
2830 kdu_index = None
2831 for vdu_index in range(vdud_count):
2832 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2833 self._deploy_n2vc(
2834 logging_text=logging_text
2835 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2836 member_vnf_index, vdu_id, vdu_index
2837 ),
2838 db_nsr=db_nsr,
2839 db_vnfr=db_vnfr,
2840 nslcmop_id=nslcmop_id,
2841 nsr_id=nsr_id,
2842 nsi_id=nsi_id,
2843 vnfd_id=vnfd_id,
2844 vdu_id=vdu_id,
2845 kdu_name=kdu_name,
2846 kdu_index=kdu_index,
2847 member_vnf_index=member_vnf_index,
2848 vdu_index=vdu_index,
2849 vdu_name=vdu_name,
2850 deploy_params=deploy_params_vdu,
2851 descriptor_config=descriptor_config,
2852 base_folder=base_folder,
2853 task_instantiation_info=tasks_dict_info,
2854 stage=stage,
2855 )
2856 for kdud in get_kdu_list(vnfd):
2857 kdu_name = kdud["name"]
2858 descriptor_config = get_configuration(vnfd, kdu_name)
2859 if descriptor_config:
2860 vdu_id = None
2861 vdu_index = 0
2862 vdu_name = None
2863 kdu_index, kdur = next(
2864 x
2865 for x in enumerate(db_vnfr["kdur"])
2866 if x[1]["kdu-name"] == kdu_name
2867 )
2868 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2869 if kdur.get("additionalParams"):
2870 deploy_params_kdu.update(
2871 parse_yaml_strings(kdur["additionalParams"].copy())
2872 )
2873
2874 self._deploy_n2vc(
2875 logging_text=logging_text,
2876 db_nsr=db_nsr,
2877 db_vnfr=db_vnfr,
2878 nslcmop_id=nslcmop_id,
2879 nsr_id=nsr_id,
2880 nsi_id=nsi_id,
2881 vnfd_id=vnfd_id,
2882 vdu_id=vdu_id,
2883 kdu_name=kdu_name,
2884 member_vnf_index=member_vnf_index,
2885 vdu_index=vdu_index,
2886 kdu_index=kdu_index,
2887 vdu_name=vdu_name,
2888 deploy_params=deploy_params_kdu,
2889 descriptor_config=descriptor_config,
2890 base_folder=base_folder,
2891 task_instantiation_info=tasks_dict_info,
2892 stage=stage,
2893 )
2894
2895 # Check if each vnf has an exporter for metric collection; if so, update the prometheus job records
2896 if "exporters-endpoints" in vnfd.get("df")[0]:
2897 exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
2898 self.logger.debug("exporter config :{}".format(exporter_config))
2899 artifact_path = "{}/{}/{}".format(
2900 base_folder["folder"],
2901 base_folder["pkg-dir"],
2902 "exporter-endpoint",
2903 )
2904 ee_id = None
2905 ee_config_descriptor = exporter_config
2906 vnfr_id = db_vnfr["id"]
2907 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2908 logging_text,
2909 nsr_id,
2910 vnfr_id,
2911 vdu_id=None,
2912 vdu_index=None,
2913 user=None,
2914 pub_key=None,
2915 )
2916 self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
2917 self.logger.debug("Artifact_path:{}".format(artifact_path))
2918 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
2919 vdu_id_for_prom = None
2920 vdu_index_for_prom = None
2921 for x in get_iterable(db_vnfr, "vdur"):
2922 vdu_id_for_prom = x.get("vdu-id-ref")
2923 vdu_index_for_prom = x.get("count-index")
2924 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2925 ee_id=ee_id,
2926 artifact_path=artifact_path,
2927 ee_config_descriptor=ee_config_descriptor,
2928 vnfr_id=vnfr_id,
2929 nsr_id=nsr_id,
2930 target_ip=rw_mgmt_ip,
2931 element_type="VDU",
2932 vdu_id=vdu_id_for_prom,
2933 vdu_index=vdu_index_for_prom,
2934 )
2935
2936 self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
2937 if prometheus_jobs:
2938 db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2939 self.update_db_2(
2940 "nsrs",
2941 nsr_id,
2942 db_nsr_update,
2943 )
2944
2945 for job in prometheus_jobs:
2946 self.db.set_one(
2947 "prometheus_jobs",
2948 {"job_name": job["job_name"]},
2949 job,
2950 upsert=True,
2951 fail_on_empty=False,
2952 )
2953
2954 # Check if this NS has a charm configuration
2955 descriptor_config = nsd.get("ns-configuration")
2956 if descriptor_config and descriptor_config.get("juju"):
2957 vnfd_id = None
2958 db_vnfr = None
2959 member_vnf_index = None
2960 vdu_id = None
2961 kdu_name = None
2962 kdu_index = None
2963 vdu_index = 0
2964 vdu_name = None
2965
2966 # Get additional parameters
2967 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2968 if db_nsr.get("additionalParamsForNs"):
2969 deploy_params.update(
2970 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2971 )
2972 base_folder = nsd["_admin"]["storage"]
2973 self._deploy_n2vc(
2974 logging_text=logging_text,
2975 db_nsr=db_nsr,
2976 db_vnfr=db_vnfr,
2977 nslcmop_id=nslcmop_id,
2978 nsr_id=nsr_id,
2979 nsi_id=nsi_id,
2980 vnfd_id=vnfd_id,
2981 vdu_id=vdu_id,
2982 kdu_name=kdu_name,
2983 member_vnf_index=member_vnf_index,
2984 vdu_index=vdu_index,
2985 kdu_index=kdu_index,
2986 vdu_name=vdu_name,
2987 deploy_params=deploy_params,
2988 descriptor_config=descriptor_config,
2989 base_folder=base_folder,
2990 task_instantiation_info=tasks_dict_info,
2991 stage=stage,
2992 )
2993
2994 # the rest of the work is done in the finally clause
2995
2996 except (
2997 ROclient.ROClientException,
2998 DbException,
2999 LcmException,
3000 N2VCException,
3001 ) as e:
3002 self.logger.error(
3003 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
3004 )
3005 exc = e
3006 except asyncio.CancelledError:
3007 self.logger.error(
3008 logging_text + "Cancelled Exception while '{}'".format(stage[1])
3009 )
3010 exc = "Operation was cancelled"
3011 except Exception as e:
3012 exc = traceback.format_exc()
3013 self.logger.critical(
3014 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
3015 exc_info=True,
3016 )
3017 finally:
3018 if exc:
3019 error_list.append(str(exc))
3020 try:
3021 # wait for pending tasks
3022 if tasks_dict_info:
3023 stage[1] = "Waiting for instantiate pending tasks."
3024 self.logger.debug(logging_text + stage[1])
3025 error_list += await self._wait_for_tasks(
3026 logging_text,
3027 tasks_dict_info,
3028 timeout_ns_deploy,
3029 stage,
3030 nslcmop_id,
3031 nsr_id=nsr_id,
3032 )
3033 stage[1] = stage[2] = ""
3034 except asyncio.CancelledError:
3035 error_list.append("Cancelled")
3036 # TODO cancel all tasks
3037 except Exception as exc:
3038 error_list.append(str(exc))
3039
3040 # update operation-status
3041 db_nsr_update["operational-status"] = "running"
3042 # let's begin with VCA 'configured' status (later we can change it)
3043 db_nsr_update["config-status"] = "configured"
3044 for task, task_name in tasks_dict_info.items():
3045 if not task.done() or task.cancelled() or task.exception():
3046 if task_name.startswith(self.task_name_deploy_vca):
3047 # A N2VC task is pending
3048 db_nsr_update["config-status"] = "failed"
3049 else:
3050 # RO or KDU task is pending
3051 db_nsr_update["operational-status"] = "failed"
3052
3053 # update status at database
3054 if error_list:
3055 error_detail = ". ".join(error_list)
3056 self.logger.error(logging_text + error_detail)
3057 error_description_nslcmop = "{} Detail: {}".format(
3058 stage[0], error_detail
3059 )
3060 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
3061 nslcmop_id, stage[0]
3062 )
3063
3064 db_nsr_update["detailed-status"] = (
3065 error_description_nsr + " Detail: " + error_detail
3066 )
3067 db_nslcmop_update["detailed-status"] = error_detail
3068 nslcmop_operation_state = "FAILED"
3069 ns_state = "BROKEN"
3070 else:
3071 error_detail = None
3072 error_description_nsr = error_description_nslcmop = None
3073 ns_state = "READY"
3074 db_nsr_update["detailed-status"] = "Done"
3075 db_nslcmop_update["detailed-status"] = "Done"
3076 nslcmop_operation_state = "COMPLETED"
3077 # Gather auto-healing and auto-scaling alerts for each vnfr
3078 healing_alerts = []
3079 scaling_alerts = []
3080 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3081 vnfd = next(
3082 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3083 )
3084 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3085 for alert in healing_alerts:
3086 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3087 self.db.create("alerts", alert)
3088
3089 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3090 for alert in scaling_alerts:
3091 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3092 self.db.create("alerts", alert)
3093
3094 if db_nsr:
3095 self._write_ns_status(
3096 nsr_id=nsr_id,
3097 ns_state=ns_state,
3098 current_operation="IDLE",
3099 current_operation_id=None,
3100 error_description=error_description_nsr,
3101 error_detail=error_detail,
3102 other_update=db_nsr_update,
3103 )
3104 self._write_op_status(
3105 op_id=nslcmop_id,
3106 stage="",
3107 error_message=error_description_nslcmop,
3108 operation_state=nslcmop_operation_state,
3109 other_update=db_nslcmop_update,
3110 )
3111
3112 if nslcmop_operation_state:
3113 try:
3114 await self.msg.aiowrite(
3115 "ns",
3116 "instantiated",
3117 {
3118 "nsr_id": nsr_id,
3119 "nslcmop_id": nslcmop_id,
3120 "operationState": nslcmop_operation_state,
3121 },
3122 )
3123 except Exception as e:
3124 self.logger.error(
3125 logging_text + "kafka_write notification Exception {}".format(e)
3126 )
3127
3128 self.logger.debug(logging_text + "Exit")
3129 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3130
3131 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3132 if vnfd_id not in cached_vnfds:
3133 cached_vnfds[vnfd_id] = self.db.get_one(
3134 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3135 )
3136 return cached_vnfds[vnfd_id]
3137
3138 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3139 if vnf_profile_id not in cached_vnfrs:
3140 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3141 "vnfrs",
3142 {
3143 "member-vnf-index-ref": vnf_profile_id,
3144 "nsr-id-ref": nsr_id,
3145 },
3146 )
3147 return cached_vnfrs[vnf_profile_id]
3148
3149 def _is_deployed_vca_in_relation(
3150 self, vca: DeployedVCA, relation: Relation
3151 ) -> bool:
3152 found = False
3153 for endpoint in (relation.provider, relation.requirer):
3154 if endpoint["kdu-resource-profile-id"]:
3155 continue
3156 found = (
3157 vca.vnf_profile_id == endpoint.vnf_profile_id
3158 and vca.vdu_profile_id == endpoint.vdu_profile_id
3159 and vca.execution_environment_ref == endpoint.execution_environment_ref
3160 )
3161 if found:
3162 break
3163 return found
3164
3165 def _update_ee_relation_data_with_implicit_data(
3166 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3167 ):
3168 ee_relation_data = safe_get_ee_relation(
3169 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3170 )
3171 ee_relation_level = EELevel.get_level(ee_relation_data)
3172 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3173 "execution-environment-ref"
3174 ]:
3175 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3176 vnfd_id = vnf_profile["vnfd-id"]
3177 project = nsd["_admin"]["projects_read"][0]
3178 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3179 entity_id = (
3180 vnfd_id
3181 if ee_relation_level == EELevel.VNF
3182 else ee_relation_data["vdu-profile-id"]
3183 )
3184 ee = get_juju_ee_ref(db_vnfd, entity_id)
3185 if not ee:
3186 raise Exception(
3187 f"not execution environments found for ee_relation {ee_relation_data}"
3188 )
3189 ee_relation_data["execution-environment-ref"] = ee["id"]
3190 return ee_relation_data
3191
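# Illustrative (hypothetical ids): a VNF-level endpoint such as
#   {"nsr-id": "...", "vnf-profile-id": "1", "endpoint": "db"}
# that lacks "execution-environment-ref" gets that field filled with the id of
# the juju-based execution environment declared in the corresponding vnfd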
3192 def _get_ns_relations(
3193 self,
3194 nsr_id: str,
3195 nsd: Dict[str, Any],
3196 vca: DeployedVCA,
3197 cached_vnfds: Dict[str, Any],
3198 ) -> List[Relation]:
3199 relations = []
3200 db_ns_relations = get_ns_configuration_relation_list(nsd)
3201 for r in db_ns_relations:
3202 provider_dict = None
3203 requirer_dict = None
3204 if all(key in r for key in ("provider", "requirer")):
3205 provider_dict = r["provider"]
3206 requirer_dict = r["requirer"]
3207 elif "entities" in r:
3208 provider_id = r["entities"][0]["id"]
3209 provider_dict = {
3210 "nsr-id": nsr_id,
3211 "endpoint": r["entities"][0]["endpoint"],
3212 }
3213 if provider_id != nsd["id"]:
3214 provider_dict["vnf-profile-id"] = provider_id
3215 requirer_id = r["entities"][1]["id"]
3216 requirer_dict = {
3217 "nsr-id": nsr_id,
3218 "endpoint": r["entities"][1]["endpoint"],
3219 }
3220 if requirer_id != nsd["id"]:
3221 requirer_dict["vnf-profile-id"] = requirer_id
3222 else:
3223 raise Exception(
3224 "provider/requirer or entities must be included in the relation."
3225 )
3226 relation_provider = self._update_ee_relation_data_with_implicit_data(
3227 nsr_id, nsd, provider_dict, cached_vnfds
3228 )
3229 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3230 nsr_id, nsd, requirer_dict, cached_vnfds
3231 )
3232 provider = EERelation(relation_provider)
3233 requirer = EERelation(relation_requirer)
3234 relation = Relation(r["name"], provider, requirer)
3235 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3236 if vca_in_relation:
3237 relations.append(relation)
3238 return relations
3239
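# Illustrative (hypothetical descriptor snippets): both relation formats are
# accepted; the explicit one,
#   {"name": "relation1", "provider": {...}, "requirer": {...}}
# and the entity-based one, where the first entity acts as provider,
#   {"name": "relation1", "entities": [{"id": "vnf1", "endpoint": "db"},
#                                      {"id": "vnf2", "endpoint": "app"}]}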
3240 def _get_vnf_relations(
3241 self,
3242 nsr_id: str,
3243 nsd: Dict[str, Any],
3244 vca: DeployedVCA,
3245 cached_vnfds: Dict[str, Any],
3246 ) -> List[Relation]:
3247 relations = []
3248 if vca.target_element == "ns":
3249 self.logger.debug("VCA is an NS charm, not a VNF.")
3250 return relations
3251 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3252 vnf_profile_id = vnf_profile["id"]
3253 vnfd_id = vnf_profile["vnfd-id"]
3254 project = nsd["_admin"]["projects_read"][0]
3255 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3256 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3257 for r in db_vnf_relations:
3258 provider_dict = None
3259 requirer_dict = None
3260 if all(key in r for key in ("provider", "requirer")):
3261 provider_dict = r["provider"]
3262 requirer_dict = r["requirer"]
3263 elif "entities" in r:
3264 provider_id = r["entities"][0]["id"]
3265 provider_dict = {
3266 "nsr-id": nsr_id,
3267 "vnf-profile-id": vnf_profile_id,
3268 "endpoint": r["entities"][0]["endpoint"],
3269 }
3270 if provider_id != vnfd_id:
3271 provider_dict["vdu-profile-id"] = provider_id
3272 requirer_id = r["entities"][1]["id"]
3273 requirer_dict = {
3274 "nsr-id": nsr_id,
3275 "vnf-profile-id": vnf_profile_id,
3276 "endpoint": r["entities"][1]["endpoint"],
3277 }
3278 if requirer_id != vnfd_id:
3279 requirer_dict["vdu-profile-id"] = requirer_id
3280 else:
3281 raise Exception(
3282 "provider/requirer or entities must be included in the relation."
3283 )
3284 relation_provider = self._update_ee_relation_data_with_implicit_data(
3285 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3286 )
3287 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3288 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3289 )
3290 provider = EERelation(relation_provider)
3291 requirer = EERelation(relation_requirer)
3292 relation = Relation(r["name"], provider, requirer)
3293 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3294 if vca_in_relation:
3295 relations.append(relation)
3296 return relations
3297
3298 def _get_kdu_resource_data(
3299 self,
3300 ee_relation: EERelation,
3301 db_nsr: Dict[str, Any],
3302 cached_vnfds: Dict[str, Any],
3303 ) -> DeployedK8sResource:
3304 nsd = get_nsd(db_nsr)
3305 vnf_profiles = get_vnf_profiles(nsd)
3306 vnfd_id = find_in_list(
3307 vnf_profiles,
3308 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3309 )["vnfd-id"]
3310 project = nsd["_admin"]["projects_read"][0]
3311 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3312 kdu_resource_profile = get_kdu_resource_profile(
3313 db_vnfd, ee_relation.kdu_resource_profile_id
3314 )
3315 kdu_name = kdu_resource_profile["kdu-name"]
3316 deployed_kdu, _ = get_deployed_kdu(
3317 db_nsr.get("_admin", ()).get("deployed", ()),
3318 kdu_name,
3319 ee_relation.vnf_profile_id,
3320 )
3321 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3322 return deployed_kdu
3323
3324 def _get_deployed_component(
3325 self,
3326 ee_relation: EERelation,
3327 db_nsr: Dict[str, Any],
3328 cached_vnfds: Dict[str, Any],
3329 ) -> DeployedComponent:
3330 nsr_id = db_nsr["_id"]
3331 deployed_component = None
3332 ee_level = EELevel.get_level(ee_relation)
3333 if ee_level == EELevel.NS:
3334 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3335 if vca:
3336 deployed_component = DeployedVCA(nsr_id, vca)
3337 elif ee_level == EELevel.VNF:
3338 vca = get_deployed_vca(
3339 db_nsr,
3340 {
3341 "vdu_id": None,
3342 "member-vnf-index": ee_relation.vnf_profile_id,
3343 "ee_descriptor_id": ee_relation.execution_environment_ref,
3344 },
3345 )
3346 if vca:
3347 deployed_component = DeployedVCA(nsr_id, vca)
3348 elif ee_level == EELevel.VDU:
3349 vca = get_deployed_vca(
3350 db_nsr,
3351 {
3352 "vdu_id": ee_relation.vdu_profile_id,
3353 "member-vnf-index": ee_relation.vnf_profile_id,
3354 "ee_descriptor_id": ee_relation.execution_environment_ref,
3355 },
3356 )
3357 if vca:
3358 deployed_component = DeployedVCA(nsr_id, vca)
3359 elif ee_level == EELevel.KDU:
3360 kdu_resource_data = self._get_kdu_resource_data(
3361 ee_relation, db_nsr, cached_vnfds
3362 )
3363 if kdu_resource_data:
3364 deployed_component = DeployedK8sResource(kdu_resource_data)
3365 return deployed_component
3366
3367 async def _add_relation(
3368 self,
3369 relation: Relation,
3370 vca_type: str,
3371 db_nsr: Dict[str, Any],
3372 cached_vnfds: Dict[str, Any],
3373 cached_vnfrs: Dict[str, Any],
3374 ) -> bool:
3375 deployed_provider = self._get_deployed_component(
3376 relation.provider, db_nsr, cached_vnfds
3377 )
3378 deployed_requirer = self._get_deployed_component(
3379 relation.requirer, db_nsr, cached_vnfds
3380 )
3381 if (
3382 deployed_provider
3383 and deployed_requirer
3384 and deployed_provider.config_sw_installed
3385 and deployed_requirer.config_sw_installed
3386 ):
3387 provider_db_vnfr = (
3388 self._get_vnfr(
3389 relation.provider.nsr_id,
3390 relation.provider.vnf_profile_id,
3391 cached_vnfrs,
3392 )
3393 if relation.provider.vnf_profile_id
3394 else None
3395 )
3396 requirer_db_vnfr = (
3397 self._get_vnfr(
3398 relation.requirer.nsr_id,
3399 relation.requirer.vnf_profile_id,
3400 cached_vnfrs,
3401 )
3402 if relation.requirer.vnf_profile_id
3403 else None
3404 )
3405 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3406 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3407 provider_relation_endpoint = RelationEndpoint(
3408 deployed_provider.ee_id,
3409 provider_vca_id,
3410 relation.provider.endpoint,
3411 )
3412 requirer_relation_endpoint = RelationEndpoint(
3413 deployed_requirer.ee_id,
3414 requirer_vca_id,
3415 relation.requirer.endpoint,
3416 )
3417 try:
3418 await self.vca_map[vca_type].add_relation(
3419 provider=provider_relation_endpoint,
3420 requirer=requirer_relation_endpoint,
3421 )
3422 except N2VCException as exception:
3423 self.logger.error(exception)
3424 raise LcmException(exception)
3425 return True
3426 return False
3427
3428 async def _add_vca_relations(
3429 self,
3430 logging_text,
3431 nsr_id,
3432 vca_type: str,
3433 vca_index: int,
3434 timeout: int = 3600,
3435 ) -> bool:
3436 # steps:
3437 # 1. find all relations for this VCA
3438 # 2. wait for other peers related
3439 # 3. add relations
3440
3441 try:
3442 # STEP 1: find all relations for this VCA
3443
3444 # read nsr record
3445 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3446 nsd = get_nsd(db_nsr)
3447
3448 # this VCA data
3449 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3450 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3451
3452 cached_vnfds = {}
3453 cached_vnfrs = {}
3454 relations = []
3455 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3456 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3457
3458 # if no relations, terminate
3459 if not relations:
3460 self.logger.debug(logging_text + " No relations")
3461 return True
3462
3463 self.logger.debug(logging_text + " adding relations {}".format(relations))
3464
3465 # add all relations
3466 start = time()
3467 while True:
3468 # check timeout
3469 now = time()
3470 if now - start >= timeout:
3471 self.logger.error(logging_text + " : timeout adding relations")
3472 return False
3473
3474 # reload nsr from database (we need the updated record: _admin.deployed.VCA)
3475 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3476
3477 # for each relation, find the related VCAs
3478 for relation in relations.copy():
3479 added = await self._add_relation(
3480 relation,
3481 vca_type,
3482 db_nsr,
3483 cached_vnfds,
3484 cached_vnfrs,
3485 )
3486 if added:
3487 relations.remove(relation)
3488
3489 if not relations:
3490 self.logger.debug("Relations added")
3491 break
3492 await asyncio.sleep(5.0)
3493
3494 return True
3495
3496 except Exception as e:
3497 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3498 return False
3499
3500 async def _install_kdu(
3501 self,
3502 nsr_id: str,
3503 nsr_db_path: str,
3504 vnfr_data: dict,
3505 kdu_index: int,
3506 kdud: dict,
3507 vnfd: dict,
3508 k8s_instance_info: dict,
3509 k8params: dict = None,
3510 timeout: int = 600,
3511 vca_id: str = None,
3512 ):
3513 try:
3514 k8sclustertype = k8s_instance_info["k8scluster-type"]
3515 # Instantiate kdu
3516 db_dict_install = {
3517 "collection": "nsrs",
3518 "filter": {"_id": nsr_id},
3519 "path": nsr_db_path,
3520 }
3521
3522 if k8s_instance_info.get("kdu-deployment-name"):
3523 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3524 else:
3525 kdu_instance = self.k8scluster_map[
3526 k8sclustertype
3527 ].generate_kdu_instance_name(
3528 db_dict=db_dict_install,
3529 kdu_model=k8s_instance_info["kdu-model"],
3530 kdu_name=k8s_instance_info["kdu-name"],
3531 )
3532
3533 # Update the nsrs table with the kdu-instance value
3534 self.update_db_2(
3535 item="nsrs",
3536 _id=nsr_id,
3537 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3538 )
3539
3540 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3541 # `juju-bundle`. This verification is needed because there is no standard/homogeneous namespace
3542 # between the Helm Charts and Juju Bundles-based KNFs. If we find a way of having a homogeneous
3543 # namespace, this first verification could be removed, and the next step would be done for any kind
3544 # of KNF.
3545 # TODO -> find a way to have a homogeneous namespace between the Helm Charts and Juju Bundles-based
3546 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3547 if k8sclustertype in ("juju", "juju-bundle"):
3548                 # First, verify whether the current namespace is present in `_admin.projects_read` (if not, it means
3549                 # that the user passed an explicit namespace in which the KDU should be deployed)
3550 if (
3551 self.db.count(
3552 table="nsrs",
3553 q_filter={
3554 "_id": nsr_id,
3555 "_admin.projects_write": k8s_instance_info["namespace"],
3556 "_admin.projects_read": k8s_instance_info["namespace"],
3557 },
3558 )
3559 > 0
3560 ):
3561 self.logger.debug(
3562 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3563 )
3564 self.update_db_2(
3565 item="nsrs",
3566 _id=nsr_id,
3567 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3568 )
3569 k8s_instance_info["namespace"] = kdu_instance
3570
3571 await self.k8scluster_map[k8sclustertype].install(
3572 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3573 kdu_model=k8s_instance_info["kdu-model"],
3574 atomic=True,
3575 params=k8params,
3576 db_dict=db_dict_install,
3577 timeout=timeout,
3578 kdu_name=k8s_instance_info["kdu-name"],
3579 namespace=k8s_instance_info["namespace"],
3580 kdu_instance=kdu_instance,
3581 vca_id=vca_id,
3582 )
3583
3584             # Obtain the deployed services to get the management service IP
3585 services = await self.k8scluster_map[k8sclustertype].get_services(
3586 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3587 kdu_instance=kdu_instance,
3588 namespace=k8s_instance_info["namespace"],
3589 )
3590
3591             # Obtain management service info (if it exists)
3592 vnfr_update_dict = {}
3593 kdu_config = get_configuration(vnfd, kdud["name"])
3594 if kdu_config:
3595 target_ee_list = kdu_config.get("execution-environment-list", [])
3596 else:
3597 target_ee_list = []
3598
3599 if services:
3600 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3601 mgmt_services = [
3602 service
3603 for service in kdud.get("service", [])
3604 if service.get("mgmt-service")
3605 ]
3606 for mgmt_service in mgmt_services:
3607 for service in services:
3608 if service["name"].startswith(mgmt_service["name"]):
3609                         # Mgmt service found, obtain the service IP
3610 ip = service.get("external_ip", service.get("cluster_ip"))
3611 if isinstance(ip, list) and len(ip) == 1:
3612 ip = ip[0]
3613
3614 vnfr_update_dict[
3615 "kdur.{}.ip-address".format(kdu_index)
3616 ] = ip
3617
3618                         # Check whether the mgmt IP must also be updated at the VNF level
3619 service_external_cp = mgmt_service.get(
3620 "external-connection-point-ref"
3621 )
3622 if service_external_cp:
3623 if (
3624 deep_get(vnfd, ("mgmt-interface", "cp"))
3625 == service_external_cp
3626 ):
3627 vnfr_update_dict["ip-address"] = ip
3628
3629 if find_in_list(
3630 target_ee_list,
3631 lambda ee: ee.get(
3632 "external-connection-point-ref", ""
3633 )
3634 == service_external_cp,
3635 ):
3636 vnfr_update_dict[
3637 "kdur.{}.ip-address".format(kdu_index)
3638 ] = ip
3639 break
3640 else:
3641                         self.logger.warning(
3642 "Mgmt service name: {} not found".format(
3643 mgmt_service["name"]
3644 )
3645 )
3646
3647 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3648 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3649
3650 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3651 if (
3652 kdu_config
3653 and kdu_config.get("initial-config-primitive")
3654 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3655 ):
3656 initial_config_primitive_list = kdu_config.get(
3657 "initial-config-primitive"
3658 )
3659 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3660
3661 for initial_config_primitive in initial_config_primitive_list:
3662 primitive_params_ = self._map_primitive_params(
3663 initial_config_primitive, {}, {}
3664 )
3665
3666 await asyncio.wait_for(
3667 self.k8scluster_map[k8sclustertype].exec_primitive(
3668 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3669 kdu_instance=kdu_instance,
3670 primitive_name=initial_config_primitive["name"],
3671 params=primitive_params_,
3672 db_dict=db_dict_install,
3673 vca_id=vca_id,
3674 ),
3675 timeout=timeout,
3676 )
3677
3678 except Exception as e:
3679 # Prepare update db with error and raise exception
3680 try:
3681 self.update_db_2(
3682 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3683 )
3684 self.update_db_2(
3685 "vnfrs",
3686 vnfr_data.get("_id"),
3687 {"kdur.{}.status".format(kdu_index): "ERROR"},
3688 )
3689 except Exception:
3690 # ignore to keep original exception
3691 pass
3692 # reraise original error
3693 raise
3694
3695 return kdu_instance
3696
3697 async def deploy_kdus(
3698 self,
3699 logging_text,
3700 nsr_id,
3701 nslcmop_id,
3702 db_vnfrs,
3703 db_vnfds,
3704 task_instantiation_info,
3705 ):
3706 # Launch kdus if present in the descriptor
3707
3708         k8scluster_id_2_uuid = {
3709 "helm-chart-v3": {},
3710 "helm-chart": {},
3711 "juju-bundle": {},
3712 }
3713
3714 async def _get_cluster_id(cluster_id, cluster_type):
3715             nonlocal k8scluster_id_2_uuid
3716             if cluster_id in k8scluster_id_2_uuid[cluster_type]:
3717                 return k8scluster_id_2_uuid[cluster_type][cluster_id]
3718
3719             # check if the K8s cluster is being created; wait for related tasks in process to finish
3720 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3721 "k8scluster", cluster_id
3722 )
3723 if task_dependency:
3724 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3725 task_name, cluster_id
3726 )
3727 self.logger.debug(logging_text + text)
3728 await asyncio.wait(task_dependency, timeout=3600)
3729
3730 db_k8scluster = self.db.get_one(
3731 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3732 )
3733 if not db_k8scluster:
3734 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3735
3736 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3737 if not k8s_id:
3738 if cluster_type == "helm-chart-v3":
3739 try:
3740 # backward compatibility for existing clusters that have not been initialized for helm v3
3741 k8s_credentials = yaml.safe_dump(
3742 db_k8scluster.get("credentials")
3743 )
3744 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3745 k8s_credentials, reuse_cluster_uuid=cluster_id
3746 )
3747 db_k8scluster_update = {}
3748 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3749 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3750 db_k8scluster_update[
3751 "_admin.helm-chart-v3.created"
3752 ] = uninstall_sw
3753 db_k8scluster_update[
3754 "_admin.helm-chart-v3.operationalState"
3755 ] = "ENABLED"
3756 self.update_db_2(
3757 "k8sclusters", cluster_id, db_k8scluster_update
3758 )
3759 except Exception as e:
3760 self.logger.error(
3761 logging_text
3762 + "error initializing helm-v3 cluster: {}".format(str(e))
3763 )
3764 raise LcmException(
3765 "K8s cluster '{}' has not been initialized for '{}'".format(
3766 cluster_id, cluster_type
3767 )
3768 )
3769 else:
3770 raise LcmException(
3771 "K8s cluster '{}' has not been initialized for '{}'".format(
3772 cluster_id, cluster_type
3773 )
3774 )
3775             k8scluster_id_2_uuid[cluster_type][cluster_id] = k8s_id
3776 return k8s_id
3777
3778 logging_text += "Deploy kdus: "
3779 step = ""
3780 try:
3781 db_nsr_update = {"_admin.deployed.K8s": []}
3782 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3783
3784 index = 0
3785 updated_cluster_list = []
3786 updated_v3_cluster_list = []
3787
3788 for vnfr_data in db_vnfrs.values():
3789 vca_id = self.get_vca_id(vnfr_data, {})
3790 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3791 # Step 0: Prepare and set parameters
3792 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3793 vnfd_id = vnfr_data.get("vnfd-id")
3794 vnfd_with_id = find_in_list(
3795 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3796 )
3797 kdud = next(
3798 kdud
3799 for kdud in vnfd_with_id["kdu"]
3800 if kdud["name"] == kdur["kdu-name"]
3801 )
3802 namespace = kdur.get("k8s-namespace")
3803 kdu_deployment_name = kdur.get("kdu-deployment-name")
3804 if kdur.get("helm-chart"):
3805 kdumodel = kdur["helm-chart"]
3806                         # Default version is helm3; if helm-version is v2, assign v2
3807 k8sclustertype = "helm-chart-v3"
3808 self.logger.debug("kdur: {}".format(kdur))
3809 if (
3810 kdur.get("helm-version")
3811 and kdur.get("helm-version") == "v2"
3812 ):
3813 k8sclustertype = "helm-chart"
3814 elif kdur.get("juju-bundle"):
3815 kdumodel = kdur["juju-bundle"]
3816 k8sclustertype = "juju-bundle"
3817 else:
3818 raise LcmException(
3819 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3820 "juju-bundle. Maybe an old NBI version is running".format(
3821 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3822 )
3823 )
3824                     # check whether kdumodel refers to an existing file
3825 try:
3826 vnfd_with_id = find_in_list(
3827 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3828 )
3829 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3830                         if storage:  # may not be present if the vnfd has no artifacts
3831                             # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
3832 if storage["pkg-dir"]:
3833 filename = "{}/{}/{}s/{}".format(
3834 storage["folder"],
3835 storage["pkg-dir"],
3836 k8sclustertype,
3837 kdumodel,
3838 )
3839 else:
3840 filename = "{}/Scripts/{}s/{}".format(
3841 storage["folder"],
3842 k8sclustertype,
3843 kdumodel,
3844 )
3845 if self.fs.file_exists(
3846 filename, mode="file"
3847 ) or self.fs.file_exists(filename, mode="dir"):
3848 kdumodel = self.fs.path + filename
3849 except (asyncio.TimeoutError, asyncio.CancelledError):
3850 raise
3851 except Exception: # it is not a file
3852 pass
3853
3854 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3855 step = "Synchronize repos for k8s cluster '{}'".format(
3856 k8s_cluster_id
3857 )
3858 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3859
3860 # Synchronize repos
3861 if (
3862 k8sclustertype == "helm-chart"
3863 and cluster_uuid not in updated_cluster_list
3864 ) or (
3865 k8sclustertype == "helm-chart-v3"
3866 and cluster_uuid not in updated_v3_cluster_list
3867 ):
3868 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3869 self.k8scluster_map[k8sclustertype].synchronize_repos(
3870 cluster_uuid=cluster_uuid
3871 )
3872 )
3873 if del_repo_list or added_repo_dict:
3874 if k8sclustertype == "helm-chart":
3875 unset = {
3876 "_admin.helm_charts_added." + item: None
3877 for item in del_repo_list
3878 }
3879 updated = {
3880 "_admin.helm_charts_added." + item: name
3881 for item, name in added_repo_dict.items()
3882 }
3883 updated_cluster_list.append(cluster_uuid)
3884 elif k8sclustertype == "helm-chart-v3":
3885 unset = {
3886 "_admin.helm_charts_v3_added." + item: None
3887 for item in del_repo_list
3888 }
3889 updated = {
3890 "_admin.helm_charts_v3_added." + item: name
3891 for item, name in added_repo_dict.items()
3892 }
3893 updated_v3_cluster_list.append(cluster_uuid)
3894 self.logger.debug(
3895 logging_text + "repos synchronized on k8s cluster "
3896 "'{}' to_delete: {}, to_add: {}".format(
3897 k8s_cluster_id, del_repo_list, added_repo_dict
3898 )
3899 )
3900 self.db.set_one(
3901 "k8sclusters",
3902 {"_id": k8s_cluster_id},
3903 updated,
3904 unset=unset,
3905 )
3906
3907 # Instantiate kdu
3908 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3909 vnfr_data["member-vnf-index-ref"],
3910 kdur["kdu-name"],
3911 k8s_cluster_id,
3912 )
3913 k8s_instance_info = {
3914 "kdu-instance": None,
3915 "k8scluster-uuid": cluster_uuid,
3916 "k8scluster-type": k8sclustertype,
3917 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3918 "kdu-name": kdur["kdu-name"],
3919 "kdu-model": kdumodel,
3920 "namespace": namespace,
3921 "kdu-deployment-name": kdu_deployment_name,
3922 }
3923 db_path = "_admin.deployed.K8s.{}".format(index)
3924 db_nsr_update[db_path] = k8s_instance_info
3925 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3926 vnfd_with_id = find_in_list(
3927 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3928 )
3929 task = asyncio.ensure_future(
3930 self._install_kdu(
3931 nsr_id,
3932 db_path,
3933 vnfr_data,
3934 kdu_index,
3935 kdud,
3936 vnfd_with_id,
3937 k8s_instance_info,
3938 k8params=desc_params,
3939 timeout=1800,
3940 vca_id=vca_id,
3941 )
3942 )
3943 self.lcm_tasks.register(
3944 "ns",
3945 nsr_id,
3946 nslcmop_id,
3947 "instantiate_KDU-{}".format(index),
3948 task,
3949 )
3950 task_instantiation_info[task] = "Deploying KDU {}".format(
3951 kdur["kdu-name"]
3952 )
3953
3954 index += 1
3955
3956 except (LcmException, asyncio.CancelledError):
3957 raise
3958 except Exception as e:
3959 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3960 if isinstance(e, (N2VCException, DbException)):
3961 self.logger.error(logging_text + msg)
3962 else:
3963 self.logger.critical(logging_text + msg, exc_info=True)
3964 raise LcmException(msg)
3965 finally:
3966 if db_nsr_update:
3967 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3968
3969 def _deploy_n2vc(
3970 self,
3971 logging_text,
3972 db_nsr,
3973 db_vnfr,
3974 nslcmop_id,
3975 nsr_id,
3976 nsi_id,
3977 vnfd_id,
3978 vdu_id,
3979 kdu_name,
3980 member_vnf_index,
3981 vdu_index,
3982 kdu_index,
3983 vdu_name,
3984 deploy_params,
3985 descriptor_config,
3986 base_folder,
3987 task_instantiation_info,
3988 stage,
3989 ):
3990         # launch instantiate_N2VC in an asyncio task and register the task object
3991         # Look up the information of this charm in the database at <nsrs>._admin.deployed.VCA;
3992         # if not found, create one entry and update the database
3993         # fill db_nsr._admin.deployed.VCA.<index>
3994
3995 self.logger.debug(
3996 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
3997 )
3998
3999 charm_name = ""
4000 get_charm_name = False
4001 if "execution-environment-list" in descriptor_config:
4002 ee_list = descriptor_config.get("execution-environment-list", [])
4003 elif "juju" in descriptor_config:
4004 ee_list = [descriptor_config] # ns charms
4005 if "execution-environment-list" not in descriptor_config:
4006 # charm name is only required for ns charms
4007 get_charm_name = True
4008         else:  # other types, such as script, are not supported
4009 ee_list = []
4010
4011 for ee_item in ee_list:
4012 self.logger.debug(
4013 logging_text
4014 + "_deploy_n2vc ee_item juju={}, helm={}".format(
4015 ee_item.get("juju"), ee_item.get("helm-chart")
4016 )
4017 )
4018 ee_descriptor_id = ee_item.get("id")
4019 if ee_item.get("juju"):
4020 vca_name = ee_item["juju"].get("charm")
4021 if get_charm_name:
4022 charm_name = self.find_charm_name(db_nsr, str(vca_name))
4023 vca_type = (
4024 "lxc_proxy_charm"
4025 if ee_item["juju"].get("charm") is not None
4026 else "native_charm"
4027 )
4028 if ee_item["juju"].get("cloud") == "k8s":
4029 vca_type = "k8s_proxy_charm"
4030 elif ee_item["juju"].get("proxy") is False:
4031 vca_type = "native_charm"
4032 elif ee_item.get("helm-chart"):
4033 vca_name = ee_item["helm-chart"]
4034 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
4035 vca_type = "helm"
4036 else:
4037 vca_type = "helm-v3"
4038 else:
4039 self.logger.debug(
4040 logging_text + "skipping non juju neither charm configuration"
4041 )
4042 continue
4043
4044 vca_index = -1
4045 for vca_index, vca_deployed in enumerate(
4046 db_nsr["_admin"]["deployed"]["VCA"]
4047 ):
4048 if not vca_deployed:
4049 continue
4050 if (
4051 vca_deployed.get("member-vnf-index") == member_vnf_index
4052 and vca_deployed.get("vdu_id") == vdu_id
4053 and vca_deployed.get("kdu_name") == kdu_name
4054 and vca_deployed.get("vdu_count_index", 0) == vdu_index
4055 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
4056 ):
4057 break
4058 else:
4059 # not found, create one.
4060 target = (
4061 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
4062 )
4063 if vdu_id:
4064 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
4065 elif kdu_name:
4066 target += "/kdu/{}".format(kdu_name)
4067 vca_deployed = {
4068 "target_element": target,
4069 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4070 "member-vnf-index": member_vnf_index,
4071 "vdu_id": vdu_id,
4072 "kdu_name": kdu_name,
4073 "vdu_count_index": vdu_index,
4074 "operational-status": "init", # TODO revise
4075 "detailed-status": "", # TODO revise
4076 "step": "initial-deploy", # TODO revise
4077 "vnfd_id": vnfd_id,
4078 "vdu_name": vdu_name,
4079 "type": vca_type,
4080 "ee_descriptor_id": ee_descriptor_id,
4081 "charm_name": charm_name,
4082 }
4083 vca_index += 1
4084
4085 # create VCA and configurationStatus in db
4086 db_dict = {
4087 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
4088 "configurationStatus.{}".format(vca_index): dict(),
4089 }
4090 self.update_db_2("nsrs", nsr_id, db_dict)
4091
4092 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
4093
4094 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
4095 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
4096 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
4097
4098 # Launch task
4099 task_n2vc = asyncio.ensure_future(
4100 self.instantiate_N2VC(
4101 logging_text=logging_text,
4102 vca_index=vca_index,
4103 nsi_id=nsi_id,
4104 db_nsr=db_nsr,
4105 db_vnfr=db_vnfr,
4106 vdu_id=vdu_id,
4107 kdu_name=kdu_name,
4108 vdu_index=vdu_index,
4109 kdu_index=kdu_index,
4110 deploy_params=deploy_params,
4111 config_descriptor=descriptor_config,
4112 base_folder=base_folder,
4113 nslcmop_id=nslcmop_id,
4114 stage=stage,
4115 vca_type=vca_type,
4116 vca_name=vca_name,
4117 ee_config_descriptor=ee_item,
4118 )
4119 )
4120 self.lcm_tasks.register(
4121 "ns",
4122 nsr_id,
4123 nslcmop_id,
4124 "instantiate_N2VC-{}".format(vca_index),
4125 task_n2vc,
4126 )
4127 task_instantiation_info[
4128 task_n2vc
4129 ] = self.task_name_deploy_vca + " {}.{}".format(
4130 member_vnf_index or "", vdu_id or ""
4131 )
4132
4133 @staticmethod
4134 def _create_nslcmop(nsr_id, operation, params):
4135 """
4136         Creates an nslcmop content to be stored in the database.
4137 :param nsr_id: internal id of the instance
4138 :param operation: instantiate, terminate, scale, action, ...
4139 :param params: user parameters for the operation
4140 :return: dictionary following SOL005 format
4141 """
4142 # Raise exception if invalid arguments
4143 if not (nsr_id and operation and params):
4144 raise LcmException(
4145 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4146 )
4147 now = time()
4148 _id = str(uuid4())
4149 nslcmop = {
4150 "id": _id,
4151 "_id": _id,
4152 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4153 "operationState": "PROCESSING",
4154 "statusEnteredTime": now,
4155 "nsInstanceId": nsr_id,
4156 "lcmOperationType": operation,
4157 "startTime": now,
4158 "isAutomaticInvocation": False,
4159 "operationParams": params,
4160 "isCancelPending": False,
4161 "links": {
4162 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
4163 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
4164 },
4165 }
4166 return nslcmop
4167
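    # Illustrative sketch (not in the original source; the ns id and primitive are made-up
    # values, and the enclosing class is assumed to be named NsLcm):
    #
    #     op = NsLcm._create_nslcmop("my-nsr-id", "action", {"primitive": "touch"})
    #     assert op["id"] == op["_id"]                      # same fresh uuid4
    #     assert op["operationState"] == "PROCESSING"
    #     assert op["statusEnteredTime"] == op["startTime"]
    #     assert op["links"]["nsInstance"].endswith("my-nsr-id")
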
4168 def _format_additional_params(self, params):
4169 params = params or {}
4170 for key, value in params.items():
4171 if str(value).startswith("!!yaml "):
4172 params[key] = yaml.safe_load(value[7:])
4173 return params
4174
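    # Illustrative example (hypothetical keys/values): only string values prefixed with
    # "!!yaml " are parsed; everything else passes through unchanged:
    #
    #     self._format_additional_params({"ports": "!!yaml [80, 443]", "name": "web"})
    #     # -> {"ports": [80, 443], "name": "web"}
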
4175 def _get_terminate_primitive_params(self, seq, vnf_index):
4176 primitive = seq.get("name")
4177 primitive_params = {}
4178 params = {
4179 "member_vnf_index": vnf_index,
4180 "primitive": primitive,
4181 "primitive_params": primitive_params,
4182 }
4183 desc_params = {}
4184 return self._map_primitive_params(seq, params, desc_params)
4185
4186 # sub-operations
4187
4188 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4189 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4190 if op.get("operationState") == "COMPLETED":
4191 # b. Skip sub-operation
4192 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4193 return self.SUBOPERATION_STATUS_SKIP
4194 else:
4195 # c. retry executing sub-operation
4196 # The sub-operation exists, and operationState != 'COMPLETED'
4197 # Update operationState = 'PROCESSING' to indicate a retry.
4198 operationState = "PROCESSING"
4199 detailed_status = "In progress"
4200 self._update_suboperation_status(
4201 db_nslcmop, op_index, operationState, detailed_status
4202 )
4203 # Return the sub-operation index
4204 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4205 # with arguments extracted from the sub-operation
4206 return op_index
4207
4208 # Find a sub-operation where all keys in a matching dictionary must match
4209 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4210 def _find_suboperation(self, db_nslcmop, match):
4211 if db_nslcmop and match:
4212 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4213 for i, op in enumerate(op_list):
4214 if all(op.get(k) == match[k] for k in match):
4215 return i
4216 return self.SUBOPERATION_STATUS_NOT_FOUND
4217
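    # Hedged sketch of the matching semantics (keys/values are made up): every key in
    # `match` must compare equal on the stored sub-operation; extra keys stored on the
    # sub-operation are ignored:
    #
    #     match = {"member_vnf_index": "1", "primitive": "touch"}
    #     op_index = self._find_suboperation(db_nslcmop, match)
    #     if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
    #         pass  # no stored sub-operation matched; treat as first execution
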
4218 # Update status for a sub-operation given its index
4219 def _update_suboperation_status(
4220 self, db_nslcmop, op_index, operationState, detailed_status
4221 ):
4222 # Update DB for HA tasks
4223 q_filter = {"_id": db_nslcmop["_id"]}
4224 update_dict = {
4225 "_admin.operations.{}.operationState".format(op_index): operationState,
4226 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4227 }
4228 self.db.set_one(
4229 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4230 )
4231
4232 # Add sub-operation, return the index of the added sub-operation
4233 # Optionally, set operationState, detailed-status, and operationType
4234 # Status and type are currently set for 'scale' sub-operations:
4235 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4236 # 'detailed-status' : status message
4237 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4238 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4239 def _add_suboperation(
4240 self,
4241 db_nslcmop,
4242 vnf_index,
4243 vdu_id,
4244 vdu_count_index,
4245 vdu_name,
4246 primitive,
4247 mapped_primitive_params,
4248 operationState=None,
4249 detailed_status=None,
4250 operationType=None,
4251 RO_nsr_id=None,
4252 RO_scaling_info=None,
4253 ):
4254 if not db_nslcmop:
4255 return self.SUBOPERATION_STATUS_NOT_FOUND
4256 # Get the "_admin.operations" list, if it exists
4257 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4258 op_list = db_nslcmop_admin.get("operations")
4259 # Create or append to the "_admin.operations" list
4260 new_op = {
4261 "member_vnf_index": vnf_index,
4262 "vdu_id": vdu_id,
4263 "vdu_count_index": vdu_count_index,
4264 "primitive": primitive,
4265 "primitive_params": mapped_primitive_params,
4266 }
4267 if operationState:
4268 new_op["operationState"] = operationState
4269 if detailed_status:
4270 new_op["detailed-status"] = detailed_status
4271 if operationType:
4272 new_op["lcmOperationType"] = operationType
4273 if RO_nsr_id:
4274 new_op["RO_nsr_id"] = RO_nsr_id
4275 if RO_scaling_info:
4276 new_op["RO_scaling_info"] = RO_scaling_info
4277 if not op_list:
4278 # No existing operations, create key 'operations' with current operation as first list element
4279 db_nslcmop_admin.update({"operations": [new_op]})
4280 op_list = db_nslcmop_admin.get("operations")
4281 else:
4282 # Existing operations, append operation to list
4283 op_list.append(new_op)
4284
4285 db_nslcmop_update = {"_admin.operations": op_list}
4286 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4287 op_index = len(op_list) - 1
4288 return op_index
4289
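    # Illustrative call (hypothetical values): the base fields member_vnf_index, vdu_id,
    # vdu_count_index, primitive and primitive_params are always stored, while the
    # optional fields (operationState, detailed-status, ...) are stored only when truthy:
    #
    #     op_index = self._add_suboperation(
    #         db_nslcmop, "1", None, None, None, "touch", {"filename": "/tmp/f"})
    #     # db_nslcmop["_admin"]["operations"][op_index]["primitive"] == "touch"
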
4290 # Helper methods for scale() sub-operations
4291
4292 # pre-scale/post-scale:
4293 # Check for 3 different cases:
4294 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4295 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4296 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4297 def _check_or_add_scale_suboperation(
4298 self,
4299 db_nslcmop,
4300 vnf_index,
4301 vnf_config_primitive,
4302 primitive_params,
4303 operationType,
4304 RO_nsr_id=None,
4305 RO_scaling_info=None,
4306 ):
4307 # Find this sub-operation
4308 if RO_nsr_id and RO_scaling_info:
4309 operationType = "SCALE-RO"
4310 match = {
4311 "member_vnf_index": vnf_index,
4312 "RO_nsr_id": RO_nsr_id,
4313 "RO_scaling_info": RO_scaling_info,
4314 }
4315 else:
4316 match = {
4317 "member_vnf_index": vnf_index,
4318 "primitive": vnf_config_primitive,
4319 "primitive_params": primitive_params,
4320 "lcmOperationType": operationType,
4321 }
4322 op_index = self._find_suboperation(db_nslcmop, match)
4323 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4324 # a. New sub-operation
4325 # The sub-operation does not exist, add it.
4326 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4327 # The following parameters are set to None for all kind of scaling:
4328 vdu_id = None
4329 vdu_count_index = None
4330 vdu_name = None
4331 if RO_nsr_id and RO_scaling_info:
4332 vnf_config_primitive = None
4333 primitive_params = None
4334 else:
4335 RO_nsr_id = None
4336 RO_scaling_info = None
4337 # Initial status for sub-operation
4338 operationState = "PROCESSING"
4339 detailed_status = "In progress"
4340 # Add sub-operation for pre/post-scaling (zero or more operations)
4341 self._add_suboperation(
4342 db_nslcmop,
4343 vnf_index,
4344 vdu_id,
4345 vdu_count_index,
4346 vdu_name,
4347 vnf_config_primitive,
4348 primitive_params,
4349 operationState,
4350 detailed_status,
4351 operationType,
4352 RO_nsr_id,
4353 RO_scaling_info,
4354 )
4355 return self.SUBOPERATION_STATUS_NEW
4356 else:
4357 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4358 # or op_index (operationState != 'COMPLETED')
4359 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4360
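    # Hedged summary (made-up values; relies on the SUBOPERATION_STATUS_* constants
    # being negative sentinels, so a non-negative result is a real index):
    #
    #     rc = self._check_or_add_scale_suboperation(
    #         db_nslcmop, "1", "touch", {"filename": "/tmp/f"}, "PRE-SCALE")
    #     # rc == SUBOPERATION_STATUS_NEW  -> first run, sub-operation just added
    #     # rc == SUBOPERATION_STATUS_SKIP -> stored op already COMPLETED, skip it
    #     # rc >= 0                        -> retry the existing sub-operation at index rc
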
4361 # Function to return execution_environment id
4362
4363 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4364 # TODO vdu_index_count
4365 for vca in vca_deployed_list:
4366 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4367 return vca.get("ee_id")
4368
4369 async def destroy_N2VC(
4370 self,
4371 logging_text,
4372 db_nslcmop,
4373 vca_deployed,
4374 config_descriptor,
4375 vca_index,
4376 destroy_ee=True,
4377 exec_primitives=True,
4378 scaling_in=False,
4379 vca_id: str = None,
4380 ):
4381 """
4382         Execute the terminate primitives and destroy the execution environment (if destroy_ee=True).
4383         :param logging_text:
4384         :param db_nslcmop:
4385         :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
4386         :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4387         :param vca_index: index in the database _admin.deployed.VCA
4388         :param destroy_ee: False to skip destroying it here, because all of them will be destroyed at once later
4389         :param exec_primitives: False to skip executing terminate primitives, because the config is not completed or has
4390             not executed properly
4391 :param scaling_in: True destroys the application, False destroys the model
4392 :return: None or exception
4393 """
4394
4395 self.logger.debug(
4396 logging_text
4397 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4398 vca_index, vca_deployed, config_descriptor, destroy_ee
4399 )
4400 )
4401
4402 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
4403
4404 # execute terminate_primitives
4405 if exec_primitives:
4406 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
4407 config_descriptor.get("terminate-config-primitive"),
4408 vca_deployed.get("ee_descriptor_id"),
4409 )
4410 vdu_id = vca_deployed.get("vdu_id")
4411 vdu_count_index = vca_deployed.get("vdu_count_index")
4412 vdu_name = vca_deployed.get("vdu_name")
4413 vnf_index = vca_deployed.get("member-vnf-index")
4414 if terminate_primitives and vca_deployed.get("needed_terminate"):
4415 for seq in terminate_primitives:
4416 # For each sequence in list, get primitive and call _ns_execute_primitive()
4417 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
4418 vnf_index, seq.get("name")
4419 )
4420 self.logger.debug(logging_text + step)
4421 # Create the primitive for each sequence, i.e. "primitive": "touch"
4422 primitive = seq.get("name")
4423 mapped_primitive_params = self._get_terminate_primitive_params(
4424 seq, vnf_index
4425 )
4426
4427 # Add sub-operation
4428 self._add_suboperation(
4429 db_nslcmop,
4430 vnf_index,
4431 vdu_id,
4432 vdu_count_index,
4433 vdu_name,
4434 primitive,
4435 mapped_primitive_params,
4436 )
4437 # Sub-operations: Call _ns_execute_primitive() instead of action()
4438 try:
4439 result, result_detail = await self._ns_execute_primitive(
4440 vca_deployed["ee_id"],
4441 primitive,
4442 mapped_primitive_params,
4443 vca_type=vca_type,
4444 vca_id=vca_id,
4445 )
4446 except LcmException:
4447                         # this happens when the VCA is not deployed. In that case there is nothing to terminate
4448 continue
4449 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
4450 if result not in result_ok:
4451 raise LcmException(
4452 "terminate_primitive {} for vnf_member_index={} fails with "
4453 "error {}".format(seq.get("name"), vnf_index, result_detail)
4454 )
4455                 # record that this VCA no longer needs to be terminated
4456 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4457 vca_index
4458 )
4459 self.update_db_2(
4460 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4461 )
4462
4463 # Delete Prometheus Jobs if any
4464 # This uses NSR_ID, so it will destroy any jobs under this index
4465 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
4466
4467 if destroy_ee:
4468 await self.vca_map[vca_type].delete_execution_environment(
4469 vca_deployed["ee_id"],
4470 scaling_in=scaling_in,
4471 vca_type=vca_type,
4472 vca_id=vca_id,
4473 )
4474
4475 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4476 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4477 namespace = "." + db_nsr["_id"]
4478 try:
4479 await self.n2vc.delete_namespace(
4480 namespace=namespace,
4481 total_timeout=self.timeout.charm_delete,
4482 vca_id=vca_id,
4483 )
4484 except N2VCNotFound: # already deleted. Skip
4485 pass
4486 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4487
4488 async def terminate(self, nsr_id, nslcmop_id):
4489 # Try to lock HA task here
4490 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4491 if not task_is_locked_by_me:
4492 return
4493
4494 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4495 self.logger.debug(logging_text + "Enter")
4496 timeout_ns_terminate = self.timeout.ns_terminate
4497 db_nsr = None
4498 db_nslcmop = None
4499 operation_params = None
4500 exc = None
4501         error_list = []  # collects all failure error messages
4502         db_nslcmop_update = {}
4503         autoremove = False  # autoremove after termination
4504 tasks_dict_info = {}
4505 db_nsr_update = {}
4506 stage = [
4507 "Stage 1/3: Preparing task.",
4508 "Waiting for previous operations to terminate.",
4509 "",
4510 ]
4511 # ^ contains [stage, step, VIM-status]
4512 try:
4513 # wait for any previous tasks in process
4514 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4515
4516 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4517 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4518 operation_params = db_nslcmop.get("operationParams") or {}
4519 if operation_params.get("timeout_ns_terminate"):
4520 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4521 stage[1] = "Getting nsr={} from db.".format(nsr_id)
4522 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4523
4524 db_nsr_update["operational-status"] = "terminating"
4525 db_nsr_update["config-status"] = "terminating"
4526 self._write_ns_status(
4527 nsr_id=nsr_id,
4528 ns_state="TERMINATING",
4529 current_operation="TERMINATING",
4530 current_operation_id=nslcmop_id,
4531 other_update=db_nsr_update,
4532 )
4533 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4534 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4535 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4536 return
4537
4538 stage[1] = "Getting vnf descriptors from db."
4539 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4540 db_vnfrs_dict = {
4541 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4542 }
4543 db_vnfds_from_id = {}
4544 db_vnfds_from_member_index = {}
4545 # Loop over VNFRs
4546 for vnfr in db_vnfrs_list:
4547 vnfd_id = vnfr["vnfd-id"]
4548 if vnfd_id not in db_vnfds_from_id:
4549 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4550 db_vnfds_from_id[vnfd_id] = vnfd
4551 db_vnfds_from_member_index[
4552 vnfr["member-vnf-index-ref"]
4553 ] = db_vnfds_from_id[vnfd_id]
4554
4555 # Destroy individual execution environments when there are terminating primitives.
4556             # The rest of the EEs will be deleted at once
4557 # TODO - check before calling _destroy_N2VC
4558 # if not operation_params.get("skip_terminate_primitives"):#
4559 # or not vca.get("needed_terminate"):
4560 stage[0] = "Stage 2/3 execute terminating primitives."
4561 self.logger.debug(logging_text + stage[0])
4562 stage[1] = "Looking execution environment that needs terminate."
4563 self.logger.debug(logging_text + stage[1])
4564
4565             for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4566                 if not vca or not vca.get("ee_id"):  # skip empty entries before using them
4567                     continue
4568                 config_descriptor = None
4569                 vca_member_vnf_index = vca.get("member-vnf-index")
4570                 vca_id = self.get_vca_id(
4571                     db_vnfrs_dict.get(vca_member_vnf_index)
4572                     if vca_member_vnf_index
4573                     else None,
4574                     db_nsr,
4575                 )
4576 if not vca.get("member-vnf-index"):
4577 # ns
4578 config_descriptor = db_nsr.get("ns-configuration")
4579 elif vca.get("vdu_id"):
4580 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4581 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4582 elif vca.get("kdu_name"):
4583 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4584 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4585 else:
4586 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4587 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4588 vca_type = vca.get("type")
4589 exec_terminate_primitives = not operation_params.get(
4590 "skip_terminate_primitives"
4591 ) and vca.get("needed_terminate")
4592 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4593 # pending native charms
4594             destroy_ee = vca_type in ("helm", "helm-v3", "native_charm")
4597 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4598 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4599 task = asyncio.ensure_future(
4600 self.destroy_N2VC(
4601 logging_text,
4602 db_nslcmop,
4603 vca,
4604 config_descriptor,
4605 vca_index,
4606 destroy_ee,
4607 exec_terminate_primitives,
4608 vca_id=vca_id,
4609 )
4610 )
4611 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4612
4613 # wait for pending tasks of terminate primitives
4614 if tasks_dict_info:
4615 self.logger.debug(
4616 logging_text
4617 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4618 )
4619 error_list = await self._wait_for_tasks(
4620 logging_text,
4621 tasks_dict_info,
4622 min(self.timeout.charm_delete, timeout_ns_terminate),
4623 stage,
4624 nslcmop_id,
4625 )
4626 tasks_dict_info.clear()
4627 if error_list:
4628 return # raise LcmException("; ".join(error_list))
4629
4630 # remove All execution environments at once
4631 stage[0] = "Stage 3/3 delete all."
4632
4633 if nsr_deployed.get("VCA"):
4634 stage[1] = "Deleting all execution environments."
4635 self.logger.debug(logging_text + stage[1])
4636 vca_id = self.get_vca_id({}, db_nsr)
4637 task_delete_ee = asyncio.ensure_future(
4638 asyncio.wait_for(
4639 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4640 timeout=self.timeout.charm_delete,
4641 )
4642 )
4643 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4644 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4645
4646 # Delete Namespace and Certificates if necessary
4647 if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
4648 await self.vca_map["helm-v3"].delete_tls_certificate(
4649 certificate_name=db_nslcmop["nsInstanceId"],
4650 )
4651 # TODO: Delete namespace
4652
4653 # Delete from k8scluster
4654 stage[1] = "Deleting KDUs."
4655 self.logger.debug(logging_text + stage[1])
4656 # print(nsr_deployed)
4657 for kdu in get_iterable(nsr_deployed, "K8s"):
4658 if not kdu or not kdu.get("kdu-instance"):
4659 continue
4660 kdu_instance = kdu.get("kdu-instance")
4661 if kdu.get("k8scluster-type") in self.k8scluster_map:
4662 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4663 vca_id = self.get_vca_id({}, db_nsr)
4664 task_delete_kdu_instance = asyncio.ensure_future(
4665 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4666 cluster_uuid=kdu.get("k8scluster-uuid"),
4667 kdu_instance=kdu_instance,
4668 vca_id=vca_id,
4669 namespace=kdu.get("namespace"),
4670 )
4671 )
4672 else:
4673 self.logger.error(
4674 logging_text
4675 + "Unknown k8s deployment type {}".format(
4676 kdu.get("k8scluster-type")
4677 )
4678 )
4679 continue
4680 tasks_dict_info[
4681 task_delete_kdu_instance
4682 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4683
4684 # remove from RO
4685 stage[1] = "Deleting ns from VIM."
4686 if self.ro_config.ng:
4687 task_delete_ro = asyncio.ensure_future(
4688 self._terminate_ng_ro(
4689 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4690 )
4691 )
4692 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4693
4694             # the rest of the work is done in the finally block
4695
4696 except (
4697 ROclient.ROClientException,
4698 DbException,
4699 LcmException,
4700 N2VCException,
4701 ) as e:
4702 self.logger.error(logging_text + "Exit Exception {}".format(e))
4703 exc = e
4704 except asyncio.CancelledError:
4705 self.logger.error(
4706 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4707 )
4708 exc = "Operation was cancelled"
4709 except Exception as e:
4710 exc = traceback.format_exc()
4711 self.logger.critical(
4712 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4713 exc_info=True,
4714 )
4715 finally:
4716 if exc:
4717 error_list.append(str(exc))
4718 try:
4719 # wait for pending tasks
4720 if tasks_dict_info:
4721 stage[1] = "Waiting for terminate pending tasks."
4722 self.logger.debug(logging_text + stage[1])
4723 error_list += await self._wait_for_tasks(
4724 logging_text,
4725 tasks_dict_info,
4726 timeout_ns_terminate,
4727 stage,
4728 nslcmop_id,
4729 )
4730 stage[1] = stage[2] = ""
4731 except asyncio.CancelledError:
4732 error_list.append("Cancelled")
4733                 # TODO cancel all tasks
4734 except Exception as exc:
4735 error_list.append(str(exc))
4736 # update status at database
4737 if error_list:
4738 error_detail = "; ".join(error_list)
4739 # self.logger.error(logging_text + error_detail)
4740 error_description_nslcmop = "{} Detail: {}".format(
4741 stage[0], error_detail
4742 )
4743 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4744 nslcmop_id, stage[0]
4745 )
4746
4747 db_nsr_update["operational-status"] = "failed"
4748 db_nsr_update["detailed-status"] = (
4749 error_description_nsr + " Detail: " + error_detail
4750 )
4751 db_nslcmop_update["detailed-status"] = error_detail
4752 nslcmop_operation_state = "FAILED"
4753 ns_state = "BROKEN"
4754 else:
4755 error_detail = None
4756 error_description_nsr = error_description_nslcmop = None
4757 ns_state = "NOT_INSTANTIATED"
4758 db_nsr_update["operational-status"] = "terminated"
4759 db_nsr_update["detailed-status"] = "Done"
4760 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4761 db_nslcmop_update["detailed-status"] = "Done"
4762 nslcmop_operation_state = "COMPLETED"
4763
4764 if db_nsr:
4765 self._write_ns_status(
4766 nsr_id=nsr_id,
4767 ns_state=ns_state,
4768 current_operation="IDLE",
4769 current_operation_id=None,
4770 error_description=error_description_nsr,
4771 error_detail=error_detail,
4772 other_update=db_nsr_update,
4773 )
4774 self._write_op_status(
4775 op_id=nslcmop_id,
4776 stage="",
4777 error_message=error_description_nslcmop,
4778 operation_state=nslcmop_operation_state,
4779 other_update=db_nslcmop_update,
4780 )
4781 if ns_state == "NOT_INSTANTIATED":
4782 try:
4783 self.db.set_list(
4784 "vnfrs",
4785 {"nsr-id-ref": nsr_id},
4786 {"_admin.nsState": "NOT_INSTANTIATED"},
4787 )
4788 except DbException as e:
4789                 self.logger.warning(
4790 logging_text
4791 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4792 nsr_id, e
4793 )
4794 )
4795 if operation_params:
4796 autoremove = operation_params.get("autoremove", False)
4797 if nslcmop_operation_state:
4798 try:
4799 await self.msg.aiowrite(
4800 "ns",
4801 "terminated",
4802 {
4803 "nsr_id": nsr_id,
4804 "nslcmop_id": nslcmop_id,
4805 "operationState": nslcmop_operation_state,
4806 "autoremove": autoremove,
4807 },
4808 )
4809 except Exception as e:
4810 self.logger.error(
4811 logging_text + "kafka_write notification Exception {}".format(e)
4812 )
4813 self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
4814 self.db.del_list("alerts", {"tags.ns_id": nsr_id})
4815
4816 self.logger.debug(logging_text + "Exit")
4817 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4818
4819 async def _wait_for_tasks(
4820 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4821 ):
4822 time_start = time()
4823 error_detail_list = []
4824 error_list = []
4825 pending_tasks = list(created_tasks_info.keys())
4826 num_tasks = len(pending_tasks)
4827 num_done = 0
4828 stage[1] = "{}/{}.".format(num_done, num_tasks)
4829 self._write_op_status(nslcmop_id, stage)
4830 while pending_tasks:
4831 new_error = None
4832 _timeout = timeout + time_start - time()
4833 done, pending_tasks = await asyncio.wait(
4834 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4835 )
4836 num_done += len(done)
4837 if not done: # Timeout
4838 for task in pending_tasks:
4839 new_error = created_tasks_info[task] + ": Timeout"
4840 error_detail_list.append(new_error)
4841 error_list.append(new_error)
4842 break
4843 for task in done:
4844 if task.cancelled():
4845 exc = "Cancelled"
4846 else:
4847 exc = task.exception()
4848 if exc:
4849 if isinstance(exc, asyncio.TimeoutError):
4850 exc = "Timeout"
4851 new_error = created_tasks_info[task] + ": {}".format(exc)
4852 error_list.append(created_tasks_info[task])
4853 error_detail_list.append(new_error)
4854 if isinstance(
4855 exc,
4856 (
4857 str,
4858 DbException,
4859 N2VCException,
4860 ROclient.ROClientException,
4861 LcmException,
4862 K8sException,
4863 NgRoException,
4864 ),
4865 ):
4866 self.logger.error(logging_text + new_error)
4867 else:
4868 exc_traceback = "".join(
4869 traceback.format_exception(None, exc, exc.__traceback__)
4870 )
4871 self.logger.error(
4872 logging_text
4873 + created_tasks_info[task]
4874 + " "
4875 + exc_traceback
4876 )
4877 else:
4878 self.logger.debug(
4879 logging_text + created_tasks_info[task] + ": Done"
4880 )
4881 stage[1] = "{}/{}.".format(num_done, num_tasks)
4882 if new_error:
4883 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4884 if nsr_id: # update also nsr
4885 self.update_db_2(
4886 "nsrs",
4887 nsr_id,
4888 {
4889 "errorDescription": "Error at: " + ", ".join(error_list),
4890 "errorDetail": ". ".join(error_detail_list),
4891 },
4892 )
4893 self._write_op_status(nslcmop_id, stage)
4894 return error_detail_list
4895
4896 @staticmethod
4897 def _map_primitive_params(primitive_desc, params, instantiation_params):
4898 """
4899 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4900 The default-value is used. If it is between < > it look for a value at instantiation_params
4901 :param primitive_desc: portion of VNFD/NSD that describes primitive
4902 :param params: Params provided by user
4903 :param instantiation_params: Instantiation params provided by user
4904 :return: a dictionary with the calculated params
4905 """
4906 calculated_params = {}
4907 for parameter in primitive_desc.get("parameter", ()):
4908 param_name = parameter["name"]
4909 if param_name in params:
4910 calculated_params[param_name] = params[param_name]
4911 elif "default-value" in parameter or "value" in parameter:
4912 if "value" in parameter:
4913 calculated_params[param_name] = parameter["value"]
4914 else:
4915 calculated_params[param_name] = parameter["default-value"]
4916 if (
4917 isinstance(calculated_params[param_name], str)
4918 and calculated_params[param_name].startswith("<")
4919 and calculated_params[param_name].endswith(">")
4920 ):
4921 if calculated_params[param_name][1:-1] in instantiation_params:
4922 calculated_params[param_name] = instantiation_params[
4923 calculated_params[param_name][1:-1]
4924 ]
4925 else:
4926 raise LcmException(
4927 "Parameter {} needed to execute primitive {} not provided".format(
4928 calculated_params[param_name], primitive_desc["name"]
4929 )
4930 )
4931 else:
4932 raise LcmException(
4933 "Parameter {} needed to execute primitive {} not provided".format(
4934 param_name, primitive_desc["name"]
4935 )
4936 )
4937
4938 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4939 calculated_params[param_name] = yaml.safe_dump(
4940 calculated_params[param_name], default_flow_style=True, width=256
4941 )
4942 elif isinstance(calculated_params[param_name], str) and calculated_params[
4943 param_name
4944 ].startswith("!!yaml "):
4945 calculated_params[param_name] = calculated_params[param_name][7:]
4946 if parameter.get("data-type") == "INTEGER":
4947 try:
4948 calculated_params[param_name] = int(calculated_params[param_name])
4949 except ValueError: # error converting string to int
4950 raise LcmException(
4951 "Parameter {} of primitive {} must be integer".format(
4952 param_name, primitive_desc["name"]
4953 )
4954 )
4955 elif parameter.get("data-type") == "BOOLEAN":
4956 calculated_params[param_name] = not (
4957 (str(calculated_params[param_name])).lower() == "false"
4958 )
4959
4960         # always add ns_config_info if the primitive name is config
4961 if primitive_desc["name"] == "config":
4962 if "ns_config_info" in instantiation_params:
4963 calculated_params["ns_config_info"] = instantiation_params[
4964 "ns_config_info"
4965 ]
4966 return calculated_params
4967
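    # Illustrative resolution order (all names are hypothetical and the class name NsLcm
    # is assumed): user params win, then the descriptor "value"/"default-value", and a
    # "<key>" placeholder is dereferenced against the instantiation params (raising
    # LcmException when the key is missing):
    #
    #     desc = {"name": "touch", "parameter": [
    #         {"name": "filename", "default-value": "<touch_filename>"}]}
    #     NsLcm._map_primitive_params(desc, {}, {"touch_filename": "/tmp/f"})
    #     # -> {"filename": "/tmp/f"}
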
4968 def _look_for_deployed_vca(
4969 self,
4970 deployed_vca,
4971 member_vnf_index,
4972 vdu_id,
4973 vdu_count_index,
4974 kdu_name=None,
4975 ee_descriptor_id=None,
4976 ):
4977         # find the vca_deployed record for this action. Raise LcmException if not found or it has no ee_id.
4978 for vca in deployed_vca:
4979 if not vca:
4980 continue
4981 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4982 continue
4983 if (
4984 vdu_count_index is not None
4985 and vdu_count_index != vca["vdu_count_index"]
4986 ):
4987 continue
4988 if kdu_name and kdu_name != vca["kdu_name"]:
4989 continue
4990 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4991 continue
4992 break
4993 else:
4994 # vca_deployed not found
4995 raise LcmException(
4996 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4997 " is not deployed".format(
4998 member_vnf_index,
4999 vdu_id,
5000 vdu_count_index,
5001 kdu_name,
5002 ee_descriptor_id,
5003 )
5004 )
5005 # get ee_id
5006 ee_id = vca.get("ee_id")
5007 vca_type = vca.get(
5008 "type", "lxc_proxy_charm"
5009 ) # default value for backward compatibility - proxy charm
5010 if not ee_id:
5011 raise LcmException(
5012 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5013 "execution environment".format(
5014 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5015 )
5016 )
5017 return ee_id, vca_type
5018
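    # Hedged sketch (made-up identifiers): resolve the execution environment of the charm
    # attached to vdu "mgmtVM" of member index "1"; an LcmException is raised when no
    # matching VCA exists or it carries no ee_id:
    #
    #     ee_id, vca_type = self._look_for_deployed_vca(
    #         db_nsr["_admin"]["deployed"]["VCA"],
    #         member_vnf_index="1", vdu_id="mgmtVM", vdu_count_index=0)
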
5019 async def _ns_execute_primitive(
5020 self,
5021 ee_id,
5022 primitive,
5023 primitive_params,
5024 retries=0,
5025 retries_interval=30,
5026 timeout=None,
5027 vca_type=None,
5028 db_dict=None,
5029 vca_id: str = None,
5030 ) -> (str, str):
5031 try:
5032 if primitive == "config":
5033 primitive_params = {"params": primitive_params}
5034
5035 vca_type = vca_type or "lxc_proxy_charm"
5036
5037 while retries >= 0:
5038 try:
5039 output = await asyncio.wait_for(
5040 self.vca_map[vca_type].exec_primitive(
5041 ee_id=ee_id,
5042 primitive_name=primitive,
5043 params_dict=primitive_params,
5044 progress_timeout=self.timeout.progress_primitive,
5045 total_timeout=self.timeout.primitive,
5046 db_dict=db_dict,
5047 vca_id=vca_id,
5048 vca_type=vca_type,
5049 ),
5050 timeout=timeout or self.timeout.primitive,
5051 )
5052 # execution was OK
5053 break
5054 except asyncio.CancelledError:
5055 raise
5056 except Exception as e:
5057 retries -= 1
5058 if retries >= 0:
5059 self.logger.debug(
5060 "Error executing action {} on {} -> {}".format(
5061 primitive, ee_id, e
5062 )
5063 )
5064 # wait and retry
5065 await asyncio.sleep(retries_interval)
5066 else:
5067 if isinstance(e, asyncio.TimeoutError):
5068 e = N2VCException(
5069 message="Timed out waiting for action to complete"
5070 )
5071 return "FAILED", getattr(e, "message", repr(e))
5072
5073 return "COMPLETED", output
5074
5075 except (LcmException, asyncio.CancelledError):
5076 raise
5077 except Exception as e:
5078 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5079
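    # Illustrative call (hypothetical ee_id and params): retries=3 allows up to four
    # attempts, spaced retries_interval (30 s by default) apart; the result is always a
    # (state, detail) tuple, "COMPLETED" with the primitive output or "FAILED" with the
    # error text:
    #
    #     status, detail = await self._ns_execute_primitive(
    #         ee_id, "touch", {"filename": "/tmp/f"}, retries=3, vca_type="lxc_proxy_charm")
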
5080 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5081 """
5082 Updating the vca_status with latest juju information in nsrs record
5083 :param: nsr_id: Id of the nsr
5084 :param: nslcmop_id: Id of the nslcmop
5085 :return: None
5086 """
5087
5088 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5089 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5090 vca_id = self.get_vca_id({}, db_nsr)
5091 if db_nsr["_admin"]["deployed"]["K8s"]:
5092 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5093 cluster_uuid, kdu_instance, cluster_type = (
5094 k8s["k8scluster-uuid"],
5095 k8s["kdu-instance"],
5096 k8s["k8scluster-type"],
5097 )
5098 await self._on_update_k8s_db(
5099 cluster_uuid=cluster_uuid,
5100 kdu_instance=kdu_instance,
5101 filter={"_id": nsr_id},
5102 vca_id=vca_id,
5103 cluster_type=cluster_type,
5104 )
5105 else:
5106 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5107 table, filter = "nsrs", {"_id": nsr_id}
5108 path = "_admin.deployed.VCA.{}.".format(vca_index)
5109 await self._on_update_n2vc_db(table, filter, path, {})
5110
5111 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5112 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5113
5114 async def action(self, nsr_id, nslcmop_id):
5115 # Try to lock HA task here
5116 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5117 if not task_is_locked_by_me:
5118 return
5119
5120 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
5121 self.logger.debug(logging_text + "Enter")
5122 # get all needed from database
5123 db_nsr = None
5124 db_nslcmop = None
5125 db_nsr_update = {}
5126 db_nslcmop_update = {}
5127 nslcmop_operation_state = None
5128 error_description_nslcmop = None
5129 exc = None
5130 step = ""
5131 try:
5132 # wait for any previous tasks in process
5133 step = "Waiting for previous operations to terminate"
5134 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5135
5136 self._write_ns_status(
5137 nsr_id=nsr_id,
5138 ns_state=None,
5139 current_operation="RUNNING ACTION",
5140 current_operation_id=nslcmop_id,
5141 )
5142
5143 step = "Getting information from database"
5144 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5145 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5146 if db_nslcmop["operationParams"].get("primitive_params"):
5147 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
5148 db_nslcmop["operationParams"]["primitive_params"]
5149 )
5150
5151 nsr_deployed = db_nsr["_admin"].get("deployed")
5152 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
5153 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
5154 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
5155 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
5156 primitive = db_nslcmop["operationParams"]["primitive"]
5157 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
5158 timeout_ns_action = db_nslcmop["operationParams"].get(
5159 "timeout_ns_action", self.timeout.primitive
5160 )
5161
5162 if vnf_index:
5163 step = "Getting vnfr from database"
5164 db_vnfr = self.db.get_one(
5165 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5166 )
5167 if db_vnfr.get("kdur"):
5168 kdur_list = []
5169 for kdur in db_vnfr["kdur"]:
5170 if kdur.get("additionalParams"):
5171 kdur["additionalParams"] = json.loads(
5172 kdur["additionalParams"]
5173 )
5174 kdur_list.append(kdur)
5175 db_vnfr["kdur"] = kdur_list
5176 step = "Getting vnfd from database"
5177 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5178
5179 # Sync filesystem before running a primitive
5180 self.fs.sync(db_vnfr["vnfd-id"])
5181 else:
5182 step = "Getting nsd from database"
5183 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
5184
5185 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5186 # for backward compatibility
5187 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5188 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5189 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5190 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5191
5192 # look for primitive
5193 config_primitive_desc = descriptor_configuration = None
5194 if vdu_id:
5195 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
5196 elif kdu_name:
5197 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
5198 elif vnf_index:
5199 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
5200 else:
5201 descriptor_configuration = db_nsd.get("ns-configuration")
5202
5203 if descriptor_configuration and descriptor_configuration.get(
5204 "config-primitive"
5205 ):
5206 for config_primitive in descriptor_configuration["config-primitive"]:
5207 if config_primitive["name"] == primitive:
5208 config_primitive_desc = config_primitive
5209 break
5210
5211 if not config_primitive_desc:
5212 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
5213 raise LcmException(
5214 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5215 primitive
5216 )
5217 )
5218 primitive_name = primitive
5219 ee_descriptor_id = None
5220 else:
5221 primitive_name = config_primitive_desc.get(
5222 "execution-environment-primitive", primitive
5223 )
5224 ee_descriptor_id = config_primitive_desc.get(
5225 "execution-environment-ref"
5226 )
5227
5228 if vnf_index:
5229 if vdu_id:
5230 vdur = next(
5231 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
5232 )
5233 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5234 elif kdu_name:
5235 kdur = next(
5236 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5237 )
5238 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5239 else:
5240 desc_params = parse_yaml_strings(
5241 db_vnfr.get("additionalParamsForVnf")
5242 )
5243 else:
5244 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5245 if kdu_name and get_configuration(db_vnfd, kdu_name):
5246 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5247 actions = set()
5248 for primitive in kdu_configuration.get("initial-config-primitive", []):
5249 actions.add(primitive["name"])
5250 for primitive in kdu_configuration.get("config-primitive", []):
5251 actions.add(primitive["name"])
5252 kdu = find_in_list(
5253 nsr_deployed["K8s"],
5254 lambda kdu: kdu_name == kdu["kdu-name"]
5255 and kdu["member-vnf-index"] == vnf_index,
5256 )
5257             kdu_action = (
5258                 primitive_name in actions
5259                 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5260             )
5263
5264 # TODO check if ns is in a proper status
5265 if kdu_name and (
5266 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5267 ):
5268 # kdur and desc_params already set from before
5269 if primitive_params:
5270 desc_params.update(primitive_params)
5271 # TODO Check if we will need something at vnf level
5272 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5273 if (
5274 kdu_name == kdu["kdu-name"]
5275 and kdu["member-vnf-index"] == vnf_index
5276 ):
5277 break
5278 else:
5279 raise LcmException(
5280 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5281 )
5282
5283 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5284 msg = "unknown k8scluster-type '{}'".format(
5285 kdu.get("k8scluster-type")
5286 )
5287 raise LcmException(msg)
5288
5289 db_dict = {
5290 "collection": "nsrs",
5291 "filter": {"_id": nsr_id},
5292 "path": "_admin.deployed.K8s.{}".format(index),
5293 }
5294 self.logger.debug(
5295 logging_text
5296 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5297 )
5298 step = "Executing kdu {}".format(primitive_name)
5299 if primitive_name == "upgrade":
5300 if desc_params.get("kdu_model"):
5301 kdu_model = desc_params.get("kdu_model")
5302 del desc_params["kdu_model"]
5303 else:
5304 kdu_model = kdu.get("kdu-model")
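# A non-embedded chart may be referenced as "<name>:<version>"; keep
# only the name so the upgrade resolves the chart by name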
5305 if kdu_model.count("/") < 2: # helm chart is not embedded
5306 parts = kdu_model.split(sep=":")
5307 if len(parts) == 2:
5308 kdu_model = parts[0]
5309 if desc_params.get("kdu_atomic_upgrade"):
5310 atomic_upgrade = desc_params.get(
5311 "kdu_atomic_upgrade"
5312 ).lower() in ("yes", "true", "1")
5313 del desc_params["kdu_atomic_upgrade"]
5314 else:
5315 atomic_upgrade = True
5316
5317 detailed_status = await asyncio.wait_for(
5318 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5319 cluster_uuid=kdu.get("k8scluster-uuid"),
5320 kdu_instance=kdu.get("kdu-instance"),
5321 atomic=atomic_upgrade,
5322 kdu_model=kdu_model,
5323 params=desc_params,
5324 db_dict=db_dict,
5325 timeout=timeout_ns_action,
5326 ),
5327 timeout=timeout_ns_action + 10,
5328 )
5329 self.logger.debug(
5330 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5331 )
5332 elif primitive_name == "rollback":
5333 detailed_status = await asyncio.wait_for(
5334 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5335 cluster_uuid=kdu.get("k8scluster-uuid"),
5336 kdu_instance=kdu.get("kdu-instance"),
5337 db_dict=db_dict,
5338 ),
5339 timeout=timeout_ns_action,
5340 )
5341 elif primitive_name == "status":
5342 detailed_status = await asyncio.wait_for(
5343 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5344 cluster_uuid=kdu.get("k8scluster-uuid"),
5345 kdu_instance=kdu.get("kdu-instance"),
5346 vca_id=vca_id,
5347 ),
5348 timeout=timeout_ns_action,
5349 )
5350 else:
5351 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5352 kdu["kdu-name"], nsr_id
5353 )
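# Build a default "<kdu-name>-<nsr-id>" instance name when the
# deployment record lacks an explicit kdu-instance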
5354 params = self._map_primitive_params(
5355 config_primitive_desc, primitive_params, desc_params
5356 )
5357
5358 detailed_status = await asyncio.wait_for(
5359 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5360 cluster_uuid=kdu.get("k8scluster-uuid"),
5361 kdu_instance=kdu_instance,
5362 primitive_name=primitive_name,
5363 params=params,
5364 db_dict=db_dict,
5365 timeout=timeout_ns_action,
5366 vca_id=vca_id,
5367 ),
5368 timeout=timeout_ns_action,
5369 )
5370
5371 if detailed_status:
5372 nslcmop_operation_state = "COMPLETED"
5373 else:
5374 detailed_status = ""
5375 nslcmop_operation_state = "FAILED"
5376 else:
5377 ee_id, vca_type = self._look_for_deployed_vca(
5378 nsr_deployed["VCA"],
5379 member_vnf_index=vnf_index,
5380 vdu_id=vdu_id,
5381 vdu_count_index=vdu_count_index,
5382 ee_descriptor_id=ee_descriptor_id,
5383 )
5384 for vca_index, vca_deployed in enumerate(
5385 db_nsr["_admin"]["deployed"]["VCA"]
5386 ):
5387 if vca_deployed.get("member-vnf-index") == vnf_index:
5388 db_dict = {
5389 "collection": "nsrs",
5390 "filter": {"_id": nsr_id},
5391 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5392 }
5393 break
5394 (
5395 nslcmop_operation_state,
5396 detailed_status,
5397 ) = await self._ns_execute_primitive(
5398 ee_id,
5399 primitive=primitive_name,
5400 primitive_params=self._map_primitive_params(
5401 config_primitive_desc, primitive_params, desc_params
5402 ),
5403 timeout=timeout_ns_action,
5404 vca_type=vca_type,
5405 db_dict=db_dict,
5406 vca_id=vca_id,
5407 )
5408
5409 db_nslcmop_update["detailed-status"] = detailed_status
5410 error_description_nslcmop = (
5411 detailed_status if nslcmop_operation_state == "FAILED" else ""
5412 )
5413 self.logger.debug(
5414 logging_text
5415 + "Done with result {} {}".format(
5416 nslcmop_operation_state, detailed_status
5417 )
5418 )
5419 return # database update is called inside finally
5420
5421 except (DbException, LcmException, N2VCException, K8sException) as e:
5422 self.logger.error(logging_text + "Exit Exception {}".format(e))
5423 exc = e
5424 except asyncio.CancelledError:
5425 self.logger.error(
5426 logging_text + "Cancelled Exception while '{}'".format(step)
5427 )
5428 exc = "Operation was cancelled"
5429 except asyncio.TimeoutError:
5430 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5431 exc = "Timeout"
5432 except Exception as e:
5433 exc = traceback.format_exc()
5434 self.logger.critical(
5435 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5436 exc_info=True,
5437 )
5438 finally:
5439 if exc:
5440 db_nslcmop_update[
5441 "detailed-status"
5442 ] = (
5443 detailed_status
5444 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5445 nslcmop_operation_state = "FAILED"
5446 if db_nsr:
5447 self._write_ns_status(
5448 nsr_id=nsr_id,
5449 ns_state=db_nsr[
5450 "nsState"
5451 ], # TODO check if degraded. For the moment use previous status
5452 current_operation="IDLE",
5453 current_operation_id=None,
5454 # error_description=error_description_nsr,
5455 # error_detail=error_detail,
5456 other_update=db_nsr_update,
5457 )
5458
5459 self._write_op_status(
5460 op_id=nslcmop_id,
5461 stage="",
5462 error_message=error_description_nslcmop,
5463 operation_state=nslcmop_operation_state,
5464 other_update=db_nslcmop_update,
5465 )
5466
5467 if nslcmop_operation_state:
5468 try:
5469 await self.msg.aiowrite(
5470 "ns",
5471 "actioned",
5472 {
5473 "nsr_id": nsr_id,
5474 "nslcmop_id": nslcmop_id,
5475 "operationState": nslcmop_operation_state,
5476 },
5477 )
5478 except Exception as e:
5479 self.logger.error(
5480 logging_text + "kafka_write notification Exception {}".format(e)
5481 )
5482 self.logger.debug(logging_text + "Exit")
5483 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5484 return nslcmop_operation_state, detailed_status
5485
5486 async def terminate_vdus(
5487 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5488 ):
5489 """This method terminates VDUs
5490
5491 Args:
5492 db_vnfr: VNF instance record
5493 member_vnf_index: VNF index to identify the VDUs to be removed
5494 db_nsr: NS instance record
5495 update_db_nslcmops: Nslcmop update record
5496 """
5497 vca_scaling_info = []
5498 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5499 scaling_info["scaling_direction"] = "IN"
5500 scaling_info["vdu-delete"] = {}
5501 scaling_info["kdu-delete"] = {}
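# Termination is modeled as a SCALE_IN operation that removes every
# VDU instance of this VNF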
5502 db_vdur = db_vnfr.get("vdur")
5503 vdur_list = copy(db_vdur)
5504 count_index = 0
5505 for index, vdu in enumerate(vdur_list):
5506 vca_scaling_info.append(
5507 {
5508 "osm_vdu_id": vdu["vdu-id-ref"],
5509 "member-vnf-index": member_vnf_index,
5510 "type": "delete",
5511 "vdu_index": count_index,
5512 }
5513 )
5514 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5515 scaling_info["vdu"].append(
5516 {
5517 "name": vdu.get("name") or vdu.get("vdu-name"),
5518 "vdu_id": vdu["vdu-id-ref"],
5519 "interface": [],
5520 }
5521 )
5522 for interface in vdu["interfaces"]:
5523 scaling_info["vdu"][index]["interface"].append(
5524 {
5525 "name": interface["name"],
5526 "ip_address": interface["ip-address"],
5527 "mac_address": interface.get("mac-address"),
5528 }
5529 )
self.logger.info("NS update scaling info: {}".format(scaling_info))
5531 stage[2] = "Terminating VDUs"
5532 if scaling_info.get("vdu-delete"):
5533 # scale_process = "RO"
5534 if self.ro_config.ng:
5535 await self._scale_ng_ro(
5536 logging_text,
5537 db_nsr,
5538 update_db_nslcmops,
5539 db_vnfr,
5540 scaling_info,
5541 stage,
5542 )
5543
5544 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5545 """This method is to Remove VNF instances from NS.
5546
5547 Args:
5548 nsr_id: NS instance id
5549 nslcmop_id: nslcmop id of update
5550 vnf_instance_id: id of the VNF instance to be removed
5551
5552 Returns:
5553 result: (str, str) COMPLETED/FAILED, details
5554 """
5555 try:
5556 db_nsr_update = {}
5557 logging_text = "Task ns={} update ".format(nsr_id)
5558 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5559 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5560 if check_vnfr_count > 1:
5561 stage = ["", "", ""]
5562 step = "Getting nslcmop from database"
5563 self.logger.debug(
5564 step + " after having waited for previous tasks to be completed"
5565 )
5566 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5567 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5568 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5569 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5570 """ db_vnfr = self.db.get_one(
5571 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5572
5573 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5574 await self.terminate_vdus(
5575 db_vnfr,
5576 member_vnf_index,
5577 db_nsr,
5578 update_db_nslcmops,
5579 stage,
5580 logging_text,
5581 )
5582
5583 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5584 constituent_vnfr.remove(db_vnfr.get("_id"))
5585 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5586 "constituent-vnfr-ref"
5587 )
self.update_db_2("nsrs", nsr_id, db_nsr_update)
self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5591 return "COMPLETED", "Done"
else:
step = "Terminating VNF"
raise LcmException(
"Cannot terminate the last VNF in this NS ({}).".format(
vnf_instance_id
)
)
5599 except (LcmException, asyncio.CancelledError):
5600 raise
5601 except Exception as e:
5602 self.logger.debug("Error removing VNF {}".format(e))
5603 return "FAILED", "Error removing VNF {}".format(e)
5604
5605 async def _ns_redeploy_vnf(
5606 self,
5607 nsr_id,
5608 nslcmop_id,
5609 db_vnfd,
5610 db_vnfr,
5611 db_nsr,
5612 ):
5613 """This method updates and redeploys VNF instances
5614
5615 Args:
5616 nsr_id: NS instance id
5617 nslcmop_id: nslcmop id
5618 db_vnfd: VNF descriptor
5619 db_vnfr: VNF instance record
5620 db_nsr: NS instance record
5621
5622 Returns:
5623 result: (str, str) COMPLETED/FAILED, details
5624 """
5625 try:
5626 count_index = 0
5627 stage = ["", "", ""]
5628 logging_text = "Task ns={} update ".format(nsr_id)
5629 latest_vnfd_revision = db_vnfd["_admin"].get("revision")
5630 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5631
5632 # Terminate old VNF resources
5633 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5634 await self.terminate_vdus(
5635 db_vnfr,
5636 member_vnf_index,
5637 db_nsr,
5638 update_db_nslcmops,
5639 stage,
5640 logging_text,
5641 )
5642
5643 # old_vnfd_id = db_vnfr["vnfd-id"]
5644 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5645 new_db_vnfd = db_vnfd
5646 # new_vnfd_ref = new_db_vnfd["id"]
5647 # new_vnfd_id = vnfd_id
5648
# Build the new connection-point list; the new VDUR comes from the operation params
5650 new_vnfr_cp = []
5651 for cp in new_db_vnfd.get("ext-cpd", ()):
5652 vnf_cp = {
5653 "name": cp.get("id"),
5654 "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
5655 "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
5656 "id": cp.get("id"),
5657 }
5658 new_vnfr_cp.append(vnf_cp)
5659 new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
5660 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5661 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5662 new_vnfr_update = {
5663 "revision": latest_vnfd_revision,
5664 "connection-point": new_vnfr_cp,
5665 "vdur": new_vdur,
5666 "ip-address": "",
5667 }
5668 self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
5669 updated_db_vnfr = self.db.get_one(
5670 "vnfrs",
5671 {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
5672 )
5673
5674 # Instantiate new VNF resources
5675 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5676 vca_scaling_info = []
5677 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5678 scaling_info["scaling_direction"] = "OUT"
5679 scaling_info["vdu-create"] = {}
5680 scaling_info["kdu-create"] = {}
5681 vdud_instantiate_list = db_vnfd["vdu"]
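# Re-instantiate every VDU defined in the descriptor, regenerating
# cloud-init from the updated VNFR parameters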
5682 for index, vdud in enumerate(vdud_instantiate_list):
5683 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
5684 if cloud_init_text:
5685 additional_params = (
5686 self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
5687 or {}
5688 )
5689 cloud_init_list = []
5690 if cloud_init_text:
# TODO The VDU's own ip is not available because db_vnfr is not updated yet.
5692 additional_params["OSM"] = get_osm_params(
5693 updated_db_vnfr, vdud["id"], 1
5694 )
5695 cloud_init_list.append(
5696 self._parse_cloud_init(
5697 cloud_init_text,
5698 additional_params,
5699 db_vnfd["id"],
5700 vdud["id"],
5701 )
5702 )
5703 vca_scaling_info.append(
5704 {
5705 "osm_vdu_id": vdud["id"],
5706 "member-vnf-index": member_vnf_index,
5707 "type": "create",
5708 "vdu_index": count_index,
5709 }
5710 )
5711 scaling_info["vdu-create"][vdud["id"]] = count_index
5712 if self.ro_config.ng:
5713 self.logger.debug(
5714 "New Resources to be deployed: {}".format(scaling_info)
5715 )
5716 await self._scale_ng_ro(
5717 logging_text,
5718 db_nsr,
5719 update_db_nslcmops,
5720 updated_db_vnfr,
5721 scaling_info,
5722 stage,
5723 )
5724 return "COMPLETED", "Done"
5725 except (LcmException, asyncio.CancelledError):
5726 raise
5727 except Exception as e:
5728 self.logger.debug("Error updating VNF {}".format(e))
5729 return "FAILED", "Error updating VNF {}".format(e)
5730
5731 async def _ns_charm_upgrade(
5732 self,
5733 ee_id,
5734 charm_id,
5735 charm_type,
5736 path,
5737 timeout: float = None,
5738 ) -> (str, str):
5739 """This method upgrade charms in VNF instances
5740
5741 Args:
5742 ee_id: Execution environment id
5743 path: Local path to the charm
5744 charm_id: charm-id
5745 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5746 timeout: (Float) Timeout for the ns update operation
5747
5748 Returns:
5749 result: (str, str) COMPLETED/FAILED, details
5750 """
5751 try:
5752 charm_type = charm_type or "lxc_proxy_charm"
5753 output = await self.vca_map[charm_type].upgrade_charm(
5754 ee_id=ee_id,
5755 path=path,
5756 charm_id=charm_id,
5757 charm_type=charm_type,
5758 timeout=timeout or self.timeout.ns_update,
5759 )
5760
5761 if output:
5762 return "COMPLETED", output
5763
5764 except (LcmException, asyncio.CancelledError):
5765 raise
5766
5767 except Exception as e:
5768 self.logger.debug("Error upgrading charm {}".format(path))
5769
5770 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5771
5772 async def update(self, nsr_id, nslcmop_id):
5773 """Update NS according to different update types
5774
5775 This method performs upgrade of VNF instances then updates the revision
5776 number in VNF record
5777
5778 Args:
nsr_id: Id of the network service to be updated
5780 nslcmop_id: ns lcm operation id
5781
Raises:
DbException, LcmException, N2VCException, K8sException
5784
5785 """
5786 # Try to lock HA task here
5787 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5788 if not task_is_locked_by_me:
5789 return
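# The HA lock is owned by another LCM instance, which will run this
# operation; nothing to do here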
5790
5791 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5792 self.logger.debug(logging_text + "Enter")
5793
5794 # Set the required variables to be filled up later
5795 db_nsr = None
5796 db_nslcmop_update = {}
5797 vnfr_update = {}
5798 nslcmop_operation_state = None
5799 db_nsr_update = {}
5800 error_description_nslcmop = ""
5801 exc = None
5802 change_type = "updated"
5803 detailed_status = ""
5804 member_vnf_index = None
5805
5806 try:
5807 # wait for any previous tasks in process
5808 step = "Waiting for previous operations to terminate"
5809 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5810 self._write_ns_status(
5811 nsr_id=nsr_id,
5812 ns_state=None,
5813 current_operation="UPDATING",
5814 current_operation_id=nslcmop_id,
5815 )
5816
5817 step = "Getting nslcmop from database"
5818 db_nslcmop = self.db.get_one(
5819 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5820 )
5821 update_type = db_nslcmop["operationParams"]["updateType"]
5822
5823 step = "Getting nsr from database"
5824 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5825 old_operational_status = db_nsr["operational-status"]
5826 db_nsr_update["operational-status"] = "updating"
5827 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5828 nsr_deployed = db_nsr["_admin"].get("deployed")
5829
5830 if update_type == "CHANGE_VNFPKG":
5831 # Get the input parameters given through update request
5832 vnf_instance_id = db_nslcmop["operationParams"][
5833 "changeVnfPackageData"
5834 ].get("vnfInstanceId")
5835
5836 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5837 "vnfdId"
5838 )
5839 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5840
5841 step = "Getting vnfr from database"
5842 db_vnfr = self.db.get_one(
5843 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5844 )
5845
5846 step = "Getting vnfds from database"
5847 # Latest VNFD
5848 latest_vnfd = self.db.get_one(
5849 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5850 )
5851 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5852
5853 # Current VNFD
5854 current_vnf_revision = db_vnfr.get("revision", 1)
5855 current_vnfd = self.db.get_one(
5856 "vnfds_revisions",
5857 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5858 fail_on_empty=False,
5859 )
5860 # Charm artifact paths will be filled up later
5861 (
5862 current_charm_artifact_path,
5863 target_charm_artifact_path,
5864 charm_artifact_paths,
5865 helm_artifacts,
5866 ) = ([], [], [], [])
5867
5868 step = "Checking if revision has changed in VNFD"
5869 if current_vnf_revision != latest_vnfd_revision:
5870 change_type = "policy_updated"
5871
# There is a new revision of the VNFD; an update operation is required
5873 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5874 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5875
5876 step = "Removing the VNFD packages if they exist in the local path"
5877 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5878 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5879
5880 step = "Get the VNFD packages from FSMongo"
5881 self.fs.sync(from_path=latest_vnfd_path)
5882 self.fs.sync(from_path=current_vnfd_path)
5883
5884 step = (
5885 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5886 )
5887 current_base_folder = current_vnfd["_admin"]["storage"]
5888 latest_base_folder = latest_vnfd["_admin"]["storage"]
5889
5890 for vca_index, vca_deployed in enumerate(
5891 get_iterable(nsr_deployed, "VCA")
5892 ):
5893 vnf_index = db_vnfr.get("member-vnf-index-ref")
5894
5895 # Getting charm-id and charm-type
5896 if vca_deployed.get("member-vnf-index") == vnf_index:
5897 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5898 vca_type = vca_deployed.get("type")
5899 vdu_count_index = vca_deployed.get("vdu_count_index")
5900
5901 # Getting ee-id
5902 ee_id = vca_deployed.get("ee_id")
5903
5904 step = "Getting descriptor config"
5905 if current_vnfd.get("kdu"):
5906 search_key = "kdu_name"
5907 else:
5908 search_key = "vnfd_id"
5909
5910 entity_id = vca_deployed.get(search_key)
5911
5912 descriptor_config = get_configuration(
5913 current_vnfd, entity_id
5914 )
5915
5916 if "execution-environment-list" in descriptor_config:
5917 ee_list = descriptor_config.get(
5918 "execution-environment-list", []
5919 )
5920 else:
5921 ee_list = []
5922
# There could be several charms used in the same VNF
5924 for ee_item in ee_list:
5925 if ee_item.get("juju"):
5926 step = "Getting charm name"
5927 charm_name = ee_item["juju"].get("charm")
5928
5929 step = "Setting Charm artifact paths"
5930 current_charm_artifact_path.append(
5931 get_charm_artifact_path(
5932 current_base_folder,
5933 charm_name,
5934 vca_type,
5935 current_vnf_revision,
5936 )
5937 )
5938 target_charm_artifact_path.append(
5939 get_charm_artifact_path(
5940 latest_base_folder,
5941 charm_name,
5942 vca_type,
5943 latest_vnfd_revision,
5944 )
5945 )
5946 elif ee_item.get("helm-chart"):
5947 # add chart to list and all parameters
5948 step = "Getting helm chart name"
5949 chart_name = ee_item.get("helm-chart")
5950 if (
5951 ee_item.get("helm-version")
5952 and ee_item.get("helm-version") == "v2"
5953 ):
5954 vca_type = "helm"
5955 else:
5956 vca_type = "helm-v3"
5957 step = "Setting Helm chart artifact paths"
5958
5959 helm_artifacts.append(
5960 {
5961 "current_artifact_path": get_charm_artifact_path(
5962 current_base_folder,
5963 chart_name,
5964 vca_type,
5965 current_vnf_revision,
5966 ),
5967 "target_artifact_path": get_charm_artifact_path(
5968 latest_base_folder,
5969 chart_name,
5970 vca_type,
5971 latest_vnfd_revision,
5972 ),
5973 "ee_id": ee_id,
5974 "vca_index": vca_index,
5975 "vdu_index": vdu_count_index,
5976 }
5977 )
5978
5979 charm_artifact_paths = zip(
5980 current_charm_artifact_path, target_charm_artifact_path
5981 )
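# Pair each current charm artifact path with its target path so the
# hashes can be compared below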
5982
5983 step = "Checking if software version has changed in VNFD"
5984 if find_software_version(current_vnfd) != find_software_version(
5985 latest_vnfd
5986 ):
5987 step = "Checking if existing VNF has charm"
5988 for current_charm_path, target_charm_path in list(
5989 charm_artifact_paths
5990 ):
5991 if current_charm_path:
5992 raise LcmException(
5993 "Software version change is not supported as VNF instance {} has charm.".format(
5994 vnf_instance_id
5995 )
5996 )
5997
# There is no change in the charm package, so redeploy the VNF
# based on the new descriptor
6000 step = "Redeploying VNF"
6001 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6002 (result, detailed_status) = await self._ns_redeploy_vnf(
6003 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
6004 )
6005 if result == "FAILED":
6006 nslcmop_operation_state = result
6007 error_description_nslcmop = detailed_status
6008 db_nslcmop_update["detailed-status"] = detailed_status
6009 self.logger.debug(
6010 logging_text
6011 + " step {} Done with result {} {}".format(
6012 step, nslcmop_operation_state, detailed_status
6013 )
6014 )
6015
6016 else:
6017 step = "Checking if any charm package has changed or not"
6018 for current_charm_path, target_charm_path in list(
6019 charm_artifact_paths
6020 ):
6021 if (
6022 current_charm_path
6023 and target_charm_path
6024 and self.check_charm_hash_changed(
6025 current_charm_path, target_charm_path
6026 )
6027 ):
6028 step = "Checking whether VNF uses juju bundle"
6029 if check_juju_bundle_existence(current_vnfd):
6030 raise LcmException(
6031 "Charm upgrade is not supported for the instance which"
6032 " uses juju-bundle: {}".format(
6033 check_juju_bundle_existence(current_vnfd)
6034 )
6035 )
6036
6037 step = "Upgrading Charm"
6038 (
6039 result,
6040 detailed_status,
6041 ) = await self._ns_charm_upgrade(
6042 ee_id=ee_id,
6043 charm_id=vca_id,
6044 charm_type=vca_type,
6045 path=self.fs.path + target_charm_path,
6046 timeout=timeout_seconds,
6047 )
6048
6049 if result == "FAILED":
6050 nslcmop_operation_state = result
6051 error_description_nslcmop = detailed_status
6052
6053 db_nslcmop_update["detailed-status"] = detailed_status
6054 self.logger.debug(
6055 logging_text
6056 + " step {} Done with result {} {}".format(
6057 step, nslcmop_operation_state, detailed_status
6058 )
6059 )
6060
6061 step = "Updating policies"
6062 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6063 result = "COMPLETED"
6064 detailed_status = "Done"
6065 db_nslcmop_update["detailed-status"] = "Done"
6066
6067 # helm base EE
6068 for item in helm_artifacts:
6069 if not (
6070 item["current_artifact_path"]
6071 and item["target_artifact_path"]
6072 and self.check_charm_hash_changed(
6073 item["current_artifact_path"],
6074 item["target_artifact_path"],
6075 )
6076 ):
6077 continue
6078 db_update_entry = "_admin.deployed.VCA.{}.".format(
6079 item["vca_index"]
6080 )
6081 vnfr_id = db_vnfr["_id"]
6082 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6083 db_dict = {
6084 "collection": "nsrs",
6085 "filter": {"_id": nsr_id},
6086 "path": db_update_entry,
6087 }
6088 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
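# The ee_id bundles the vca type, namespace and helm id; unpack it to
# address the execution environment being upgraded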
6089 await self.vca_map[vca_type].upgrade_execution_environment(
6090 namespace=namespace,
6091 helm_id=helm_id,
6092 db_dict=db_dict,
6093 config=osm_config,
6094 artifact_path=item["target_artifact_path"],
6095 vca_type=vca_type,
6096 )
6097 vnf_id = db_vnfr.get("vnfd-ref")
6098 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6099 self.logger.debug("get ssh key block")
6100 rw_mgmt_ip = None
6101 if deep_get(
6102 config_descriptor,
6103 ("config-access", "ssh-access", "required"),
6104 ):
# Needed to inject an ssh key
6106 user = deep_get(
6107 config_descriptor,
6108 ("config-access", "ssh-access", "default-user"),
6109 )
6110 step = (
6111 "Install configuration Software, getting public ssh key"
6112 )
6113 pub_key = await self.vca_map[
6114 vca_type
6115 ].get_ee_ssh_public__key(
6116 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6117 )
6118
6119 step = (
6120 "Insert public key into VM user={} ssh_key={}".format(
6121 user, pub_key
6122 )
6123 )
6124 self.logger.debug(logging_text + step)
6125
# wait for RO to report the VM ip-address, then insert pub_key into the VM
6127 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6128 logging_text,
6129 nsr_id,
6130 vnfr_id,
6131 None,
6132 item["vdu_index"],
6133 user=user,
6134 pub_key=pub_key,
6135 )
6136
6137 initial_config_primitive_list = config_descriptor.get(
6138 "initial-config-primitive"
6139 )
6140 config_primitive = next(
6141 (
6142 p
6143 for p in initial_config_primitive_list
6144 if p["name"] == "config"
6145 ),
6146 None,
6147 )
6148 if not config_primitive:
6149 continue
6150
6151 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6152 if rw_mgmt_ip:
6153 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6154 if db_vnfr.get("additionalParamsForVnf"):
6155 deploy_params.update(
6156 parse_yaml_strings(
6157 db_vnfr["additionalParamsForVnf"].copy()
6158 )
6159 )
6160 primitive_params_ = self._map_primitive_params(
6161 config_primitive, {}, deploy_params
6162 )
6163
6164 step = "execute primitive '{}' params '{}'".format(
6165 config_primitive["name"], primitive_params_
6166 )
6167 self.logger.debug(logging_text + step)
6168 await self.vca_map[vca_type].exec_primitive(
6169 ee_id=ee_id,
6170 primitive_name=config_primitive["name"],
6171 params_dict=primitive_params_,
6172 db_dict=db_dict,
6173 vca_id=vca_id,
6174 vca_type=vca_type,
6175 )
6176
6177 step = "Updating policies"
6178 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6179 detailed_status = "Done"
6180 db_nslcmop_update["detailed-status"] = "Done"
6181
# If nslcmop_operation_state is still None, no operation has failed.
6183 if not nslcmop_operation_state:
6184 nslcmop_operation_state = "COMPLETED"
6185
# If the CHANGE_VNFPKG operation is successful,
# the vnf revision needs to be updated
6188 vnfr_update["revision"] = latest_vnfd_revision
6189 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6190
6191 self.logger.debug(
6192 logging_text
6193 + " task Done with result {} {}".format(
6194 nslcmop_operation_state, detailed_status
6195 )
6196 )
6197 elif update_type == "REMOVE_VNF":
6198 # This part is included in https://osm.etsi.org/gerrit/11876
6199 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6200 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6201 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6202 step = "Removing VNF"
6203 (result, detailed_status) = await self.remove_vnf(
6204 nsr_id, nslcmop_id, vnf_instance_id
6205 )
6206 if result == "FAILED":
6207 nslcmop_operation_state = result
6208 error_description_nslcmop = detailed_status
6209 db_nslcmop_update["detailed-status"] = detailed_status
6210 change_type = "vnf_terminated"
6211 if not nslcmop_operation_state:
6212 nslcmop_operation_state = "COMPLETED"
6213 self.logger.debug(
6214 logging_text
6215 + " task Done with result {} {}".format(
6216 nslcmop_operation_state, detailed_status
6217 )
6218 )
6219
6220 elif update_type == "OPERATE_VNF":
6221 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6222 "vnfInstanceId"
6223 ]
6224 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6225 "changeStateTo"
6226 ]
6227 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6228 "additionalParam"
6229 ]
6230 (result, detailed_status) = await self.rebuild_start_stop(
6231 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6232 )
6233 if result == "FAILED":
6234 nslcmop_operation_state = result
6235 error_description_nslcmop = detailed_status
6236 db_nslcmop_update["detailed-status"] = detailed_status
6237 if not nslcmop_operation_state:
6238 nslcmop_operation_state = "COMPLETED"
6239 self.logger.debug(
6240 logging_text
6241 + " task Done with result {} {}".format(
6242 nslcmop_operation_state, detailed_status
6243 )
6244 )
6245
# If nslcmop_operation_state is still None, no operation has failed
# and all operations completed successfully.
6248 if not nslcmop_operation_state:
6249 nslcmop_operation_state = "COMPLETED"
6250 db_nsr_update["operational-status"] = old_operational_status
6251
6252 except (DbException, LcmException, N2VCException, K8sException) as e:
6253 self.logger.error(logging_text + "Exit Exception {}".format(e))
6254 exc = e
6255 except asyncio.CancelledError:
6256 self.logger.error(
6257 logging_text + "Cancelled Exception while '{}'".format(step)
6258 )
6259 exc = "Operation was cancelled"
6260 except asyncio.TimeoutError:
6261 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6262 exc = "Timeout"
6263 except Exception as e:
6264 exc = traceback.format_exc()
6265 self.logger.critical(
6266 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6267 exc_info=True,
6268 )
6269 finally:
6270 if exc:
6271 db_nslcmop_update[
6272 "detailed-status"
6273 ] = (
6274 detailed_status
6275 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6276 nslcmop_operation_state = "FAILED"
6277 db_nsr_update["operational-status"] = old_operational_status
6278 if db_nsr:
6279 self._write_ns_status(
6280 nsr_id=nsr_id,
6281 ns_state=db_nsr["nsState"],
6282 current_operation="IDLE",
6283 current_operation_id=None,
6284 other_update=db_nsr_update,
6285 )
6286
6287 self._write_op_status(
6288 op_id=nslcmop_id,
6289 stage="",
6290 error_message=error_description_nslcmop,
6291 operation_state=nslcmop_operation_state,
6292 other_update=db_nslcmop_update,
6293 )
6294
6295 if nslcmop_operation_state:
6296 try:
6297 msg = {
6298 "nsr_id": nsr_id,
6299 "nslcmop_id": nslcmop_id,
6300 "operationState": nslcmop_operation_state,
6301 }
6302 if (
6303 change_type in ("vnf_terminated", "policy_updated")
6304 and member_vnf_index
6305 ):
6306 msg.update({"vnf_member_index": member_vnf_index})
6307 await self.msg.aiowrite("ns", change_type, msg)
6308 except Exception as e:
6309 self.logger.error(
6310 logging_text + "kafka_write notification Exception {}".format(e)
6311 )
6312 self.logger.debug(logging_text + "Exit")
6313 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6314 return nslcmop_operation_state, detailed_status
6315
6316 async def scale(self, nsr_id, nslcmop_id):
6317 # Try to lock HA task here
6318 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6319 if not task_is_locked_by_me:
6320 return
6321
6322 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6323 stage = ["", "", ""]
6324 tasks_dict_info = {}
6325 # ^ stage, step, VIM progress
6326 self.logger.debug(logging_text + "Enter")
6327 # get all needed from database
6328 db_nsr = None
6329 db_nslcmop_update = {}
6330 db_nsr_update = {}
6331 exc = None
# in case of error, indicates which part of the scale operation failed, to set the nsr error status
6333 scale_process = None
6334 old_operational_status = ""
6335 old_config_status = ""
6336 nsi_id = None
6337 try:
6338 # wait for any previous tasks in process
6339 step = "Waiting for previous operations to terminate"
6340 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6341 self._write_ns_status(
6342 nsr_id=nsr_id,
6343 ns_state=None,
6344 current_operation="SCALING",
6345 current_operation_id=nslcmop_id,
6346 )
6347
6348 step = "Getting nslcmop from database"
6349 self.logger.debug(
6350 step + " after having waited for previous tasks to be completed"
6351 )
6352 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6353
6354 step = "Getting nsr from database"
6355 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6356 old_operational_status = db_nsr["operational-status"]
6357 old_config_status = db_nsr["config-status"]
6358
6359 step = "Parsing scaling parameters"
6360 db_nsr_update["operational-status"] = "scaling"
6361 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6362 nsr_deployed = db_nsr["_admin"].get("deployed")
6363
6364 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6365 "scaleByStepData"
6366 ]["member-vnf-index"]
6367 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6368 "scaleByStepData"
6369 ]["scaling-group-descriptor"]
6370 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6371 # for backward compatibility
6372 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6373 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6374 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6375 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6376
6377 step = "Getting vnfr from database"
6378 db_vnfr = self.db.get_one(
6379 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6380 )
6381
6382 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6383
6384 step = "Getting vnfd from database"
6385 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6386
6387 base_folder = db_vnfd["_admin"]["storage"]
6388
6389 step = "Getting scaling-group-descriptor"
6390 scaling_descriptor = find_in_list(
6391 get_scaling_aspect(db_vnfd),
6392 lambda scale_desc: scale_desc["name"] == scaling_group,
6393 )
6394 if not scaling_descriptor:
6395 raise LcmException(
6396 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6397 "at vnfd:scaling-group-descriptor".format(scaling_group)
6398 )
6399
6400 step = "Sending scale order to VIM"
6401 # TODO check if ns is in a proper status
6402 nb_scale_op = 0
6403 if not db_nsr["_admin"].get("scaling-group"):
6404 self.update_db_2(
6405 "nsrs",
6406 nsr_id,
6407 {
6408 "_admin.scaling-group": [
6409 {"name": scaling_group, "nb-scale-op": 0}
6410 ]
6411 },
6412 )
6413 admin_scale_index = 0
6414 else:
6415 for admin_scale_index, admin_scale_info in enumerate(
6416 db_nsr["_admin"]["scaling-group"]
6417 ):
6418 if admin_scale_info["name"] == scaling_group:
6419 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6420 break
else: # not found: set the index one past the last element and add a new entry with this name
6422 admin_scale_index += 1
6423 db_nsr_update[
6424 "_admin.scaling-group.{}.name".format(admin_scale_index)
6425 ] = scaling_group
6426
6427 vca_scaling_info = []
6428 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6429 if scaling_type == "SCALE_OUT":
6430 if "aspect-delta-details" not in scaling_descriptor:
6431 raise LcmException(
6432 "Aspect delta details not fount in scaling descriptor {}".format(
6433 scaling_descriptor["name"]
6434 )
6435 )
# check whether max-instance-count would be exceeded
6437 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6438
6439 scaling_info["scaling_direction"] = "OUT"
6440 scaling_info["vdu-create"] = {}
6441 scaling_info["kdu-create"] = {}
6442 for delta in deltas:
6443 for vdu_delta in delta.get("vdu-delta", {}):
6444 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6445 # vdu_index also provides the number of instance of the targeted vdu
6446 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6447 cloud_init_text = self._get_vdu_cloud_init_content(
6448 vdud, db_vnfd
6449 )
6450 if cloud_init_text:
6451 additional_params = (
6452 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6453 or {}
6454 )
6455 cloud_init_list = []
6456
6457 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6458 max_instance_count = 10
6459 if vdu_profile and "max-number-of-instances" in vdu_profile:
6460 max_instance_count = vdu_profile.get(
6461 "max-number-of-instances", 10
6462 )
6463
6464 default_instance_num = get_number_of_instances(
6465 db_vnfd, vdud["id"]
6466 )
6467 instances_number = vdu_delta.get("number-of-instances", 1)
6468 nb_scale_op += instances_number
6469
6470 new_instance_count = nb_scale_op + default_instance_num
# Check whether the new count exceeds the max while the current vdu
# count is still below it; if so, adjust the number of new instances
if new_instance_count > max_instance_count > vdu_count:
instances_number = new_instance_count - max_instance_count
6477
6478 if new_instance_count > max_instance_count:
6479 raise LcmException(
6480 "reached the limit of {} (max-instance-count) "
6481 "scaling-out operations for the "
6482 "scaling-group-descriptor '{}'".format(
6483 nb_scale_op, scaling_group
6484 )
6485 )
6486 for x in range(vdu_delta.get("number-of-instances", 1)):
6487 if cloud_init_text:
# TODO The VDU's own ip is not available because db_vnfr is not updated yet.
6489 additional_params["OSM"] = get_osm_params(
6490 db_vnfr, vdu_delta["id"], vdu_index + x
6491 )
6492 cloud_init_list.append(
6493 self._parse_cloud_init(
6494 cloud_init_text,
6495 additional_params,
6496 db_vnfd["id"],
6497 vdud["id"],
6498 )
6499 )
6500 vca_scaling_info.append(
6501 {
6502 "osm_vdu_id": vdu_delta["id"],
6503 "member-vnf-index": vnf_index,
6504 "type": "create",
6505 "vdu_index": vdu_index + x,
6506 }
6507 )
6508 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6509 for kdu_delta in delta.get("kdu-resource-delta", {}):
6510 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6511 kdu_name = kdu_profile["kdu-name"]
6512 resource_name = kdu_profile.get("resource-name", "")
6513
# A single delta may reference several kdus,
# so keep a separate list for each kdu
6516 if not scaling_info["kdu-create"].get(kdu_name, None):
6517 scaling_info["kdu-create"][kdu_name] = []
6518
6519 kdur = get_kdur(db_vnfr, kdu_name)
6520 if kdur.get("helm-chart"):
6521 k8s_cluster_type = "helm-chart-v3"
6522 self.logger.debug("kdur: {}".format(kdur))
6523 if (
6524 kdur.get("helm-version")
6525 and kdur.get("helm-version") == "v2"
6526 ):
6527 k8s_cluster_type = "helm-chart"
6528 elif kdur.get("juju-bundle"):
6529 k8s_cluster_type = "juju-bundle"
6530 else:
6531 raise LcmException(
6532 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6533 "juju-bundle. Maybe an old NBI version is running".format(
6534 db_vnfr["member-vnf-index-ref"], kdu_name
6535 )
6536 )
6537
6538 max_instance_count = 10
6539 if kdu_profile and "max-number-of-instances" in kdu_profile:
6540 max_instance_count = kdu_profile.get(
6541 "max-number-of-instances", 10
6542 )
6543
6544 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6545 deployed_kdu, _ = get_deployed_kdu(
6546 nsr_deployed, kdu_name, vnf_index
6547 )
6548 if deployed_kdu is None:
6549 raise LcmException(
6550 "KDU '{}' for vnf '{}' not deployed".format(
6551 kdu_name, vnf_index
6552 )
6553 )
6554 kdu_instance = deployed_kdu.get("kdu-instance")
6555 instance_num = await self.k8scluster_map[
6556 k8s_cluster_type
6557 ].get_scale_count(
6558 resource_name,
6559 kdu_instance,
6560 vca_id=vca_id,
6561 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6562 kdu_model=deployed_kdu.get("kdu-model"),
6563 )
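# Fetch the live replica count from the cluster so the delta is
# applied on top of the current value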
6564 kdu_replica_count = instance_num + kdu_delta.get(
6565 "number-of-instances", 1
6566 )
6567
# Check whether the new count exceeds the max while instance_num is
# below it; if so, cap the kdu replica count at the max
6570 if kdu_replica_count > max_instance_count > instance_num:
6571 kdu_replica_count = max_instance_count
6572 if kdu_replica_count > max_instance_count:
6573 raise LcmException(
6574 "reached the limit of {} (max-instance-count) "
6575 "scaling-out operations for the "
6576 "scaling-group-descriptor '{}'".format(
6577 instance_num, scaling_group
6578 )
6579 )
6580
6581 for x in range(kdu_delta.get("number-of-instances", 1)):
6582 vca_scaling_info.append(
6583 {
6584 "osm_kdu_id": kdu_name,
6585 "member-vnf-index": vnf_index,
6586 "type": "create",
6587 "kdu_index": instance_num + x - 1,
6588 }
6589 )
6590 scaling_info["kdu-create"][kdu_name].append(
6591 {
6592 "member-vnf-index": vnf_index,
6593 "type": "create",
6594 "k8s-cluster-type": k8s_cluster_type,
6595 "resource-name": resource_name,
6596 "scale": kdu_replica_count,
6597 }
6598 )
6599 elif scaling_type == "SCALE_IN":
6600 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6601
6602 scaling_info["scaling_direction"] = "IN"
6603 scaling_info["vdu-delete"] = {}
6604 scaling_info["kdu-delete"] = {}
6605
6606 for delta in deltas:
6607 for vdu_delta in delta.get("vdu-delta", {}):
6608 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6609 min_instance_count = 0
6610 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6611 if vdu_profile and "min-number-of-instances" in vdu_profile:
6612 min_instance_count = vdu_profile["min-number-of-instances"]
6613
6614 default_instance_num = get_number_of_instances(
6615 db_vnfd, vdu_delta["id"]
6616 )
6617 instance_num = vdu_delta.get("number-of-instances", 1)
6618 nb_scale_op -= instance_num
6619
6620 new_instance_count = nb_scale_op + default_instance_num
6621
6622 if new_instance_count < min_instance_count < vdu_count:
6623 instances_number = min_instance_count - new_instance_count
6624 else:
6625 instances_number = instance_num
6626
6627 if new_instance_count < min_instance_count:
6628 raise LcmException(
6629 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6630 "scaling-group-descriptor '{}'".format(
6631 nb_scale_op, scaling_group
6632 )
6633 )
6634 for x in range(vdu_delta.get("number-of-instances", 1)):
6635 vca_scaling_info.append(
6636 {
6637 "osm_vdu_id": vdu_delta["id"],
6638 "member-vnf-index": vnf_index,
6639 "type": "delete",
6640 "vdu_index": vdu_index - 1 - x,
6641 }
6642 )
6643 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6644 for kdu_delta in delta.get("kdu-resource-delta", {}):
6645 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6646 kdu_name = kdu_profile["kdu-name"]
6647 resource_name = kdu_profile.get("resource-name", "")
6648
6649 if not scaling_info["kdu-delete"].get(kdu_name, None):
6650 scaling_info["kdu-delete"][kdu_name] = []
6651
6652 kdur = get_kdur(db_vnfr, kdu_name)
6653 if kdur.get("helm-chart"):
6654 k8s_cluster_type = "helm-chart-v3"
6655 self.logger.debug("kdur: {}".format(kdur))
6656 if (
6657 kdur.get("helm-version")
6658 and kdur.get("helm-version") == "v2"
6659 ):
6660 k8s_cluster_type = "helm-chart"
6661 elif kdur.get("juju-bundle"):
6662 k8s_cluster_type = "juju-bundle"
6663 else:
6664 raise LcmException(
6665 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6666 "juju-bundle. Maybe an old NBI version is running".format(
6667 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6668 )
6669 )
6670
6671 min_instance_count = 0
6672 if kdu_profile and "min-number-of-instances" in kdu_profile:
6673 min_instance_count = kdu_profile["min-number-of-instances"]
6674
6675 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6676 deployed_kdu, _ = get_deployed_kdu(
6677 nsr_deployed, kdu_name, vnf_index
6678 )
6679 if deployed_kdu is None:
6680 raise LcmException(
6681 "KDU '{}' for vnf '{}' not deployed".format(
6682 kdu_name, vnf_index
6683 )
6684 )
6685 kdu_instance = deployed_kdu.get("kdu-instance")
6686 instance_num = await self.k8scluster_map[
6687 k8s_cluster_type
6688 ].get_scale_count(
6689 resource_name,
6690 kdu_instance,
6691 vca_id=vca_id,
6692 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6693 kdu_model=deployed_kdu.get("kdu-model"),
6694 )
6695 kdu_replica_count = instance_num - kdu_delta.get(
6696 "number-of-instances", 1
6697 )
6698
6699 if kdu_replica_count < min_instance_count < instance_num:
6700 kdu_replica_count = min_instance_count
6701 if kdu_replica_count < min_instance_count:
6702 raise LcmException(
6703 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6704 "scaling-group-descriptor '{}'".format(
6705 instance_num, scaling_group
6706 )
6707 )
6708
6709 for x in range(kdu_delta.get("number-of-instances", 1)):
6710 vca_scaling_info.append(
6711 {
6712 "osm_kdu_id": kdu_name,
6713 "member-vnf-index": vnf_index,
6714 "type": "delete",
6715 "kdu_index": instance_num - x - 1,
6716 }
6717 )
6718 scaling_info["kdu-delete"][kdu_name].append(
6719 {
6720 "member-vnf-index": vnf_index,
6721 "type": "delete",
6722 "k8s-cluster-type": k8s_cluster_type,
6723 "resource-name": resource_name,
6724 "scale": kdu_replica_count,
6725 }
6726 )
6727
# update scaling_info with the ip/mac addresses of the VDUs to be deleted
6729 vdu_delete = copy(scaling_info.get("vdu-delete"))
6730 if scaling_info["scaling_direction"] == "IN":
6731 for vdur in reversed(db_vnfr["vdur"]):
6732 if vdu_delete.get(vdur["vdu-id-ref"]):
6733 vdu_delete[vdur["vdu-id-ref"]] -= 1
6734 scaling_info["vdu"].append(
6735 {
6736 "name": vdur.get("name") or vdur.get("vdu-name"),
6737 "vdu_id": vdur["vdu-id-ref"],
6738 "interface": [],
6739 }
6740 )
6741 for interface in vdur["interfaces"]:
6742 scaling_info["vdu"][-1]["interface"].append(
6743 {
6744 "name": interface["name"],
6745 "ip_address": interface["ip-address"],
6746 "mac_address": interface.get("mac-address"),
6747 }
6748 )
6749 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6750
6751 # PRE-SCALE BEGIN
6752 step = "Executing pre-scale vnf-config-primitive"
6753 if scaling_descriptor.get("scaling-config-action"):
6754 for scaling_config_action in scaling_descriptor[
6755 "scaling-config-action"
6756 ]:
6757 if (
6758 scaling_config_action.get("trigger") == "pre-scale-in"
6759 and scaling_type == "SCALE_IN"
6760 ) or (
6761 scaling_config_action.get("trigger") == "pre-scale-out"
6762 and scaling_type == "SCALE_OUT"
6763 ):
6764 vnf_config_primitive = scaling_config_action[
6765 "vnf-config-primitive-name-ref"
6766 ]
6767 step = db_nslcmop_update[
6768 "detailed-status"
6769 ] = "executing pre-scale scaling-config-action '{}'".format(
6770 vnf_config_primitive
6771 )
6772
6773 # look for primitive
6774 for config_primitive in (
6775 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6776 ).get("config-primitive", ()):
6777 if config_primitive["name"] == vnf_config_primitive:
6778 break
6779 else:
6780 raise LcmException(
6781 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6782 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6783 "primitive".format(scaling_group, vnf_config_primitive)
6784 )
6785
6786 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6787 if db_vnfr.get("additionalParamsForVnf"):
6788 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6789
6790 scale_process = "VCA"
6791 db_nsr_update["config-status"] = "configuring pre-scaling"
6792 primitive_params = self._map_primitive_params(
6793 config_primitive, {}, vnfr_params
6794 )
6795
6796 # Pre-scale retry check: Check if this sub-operation has been executed before
6797 op_index = self._check_or_add_scale_suboperation(
6798 db_nslcmop,
6799 vnf_index,
6800 vnf_config_primitive,
6801 primitive_params,
6802 "PRE-SCALE",
6803 )
6804 if op_index == self.SUBOPERATION_STATUS_SKIP:
6805 # Skip sub-operation
6806 result = "COMPLETED"
6807 result_detail = "Done"
6808 self.logger.debug(
6809 logging_text
6810 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6811 vnf_config_primitive, result, result_detail
6812 )
6813 )
6814 else:
6815 if op_index == self.SUBOPERATION_STATUS_NEW:
6816 # New sub-operation: Get index of this sub-operation
6817 op_index = (
6818 len(db_nslcmop.get("_admin", {}).get("operations"))
6819 - 1
6820 )
6821 self.logger.debug(
6822 logging_text
6823 + "vnf_config_primitive={} New sub-operation".format(
6824 vnf_config_primitive
6825 )
6826 )
6827 else:
6828 # retry: Get registered params for this existing sub-operation
6829 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6830 op_index
6831 ]
6832 vnf_index = op.get("member_vnf_index")
6833 vnf_config_primitive = op.get("primitive")
6834 primitive_params = op.get("primitive_params")
6835 self.logger.debug(
6836 logging_text
6837 + "vnf_config_primitive={} Sub-operation retry".format(
6838 vnf_config_primitive
6839 )
6840 )
# Execute the primitive, either with new (first-time) or registered (retry) args
6842 ee_descriptor_id = config_primitive.get(
6843 "execution-environment-ref"
6844 )
6845 primitive_name = config_primitive.get(
6846 "execution-environment-primitive", vnf_config_primitive
6847 )
6848 ee_id, vca_type = self._look_for_deployed_vca(
6849 nsr_deployed["VCA"],
6850 member_vnf_index=vnf_index,
6851 vdu_id=None,
6852 vdu_count_index=None,
6853 ee_descriptor_id=ee_descriptor_id,
6854 )
6855 result, result_detail = await self._ns_execute_primitive(
6856 ee_id,
6857 primitive_name,
6858 primitive_params,
6859 vca_type=vca_type,
6860 vca_id=vca_id,
6861 )
6862 self.logger.debug(
6863 logging_text
6864 + "vnf_config_primitive={} Done with result {} {}".format(
6865 vnf_config_primitive, result, result_detail
6866 )
6867 )
6868 # Update operationState = COMPLETED | FAILED
6869 self._update_suboperation_status(
6870 db_nslcmop, op_index, result, result_detail
6871 )
6872
6873 if result == "FAILED":
6874 raise LcmException(result_detail)
6875 db_nsr_update["config-status"] = old_config_status
6876 scale_process = None
6877 # PRE-SCALE END
6878
6879 db_nsr_update[
6880 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6881 ] = nb_scale_op
6882 db_nsr_update[
6883 "_admin.scaling-group.{}.time".format(admin_scale_index)
6884 ] = time()
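# Record the accumulated number of scale operations and the timestamp
# for this scaling group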
6885
6886 # SCALE-IN VCA - BEGIN
6887 if vca_scaling_info:
6888 step = db_nslcmop_update[
6889 "detailed-status"
6890 ] = "Deleting the execution environments"
6891 scale_process = "VCA"
6892 for vca_info in vca_scaling_info:
6893 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6894 member_vnf_index = str(vca_info["member-vnf-index"])
6895 self.logger.debug(
6896 logging_text + "vdu info: {}".format(vca_info)
6897 )
6898 if vca_info.get("osm_vdu_id"):
6899 vdu_id = vca_info["osm_vdu_id"]
6900 vdu_index = int(vca_info["vdu_index"])
6901 stage[
6902 1
6903 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6904 member_vnf_index, vdu_id, vdu_index
6905 )
6906 stage[2] = step = "Scaling in VCA"
6907 self._write_op_status(op_id=nslcmop_id, stage=stage)
6908 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6909 config_update = db_nsr["configurationStatus"]
6910 for vca_index, vca in enumerate(vca_update):
6911 if (
(vca and vca.get("ee_id"))
6913 and vca["member-vnf-index"] == member_vnf_index
6914 and vca["vdu_count_index"] == vdu_index
6915 ):
6916 if vca.get("vdu_id"):
6917 config_descriptor = get_configuration(
6918 db_vnfd, vca.get("vdu_id")
6919 )
6920 elif vca.get("kdu_name"):
6921 config_descriptor = get_configuration(
6922 db_vnfd, vca.get("kdu_name")
6923 )
6924 else:
6925 config_descriptor = get_configuration(
6926 db_vnfd, db_vnfd["id"]
6927 )
6928 operation_params = (
6929 db_nslcmop.get("operationParams") or {}
6930 )
6931 exec_terminate_primitives = not operation_params.get(
6932 "skip_terminate_primitives"
6933 ) and vca.get("needed_terminate")
6934 task = asyncio.ensure_future(
6935 asyncio.wait_for(
6936 self.destroy_N2VC(
6937 logging_text,
6938 db_nslcmop,
6939 vca,
6940 config_descriptor,
6941 vca_index,
6942 destroy_ee=True,
6943 exec_primitives=exec_terminate_primitives,
6944 scaling_in=True,
6945 vca_id=vca_id,
6946 ),
6947 timeout=self.timeout.charm_delete,
6948 )
6949 )
6950 tasks_dict_info[task] = "Terminating VCA {}".format(
6951 vca.get("ee_id")
6952 )
6953 del vca_update[vca_index]
6954 del config_update[vca_index]
6955 # wait for pending tasks of terminate primitives
6956 if tasks_dict_info:
6957 self.logger.debug(
6958 logging_text
6959 + "Waiting for tasks {}".format(
6960 list(tasks_dict_info.keys())
6961 )
6962 )
6963 error_list = await self._wait_for_tasks(
6964 logging_text,
6965 tasks_dict_info,
6966 min(
6967 self.timeout.charm_delete, self.timeout.ns_terminate
6968 ),
6969 stage,
6970 nslcmop_id,
6971 )
6972 tasks_dict_info.clear()
6973 if error_list:
6974 raise LcmException("; ".join(error_list))
6975
6976 db_vca_and_config_update = {
6977 "_admin.deployed.VCA": vca_update,
6978 "configurationStatus": config_update,
6979 }
6980 self.update_db_2(
6981 "nsrs", db_nsr["_id"], db_vca_and_config_update
6982 )
6983 scale_process = None
6984 # SCALE-IN VCA - END
6985
6986 # SCALE RO - BEGIN
6987 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6988 scale_process = "RO"
6989 if self.ro_config.ng:
6990 await self._scale_ng_ro(
6991 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6992 )
6993 scaling_info.pop("vdu-create", None)
6994 scaling_info.pop("vdu-delete", None)
6995
6996 scale_process = None
6997 # SCALE RO - END
6998
6999 # SCALE KDU - BEGIN
7000 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
7001 scale_process = "KDU"
7002 await self._scale_kdu(
7003 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7004 )
7005 scaling_info.pop("kdu-create", None)
7006 scaling_info.pop("kdu-delete", None)
7007
7008 scale_process = None
7009 # SCALE KDU - END
7010
7011 if db_nsr_update:
7012 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7013
7014 # SCALE-UP VCA - BEGIN
7015 if vca_scaling_info:
7016 step = db_nslcmop_update[
7017 "detailed-status"
7018 ] = "Creating new execution environments"
7019 scale_process = "VCA"
7020 for vca_info in vca_scaling_info:
7021 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7022 member_vnf_index = str(vca_info["member-vnf-index"])
7023 self.logger.debug(
7024 logging_text + "vdu info: {}".format(vca_info)
7025 )
7026 vnfd_id = db_vnfr["vnfd-ref"]
7027 if vca_info.get("osm_vdu_id"):
7028 vdu_index = int(vca_info["vdu_index"])
7029 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7030 if db_vnfr.get("additionalParamsForVnf"):
7031 deploy_params.update(
7032 parse_yaml_strings(
7033 db_vnfr["additionalParamsForVnf"].copy()
7034 )
7035 )
7036 descriptor_config = get_configuration(
7037 db_vnfd, db_vnfd["id"]
7038 )
7039 if descriptor_config:
7040 vdu_id = None
7041 vdu_name = None
7042 kdu_name = None
7043 kdu_index = None
7044 self._deploy_n2vc(
7045 logging_text=logging_text
7046 + "member_vnf_index={} ".format(member_vnf_index),
7047 db_nsr=db_nsr,
7048 db_vnfr=db_vnfr,
7049 nslcmop_id=nslcmop_id,
7050 nsr_id=nsr_id,
7051 nsi_id=nsi_id,
7052 vnfd_id=vnfd_id,
7053 vdu_id=vdu_id,
7054 kdu_name=kdu_name,
7055 kdu_index=kdu_index,
7056 member_vnf_index=member_vnf_index,
7057 vdu_index=vdu_index,
7058 vdu_name=vdu_name,
7059 deploy_params=deploy_params,
7060 descriptor_config=descriptor_config,
7061 base_folder=base_folder,
7062 task_instantiation_info=tasks_dict_info,
7063 stage=stage,
7064 )
7065 vdu_id = vca_info["osm_vdu_id"]
7066 vdur = find_in_list(
7067 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7068 )
7069 descriptor_config = get_configuration(db_vnfd, vdu_id)
7070 if vdur.get("additionalParams"):
7071 deploy_params_vdu = parse_yaml_strings(
7072 vdur["additionalParams"]
7073 )
7074 else:
7075 deploy_params_vdu = deploy_params
7076 deploy_params_vdu["OSM"] = get_osm_params(
7077 db_vnfr, vdu_id, vdu_count_index=vdu_index
7078 )
7079 if descriptor_config:
7080 vdu_name = None
7081 kdu_name = None
7082 kdu_index = None
7083 stage[
7084 1
7085 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7086 member_vnf_index, vdu_id, vdu_index
7087 )
7088 stage[2] = step = "Scaling out VCA"
7089 self._write_op_status(op_id=nslcmop_id, stage=stage)
7090 self._deploy_n2vc(
7091 logging_text=logging_text
7092 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7093 member_vnf_index, vdu_id, vdu_index
7094 ),
7095 db_nsr=db_nsr,
7096 db_vnfr=db_vnfr,
7097 nslcmop_id=nslcmop_id,
7098 nsr_id=nsr_id,
7099 nsi_id=nsi_id,
7100 vnfd_id=vnfd_id,
7101 vdu_id=vdu_id,
7102 kdu_name=kdu_name,
7103 member_vnf_index=member_vnf_index,
7104 vdu_index=vdu_index,
7105 kdu_index=kdu_index,
7106 vdu_name=vdu_name,
7107 deploy_params=deploy_params_vdu,
7108 descriptor_config=descriptor_config,
7109 base_folder=base_folder,
7110 task_instantiation_info=tasks_dict_info,
7111 stage=stage,
7112 )
7113 # SCALE-UP VCA - END
7114 scale_process = None
7115
7116 # POST-SCALE BEGIN
7117 # execute post-scale vnf-config-primitives
7118 step = "Executing post-scale vnf-config-primitive"
7119 if scaling_descriptor.get("scaling-config-action"):
7120 for scaling_config_action in scaling_descriptor[
7121 "scaling-config-action"
7122 ]:
7123 if (
7124 scaling_config_action.get("trigger") == "post-scale-in"
7125 and scaling_type == "SCALE_IN"
7126 ) or (
7127 scaling_config_action.get("trigger") == "post-scale-out"
7128 and scaling_type == "SCALE_OUT"
7129 ):
7130 vnf_config_primitive = scaling_config_action[
7131 "vnf-config-primitive-name-ref"
7132 ]
7133 step = db_nslcmop_update[
7134 "detailed-status"
7135 ] = "executing post-scale scaling-config-action '{}'".format(
7136 vnf_config_primitive
7137 )
7138
7139 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7140 if db_vnfr.get("additionalParamsForVnf"):
7141 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7142
7143 # look for primitive
7144 for config_primitive in (
7145 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7146 ).get("config-primitive", ()):
7147 if config_primitive["name"] == vnf_config_primitive:
7148 break
7149 else:
7150 raise LcmException(
7151 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7152 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7153 "config-primitive".format(
7154 scaling_group, vnf_config_primitive
7155 )
7156 )
7157 scale_process = "VCA"
7158 db_nsr_update["config-status"] = "configuring post-scaling"
7159 primitive_params = self._map_primitive_params(
7160 config_primitive, {}, vnfr_params
7161 )
7162
7163 # Post-scale retry check: Check if this sub-operation has been executed before
7164 op_index = self._check_or_add_scale_suboperation(
7165 db_nslcmop,
7166 vnf_index,
7167 vnf_config_primitive,
7168 primitive_params,
7169 "POST-SCALE",
7170 )
7171 if op_index == self.SUBOPERATION_STATUS_SKIP:
7172 # Skip sub-operation
7173 result = "COMPLETED"
7174 result_detail = "Done"
7175 self.logger.debug(
7176 logging_text
7177 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7178 vnf_config_primitive, result, result_detail
7179 )
7180 )
7181 else:
7182 if op_index == self.SUBOPERATION_STATUS_NEW:
7183 # New sub-operation: Get index of this sub-operation
7184 op_index = (
7185 len(db_nslcmop.get("_admin", {}).get("operations"))
7186 - 1
7187 )
7188 self.logger.debug(
7189 logging_text
7190 + "vnf_config_primitive={} New sub-operation".format(
7191 vnf_config_primitive
7192 )
7193 )
7194 else:
7195 # retry: Get registered params for this existing sub-operation
7196 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7197 op_index
7198 ]
7199 vnf_index = op.get("member_vnf_index")
7200 vnf_config_primitive = op.get("primitive")
7201 primitive_params = op.get("primitive_params")
7202 self.logger.debug(
7203 logging_text
7204 + "vnf_config_primitive={} Sub-operation retry".format(
7205 vnf_config_primitive
7206 )
7207 )
7208 # Execute the primitive, either with new (first-time) or registered (retry) args
7209 ee_descriptor_id = config_primitive.get(
7210 "execution-environment-ref"
7211 )
7212 primitive_name = config_primitive.get(
7213 "execution-environment-primitive", vnf_config_primitive
7214 )
7215 ee_id, vca_type = self._look_for_deployed_vca(
7216 nsr_deployed["VCA"],
7217 member_vnf_index=vnf_index,
7218 vdu_id=None,
7219 vdu_count_index=None,
7220 ee_descriptor_id=ee_descriptor_id,
7221 )
7222 result, result_detail = await self._ns_execute_primitive(
7223 ee_id,
7224 primitive_name,
7225 primitive_params,
7226 vca_type=vca_type,
7227 vca_id=vca_id,
7228 )
7229 self.logger.debug(
7230 logging_text
7231 + "vnf_config_primitive={} Done with result {} {}".format(
7232 vnf_config_primitive, result, result_detail
7233 )
7234 )
7235 # Update operationState = COMPLETED | FAILED
7236 self._update_suboperation_status(
7237 db_nslcmop, op_index, result, result_detail
7238 )
7239
7240 if result == "FAILED":
7241 raise LcmException(result_detail)
7242 db_nsr_update["config-status"] = old_config_status
7243 scale_process = None
7244 # POST-SCALE END
7245
7246 db_nsr_update[
7247 "detailed-status"
7248 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7249 db_nsr_update["operational-status"] = (
7250 "running"
7251 if old_operational_status == "failed"
7252 else old_operational_status
7253 )
7254 db_nsr_update["config-status"] = old_config_status
7255 return
7256 except (
7257 ROclient.ROClientException,
7258 DbException,
7259 LcmException,
7260 NgRoException,
7261 ) as e:
7262 self.logger.error(logging_text + "Exit Exception {}".format(e))
7263 exc = e
7264 except asyncio.CancelledError:
7265 self.logger.error(
7266 logging_text + "Cancelled Exception while '{}'".format(step)
7267 )
7268 exc = "Operation was cancelled"
7269 except Exception as e:
7270 exc = traceback.format_exc()
7271 self.logger.critical(
7272 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7273 exc_info=True,
7274 )
7275 finally:
7276 self._write_ns_status(
7277 nsr_id=nsr_id,
7278 ns_state=None,
7279 current_operation="IDLE",
7280 current_operation_id=None,
7281 )
7282 if tasks_dict_info:
7283 stage[1] = "Waiting for instantiate pending tasks."
7284 self.logger.debug(logging_text + stage[1])
7285 exc = await self._wait_for_tasks(
7286 logging_text,
7287 tasks_dict_info,
7288 self.timeout.ns_deploy,
7289 stage,
7290 nslcmop_id,
7291 nsr_id=nsr_id,
7292 )
7293 if exc:
7294 db_nslcmop_update[
7295 "detailed-status"
7296 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7297 nslcmop_operation_state = "FAILED"
7298 if db_nsr:
7299 db_nsr_update["operational-status"] = old_operational_status
7300 db_nsr_update["config-status"] = old_config_status
7301 db_nsr_update["detailed-status"] = ""
7302 if scale_process:
7303 if "VCA" in scale_process:
7304 db_nsr_update["config-status"] = "failed"
7305 if "RO" in scale_process:
7306 db_nsr_update["operational-status"] = "failed"
7307 db_nsr_update[
7308 "detailed-status"
7309 ] = "FAILED scaling nslcmop={} {}: {}".format(
7310 nslcmop_id, step, exc
7311 )
7312 else:
7313 error_description_nslcmop = None
7314 nslcmop_operation_state = "COMPLETED"
7315 db_nslcmop_update["detailed-status"] = "Done"
7316
7317 self._write_op_status(
7318 op_id=nslcmop_id,
7319 stage="",
7320 error_message=error_description_nslcmop,
7321 operation_state=nslcmop_operation_state,
7322 other_update=db_nslcmop_update,
7323 )
7324 if db_nsr:
7325 self._write_ns_status(
7326 nsr_id=nsr_id,
7327 ns_state=None,
7328 current_operation="IDLE",
7329 current_operation_id=None,
7330 other_update=db_nsr_update,
7331 )
7332
7333 if nslcmop_operation_state:
7334 try:
7335 msg = {
7336 "nsr_id": nsr_id,
7337 "nslcmop_id": nslcmop_id,
7338 "operationState": nslcmop_operation_state,
7339 }
7340 await self.msg.aiowrite("ns", "scaled", msg)
7341 except Exception as e:
7342 self.logger.error(
7343 logging_text + "kafka_write notification Exception {}".format(e)
7344 )
7345 self.logger.debug(logging_text + "Exit")
7346 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7347
7348 async def _scale_kdu(
7349 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7350 ):
7351 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
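# a single operation is expected to carry either kdu-create or kdu-delete
# entries; 'kdu-create' takes precedence here if both were ever present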
7352 for kdu_name in _scaling_info:
7353 for kdu_scaling_info in _scaling_info[kdu_name]:
7354 deployed_kdu, index = get_deployed_kdu(
7355 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7356 )
7357 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7358 kdu_instance = deployed_kdu["kdu-instance"]
7359 kdu_model = deployed_kdu.get("kdu-model")
7360 scale = int(kdu_scaling_info["scale"])
7361 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7362
7363 db_dict = {
7364 "collection": "nsrs",
7365 "filter": {"_id": nsr_id},
7366 "path": "_admin.deployed.K8s.{}".format(index),
7367 }
7368
7369 step = "scaling application {}".format(
7370 kdu_scaling_info["resource-name"]
7371 )
7372 self.logger.debug(logging_text + step)
7373
7374 if kdu_scaling_info["type"] == "delete":
7375 kdu_config = get_configuration(db_vnfd, kdu_name)
7376 if (
7377 kdu_config
7378 and kdu_config.get("terminate-config-primitive")
7379 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7380 ):
7381 terminate_config_primitive_list = kdu_config.get(
7382 "terminate-config-primitive"
7383 )
7384 terminate_config_primitive_list.sort(
7385 key=lambda val: int(val["seq"])
7386 )
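# terminate primitives run in ascending 'seq' order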
7387
7388 for (
7389 terminate_config_primitive
7390 ) in terminate_config_primitive_list:
7391 primitive_params_ = self._map_primitive_params(
7392 terminate_config_primitive, {}, {}
7393 )
7394 step = "execute terminate config primitive"
7395 self.logger.debug(logging_text + step)
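# note (assumption): primitive_outer_factor is expected to be > 1, so this
# outer asyncio.wait_for only fires if the connector's own total_timeout
# fails to expire first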
7396 await asyncio.wait_for(
7397 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7398 cluster_uuid=cluster_uuid,
7399 kdu_instance=kdu_instance,
7400 primitive_name=terminate_config_primitive["name"],
7401 params=primitive_params_,
7402 db_dict=db_dict,
7403 total_timeout=self.timeout.primitive,
7404 vca_id=vca_id,
7405 ),
7406 timeout=self.timeout.primitive
7407 * self.timeout.primitive_outer_factor,
7408 )
7409
7410 await asyncio.wait_for(
7411 self.k8scluster_map[k8s_cluster_type].scale(
7412 kdu_instance=kdu_instance,
7413 scale=scale,
7414 resource_name=kdu_scaling_info["resource-name"],
7415 total_timeout=self.timeout.scale_on_error,
7416 vca_id=vca_id,
7417 cluster_uuid=cluster_uuid,
7418 kdu_model=kdu_model,
7419 atomic=True,
7420 db_dict=db_dict,
7421 ),
7422 timeout=self.timeout.scale_on_error
7423 * self.timeout.scale_on_error_outer_factor,
7424 )
7425
7426 if kdu_scaling_info["type"] == "create":
7427 kdu_config = get_configuration(db_vnfd, kdu_name)
7428 if (
7429 kdu_config
7430 and kdu_config.get("initial-config-primitive")
7431 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7432 ):
7433 initial_config_primitive_list = kdu_config.get(
7434 "initial-config-primitive"
7435 )
7436 initial_config_primitive_list.sort(
7437 key=lambda val: int(val["seq"])
7438 )
7439
7440 for initial_config_primitive in initial_config_primitive_list:
7441 primitive_params_ = self._map_primitive_params(
7442 initial_config_primitive, {}, {}
7443 )
7444 step = "execute initial config primitive"
7445 self.logger.debug(logging_text + step)
7446 await asyncio.wait_for(
7447 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7448 cluster_uuid=cluster_uuid,
7449 kdu_instance=kdu_instance,
7450 primitive_name=initial_config_primitive["name"],
7451 params=primitive_params_,
7452 db_dict=db_dict,
7453 vca_id=vca_id,
7454 ),
7455 timeout=600,
7456 )
7457
7458 async def _scale_ng_ro(
7459 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7460 ):
7461 nsr_id = db_nslcmop["nsInstanceId"]
7462 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7463 db_vnfrs = {}
7464
7465 # read from db: vnfd's for every vnf
7466 db_vnfds = []
7467
7468 # for each vnf in ns, read vnfd
7469 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7470 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7471 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7472 # if we do not have this vnfd yet, read it from db
7473 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7474 # read from db
7475 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7476 db_vnfds.append(vnfd)
7477 n2vc_key = self.n2vc.get_public_key()
7478 n2vc_key_list = [n2vc_key]
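# two-phase removal: first mark the VDUs to delete in the VNFR
# (mark_delete=True); they are actually removed from the record after RO
# finishes, in the scale_vnfr call at the end of this method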
7479 self.scale_vnfr(
7480 db_vnfr,
7481 vdu_scaling_info.get("vdu-create"),
7482 vdu_scaling_info.get("vdu-delete"),
7483 mark_delete=True,
7484 )
7485 # db_vnfr has been updated, update db_vnfrs to use it
7486 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7487 await self._instantiate_ng_ro(
7488 logging_text,
7489 nsr_id,
7490 db_nsd,
7491 db_nsr,
7492 db_nslcmop,
7493 db_vnfrs,
7494 db_vnfds,
7495 n2vc_key_list,
7496 stage=stage,
7497 start_deploy=time(),
7498 timeout_ns_deploy=self.timeout.ns_deploy,
7499 )
7500 if vdu_scaling_info.get("vdu-delete"):
7501 self.scale_vnfr(
7502 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7503 )
7504
7505 async def extract_prometheus_scrape_jobs(
7506 self,
7507 ee_id: str,
7508 artifact_path: str,
7509 ee_config_descriptor: dict,
7510 vnfr_id: str,
7511 nsr_id: str,
7512 target_ip: str,
7513 element_type: str,
7514 vnf_member_index: str = "",
7515 vdu_id: str = "",
7516 vdu_index: int = None,
7517 kdu_name: str = "",
7518 kdu_index: int = None,
7519 ) -> list:
7520 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7521 This method will wait until the corresponding VDU or KDU is fully instantiated
7522
7523 Args:
7524 ee_id (str): Execution Environment ID
7525 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7526 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7527 vnfr_id (str): VNFR ID where this EE applies
7528 nsr_id (str): NSR ID where this EE applies
7529 target_ip (str): VDU/KDU instance IP address
7530 element_type (str): NS or VNF or VDU or KDU
7531 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7532 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7533 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7534 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7535 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7536
7537 Raises:
7538 LcmException: When the VDU or KDU instance is not found within an hour
7539
7540 Returns:
7541 list: Prometheus scrape jobs
7542 """
7543 # default the vdur and kdur names to an empty string, to avoid any later
7544 # problems with Prometheus when the element type is not VDU or KDU
7545 vdur_name = ""
7546 kdur_name = ""
7547
7548 # look for a file called 'prometheus*.j2' and, if present, read its content
7549 artifact_content = self.fs.dir_ls(artifact_path)
7550 job_file = next(
7551 (
7552 f
7553 for f in artifact_content
7554 if f.startswith("prometheus") and f.endswith(".j2")
7555 ),
7556 None,
7557 )
7558 if not job_file:
7559 return
7560 self.logger.debug("Artifact path{}".format(artifact_path))
7561 self.logger.debug("job file{}".format(job_file))
7562 with self.fs.file_open((artifact_path, job_file), "r") as f:
7563 job_data = f.read()
7564
7565 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7566 if element_type in ("VDU", "KDU"):
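# poll the VNFR up to 360 times, 10 seconds apart (~1 hour), until the
# VDUR/KDUR record exposes its 'name'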
7567 for _ in range(360):
7568 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7569 if vdu_id and vdu_index is not None:
7570 vdur = next(
7571 (
7572 x
7573 for x in get_iterable(db_vnfr, "vdur")
7574 if (
7575 x.get("vdu-id-ref") == vdu_id
7576 and x.get("count-index") == vdu_index
7577 )
7578 ),
7579 {},
7580 )
7581 if vdur.get("name"):
7582 vdur_name = vdur.get("name")
7583 break
7584 if kdu_name and kdu_index is not None:
7585 kdur = next(
7586 (
7587 x
7588 for x in get_iterable(db_vnfr, "kdur")
7589 if (
7590 x.get("kdu-name") == kdu_name
7591 and x.get("count-index") == kdu_index
7592 )
7593 ),
7594 {},
7595 )
7596 if kdur.get("name"):
7597 kdur_name = kdur.get("name")
7598 break
7599
7600 await asyncio.sleep(10)
7601 else:
7602 if vdu_id and vdu_index is not None:
7603 raise LcmException(
7604 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7605 )
7606 if kdu_name and kdu_index is not None:
7607 raise LcmException(
7608 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7609 )
7610
7611 # TODO get_service
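# when an execution environment exists, scrape the exporter pod through
# the "<service>-<metric-service>" host on the fixed port 80; otherwise
# scrape the target directly using metric-port and metric-path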
7612 if ee_id is not None:
7613 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7614 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7615 host_port = "80"
7616 vnfr_id = vnfr_id.replace("-", "")
7617 variables = {
7618 "JOB_NAME": vnfr_id,
7619 "TARGET_IP": target_ip,
7620 "EXPORTER_POD_IP": host_name,
7621 "EXPORTER_POD_PORT": host_port,
7622 "NSR_ID": nsr_id,
7623 "VNF_MEMBER_INDEX": vnf_member_index,
7624 "VDUR_NAME": vdur_name,
7625 "KDUR_NAME": kdur_name,
7626 "ELEMENT_TYPE": element_type,
7627 }
7628 else:
7629 metric_path = ee_config_descriptor["metric-path"]
7630 target_port = ee_config_descriptor["metric-port"]
7631 vnfr_id = vnfr_id.replace("-", "")
7632 variables = {
7633 "JOB_NAME": vnfr_id,
7634 "TARGET_IP": target_ip,
7635 "TARGET_PORT": target_port,
7636 "METRIC_PATH": metric_path,
7637 }
7638
7639 job_list = parse_job(job_data, variables)
7640 # ensure the job_name contains the vnfr_id, and add nsr_id/vnfr_id metadata
7641 for job in job_list:
7642 if (
7643 not isinstance(job.get("job_name"), str)
7644 or vnfr_id not in job["job_name"]
7645 ):
7646 job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
7647 job["nsr_id"] = nsr_id
7648 job["vnfr_id"] = vnfr_id
7649 return job_list
7650
7651 async def rebuild_start_stop(
7652 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7653 ):
7654 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7655 self.logger.info(logging_text + "Enter")
7656 stage = ["Preparing the environment", ""]
7657 # database nsrs record
7658 db_nsr_update = {}
7659 vdu_vim_name = None
7660 vim_vm_id = None
7661 # record the start time used to compute the RO operation timeout
7662 start_deploy = time()
7663 try:
7664 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7665 vim_account_id = db_vnfr.get("vim-account-id")
7666 vim_info_key = "vim:" + vim_account_id
7667 vdu_id = additional_param["vdu_id"]
7668 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7669 vdur = find_in_list(
7670 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7671 )
7672 if vdur:
7673 vdu_vim_name = vdur["name"]
7674 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7675 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
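# vim_info is expected to hold a single entry keyed as
# "vim:<vim_account_id>"; that key identifies the target VIM for RO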
7676 else:
7677 raise LcmException("Target vdu is not found")
7678 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7679 # wait for any previous tasks in progress
7680 stage[1] = "Waiting for previous operations to terminate"
7681 self.logger.info(stage[1])
7682 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7683
7684 stage[1] = "Reading from database."
7685 self.logger.info(stage[1])
7686 self._write_ns_status(
7687 nsr_id=nsr_id,
7688 ns_state=None,
7689 current_operation=operation_type.upper(),
7690 current_operation_id=nslcmop_id,
7691 )
7692 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7693
7694 # read from db: ns
7695 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7696 db_nsr_update["operational-status"] = operation_type
7697 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7698 # Payload for RO
7699 desc = {
7700 operation_type: {
7701 "vim_vm_id": vim_vm_id,
7702 "vnf_id": vnf_id,
7703 "vdu_index": additional_param["count-index"],
7704 "vdu_id": vdur["id"],
7705 "target_vim": target_vim,
7706 "vim_account_id": vim_account_id,
7707 }
7708 }
7709 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7710 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7711 self.logger.info("ro nsr id: {}".format(nsr_id))
7712 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7713 self.logger.info("response from RO: {}".format(result_dict))
7714 action_id = result_dict["action_id"]
7715 await self._wait_ng_ro(
7716 nsr_id,
7717 action_id,
7718 nslcmop_id,
7719 start_deploy,
7720 self.timeout.operate,
7721 None,
7722 "start_stop_rebuild",
7723 )
7724 return "COMPLETED", "Done"
7725 except (ROclient.ROClientException, DbException, LcmException) as e:
7726 self.logger.error("Exit Exception {}".format(e))
7727 exc = e
7728 except asyncio.CancelledError:
7729 self.logger.error("Cancelled Exception while '{}'".format(stage))
7730 exc = "Operation was cancelled"
7731 except Exception as e:
7732 exc = traceback.format_exc()
7733 self.logger.critical(
7734 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7735 )
7736 return "FAILED", "Error in operate VNF {}".format(exc)
7737
7738 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7739 """
7740 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7741
7742 :param: vim_account_id: VIM Account ID
7743
7744 :return: (cloud_name, cloud_credential)
7745 """
7746 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7747 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7748
7749 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7750 """
7751 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7752
7753 :param: vim_account_id: VIM Account ID
7754
7755 :return: (cloud_name, cloud_credential)
7756 """
7757 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7758 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7759
7760 async def migrate(self, nsr_id, nslcmop_id):
7761 """
7762 Migrate VNF and VDU instances in an NS
7763
7764 :param: nsr_id: NS Instance ID
7765 :param: nslcmop_id: nslcmop ID of migrate
7766
7767 """
7768 # Try to lock HA task here
7769 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7770 if not task_is_locked_by_me:
7771 return
7772 logging_text = "Task ns={} migrate ".format(nsr_id)
7773 self.logger.debug(logging_text + "Enter")
7774 # get all needed from database
7775 db_nslcmop = None
7776 db_nslcmop_update = {}
7777 nslcmop_operation_state = None
7778 db_nsr_update = {}
7779 target = {}
7780 exc = None
7781 # record the start time used to compute the RO operation timeout
7782 start_deploy = time()
7783
7784 try:
7785 # wait for any previous tasks in progress
7786 step = "Waiting for previous operations to terminate"
7787 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7788
7789 self._write_ns_status(
7790 nsr_id=nsr_id,
7791 ns_state=None,
7792 current_operation="MIGRATING",
7793 current_operation_id=nslcmop_id,
7794 )
7795 step = "Getting nslcmop from database"
7796 self.logger.debug(
7797 step + " after having waited for previous tasks to be completed"
7798 )
7799 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7800 migrate_params = db_nslcmop.get("operationParams")
7801
7802 target = {}
7803 target.update(migrate_params)
7804 desc = await self.RO.migrate(nsr_id, target)
7805 self.logger.debug("RO return > {}".format(desc))
7806 action_id = desc["action_id"]
7807 await self._wait_ng_ro(
7808 nsr_id,
7809 action_id,
7810 nslcmop_id,
7811 start_deploy,
7812 self.timeout.migrate,
7813 operation="migrate",
7814 )
7815 except (ROclient.ROClientException, DbException, LcmException) as e:
7816 self.logger.error("Exit Exception {}".format(e))
7817 exc = e
7818 except asyncio.CancelledError:
7819 self.logger.error("Cancelled Exception while '{}'".format(step))
7820 exc = "Operation was cancelled"
7821 except Exception as e:
7822 exc = traceback.format_exc()
7823 self.logger.critical(
7824 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7825 )
7826 finally:
7827 self._write_ns_status(
7828 nsr_id=nsr_id,
7829 ns_state=None,
7830 current_operation="IDLE",
7831 current_operation_id=None,
7832 )
7833 if exc:
7834 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7835 nslcmop_operation_state = "FAILED"
7836 else:
7837 nslcmop_operation_state = "COMPLETED"
7838 db_nslcmop_update["detailed-status"] = "Done"
7839 db_nsr_update["detailed-status"] = "Done"
7840
7841 self._write_op_status(
7842 op_id=nslcmop_id,
7843 stage="",
7844 error_message="",
7845 operation_state=nslcmop_operation_state,
7846 other_update=db_nslcmop_update,
7847 )
7848 if nslcmop_operation_state:
7849 try:
7850 msg = {
7851 "nsr_id": nsr_id,
7852 "nslcmop_id": nslcmop_id,
7853 "operationState": nslcmop_operation_state,
7854 }
7855 await self.msg.aiowrite("ns", "migrated", msg)
7856 except Exception as e:
7857 self.logger.error(
7858 logging_text + "kafka_write notification Exception {}".format(e)
7859 )
7860 self.logger.debug(logging_text + "Exit")
7861 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7862
7863 async def heal(self, nsr_id, nslcmop_id):
7864 """
7865 Heal NS
7866
7867 :param nsr_id: ns instance to heal
7868 :param nslcmop_id: operation to run
7869 :return:
7870 """
7871
7872 # Try to lock HA task here
7873 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7874 if not task_is_locked_by_me:
7875 return
7876
7877 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7878 stage = ["", "", ""]
7879 tasks_dict_info = {}
7880 # ^ stage, step, VIM progress
7881 self.logger.debug(logging_text + "Enter")
7882 # get all needed from database
7883 db_nsr = None
7884 db_nslcmop_update = {}
7885 db_nsr_update = {}
7886 db_vnfrs = {} # vnf's info indexed by _id
7887 exc = None
7888 old_operational_status = ""
7889 old_config_status = ""
7890 nsi_id = None
7891 try:
7892 # wait for any previous tasks in progress
7893 step = "Waiting for previous operations to terminate"
7894 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7895 self._write_ns_status(
7896 nsr_id=nsr_id,
7897 ns_state=None,
7898 current_operation="HEALING",
7899 current_operation_id=nslcmop_id,
7900 )
7901
7902 step = "Getting nslcmop from database"
7903 self.logger.debug(
7904 step + " after having waited for previous tasks to be completed"
7905 )
7906 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7907
7908 step = "Getting nsr from database"
7909 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7910 old_operational_status = db_nsr["operational-status"]
7911 old_config_status = db_nsr["config-status"]
7912
7913 db_nsr_update = {
7914 "_admin.deployed.RO.operational-status": "healing",
7915 }
7916 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7917
7918 step = "Sending heal order to VIM"
7919 await self.heal_RO(
7920 logging_text=logging_text,
7921 nsr_id=nsr_id,
7922 db_nslcmop=db_nslcmop,
7923 stage=stage,
7924 )
7925 # VCA tasks
7926 # read from db: nsd
7927 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7928 self.logger.debug(logging_text + stage[1])
7929 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7930 self.fs.sync(db_nsr["nsd-id"])
7931 db_nsr["nsd"] = nsd
7932 # read from db: vnfr's of this ns
7933 step = "Getting vnfrs from db"
7934 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7935 for vnfr in db_vnfrs_list:
7936 db_vnfrs[vnfr["_id"]] = vnfr
7937 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7938
7939 # Check for each target VNF
7940 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7941 for target_vnf in target_list:
7942 # Find this VNF in the list from DB
7943 vnfr_id = target_vnf.get("vnfInstanceId", None)
7944 if vnfr_id:
7945 db_vnfr = db_vnfrs[vnfr_id]
7946 vnfd_id = db_vnfr.get("vnfd-id")
7947 vnfd_ref = db_vnfr.get("vnfd-ref")
7948 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7949 base_folder = vnfd["_admin"]["storage"]
7950 vdu_id = None
7951 vdu_index = 0
7952 vdu_name = None
7953 kdu_name = None
7954 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7955 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7956
7957 # Check each target VDU and deploy N2VC
7958 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7959 "vdu", []
7960 )
7961 if not target_vdu_list:
7962 # build the target VDU list from the existing VDURs
7963 target_vdu_list = []
7964 for existing_vdu in db_vnfr.get("vdur"):
7965 vdu_name = existing_vdu.get("vdu-name", None)
7966 vdu_index = existing_vdu.get("count-index", 0)
7967 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7968 "run-day1", False
7969 )
7970 vdu_to_be_healed = {
7971 "vdu-id": vdu_name,
7972 "count-index": vdu_index,
7973 "run-day1": vdu_run_day1,
7974 }
7975 target_vdu_list.append(vdu_to_be_healed)
7976 for target_vdu in target_vdu_list:
7977 deploy_params_vdu = target_vdu
7978 # set the VNF-level run-day1 value if no VDU-level value exists
7979 if not deploy_params_vdu.get("run-day1") and target_vnf.get(
7980 "additionalParams", {}
7981 ).get("run-day1"):
7982 deploy_params_vdu["run-day1"] = target_vnf[
7983 "additionalParams"
7984 ].get("run-day1")
7985 vdu_name = target_vdu.get("vdu-id", None)
7986 # TODO: Get vdu_id from vdud.
7987 vdu_id = vdu_name
7988 # for a multi-instance VDU, count-index is mandatory
7989 # for a single-instance VDU, count-index is 0
7990 vdu_index = target_vdu.get("count-index", 0)
7991
7992 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7993 stage[1] = "Deploying Execution Environments."
7994 self.logger.debug(logging_text + stage[1])
7995
7996 # VNF-level charm. Normal case for proxy charms.
7997 # If the target instance is the management machine, continue with the actions: recreate the EE for native charms or reinject the juju key for proxy charms.
7998 descriptor_config = get_configuration(vnfd, vnfd_ref)
7999 if descriptor_config:
8000 # Continue if healed machine is management machine
8001 vnf_ip_address = db_vnfr.get("ip-address")
8002 target_instance = None
8003 for instance in db_vnfr.get("vdur", None):
8004 if (
8005 instance["vdu-name"] == vdu_name
8006 and instance["count-index"] == vdu_index
8007 ):
8008 target_instance = instance
8009 break
8010 if target_instance and vnf_ip_address == target_instance.get("ip-address"):
8011 self._heal_n2vc(
8012 logging_text=logging_text
8013 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8014 member_vnf_index, vdu_name, vdu_index
8015 ),
8016 db_nsr=db_nsr,
8017 db_vnfr=db_vnfr,
8018 nslcmop_id=nslcmop_id,
8019 nsr_id=nsr_id,
8020 nsi_id=nsi_id,
8021 vnfd_id=vnfd_ref,
8022 vdu_id=None,
8023 kdu_name=None,
8024 member_vnf_index=member_vnf_index,
8025 vdu_index=0,
8026 vdu_name=None,
8027 deploy_params=deploy_params_vdu,
8028 descriptor_config=descriptor_config,
8029 base_folder=base_folder,
8030 task_instantiation_info=tasks_dict_info,
8031 stage=stage,
8032 )
8033
8034 # VDU Level charm. Normal case with native charms.
8035 descriptor_config = get_configuration(vnfd, vdu_name)
8036 if descriptor_config:
8037 self._heal_n2vc(
8038 logging_text=logging_text
8039 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8040 member_vnf_index, vdu_name, vdu_index
8041 ),
8042 db_nsr=db_nsr,
8043 db_vnfr=db_vnfr,
8044 nslcmop_id=nslcmop_id,
8045 nsr_id=nsr_id,
8046 nsi_id=nsi_id,
8047 vnfd_id=vnfd_ref,
8048 vdu_id=vdu_id,
8049 kdu_name=kdu_name,
8050 member_vnf_index=member_vnf_index,
8051 vdu_index=vdu_index,
8052 vdu_name=vdu_name,
8053 deploy_params=deploy_params_vdu,
8054 descriptor_config=descriptor_config,
8055 base_folder=base_folder,
8056 task_instantiation_info=tasks_dict_info,
8057 stage=stage,
8058 )
8059
8060 except (
8061 ROclient.ROClientException,
8062 DbException,
8063 LcmException,
8064 NgRoException,
8065 ) as e:
8066 self.logger.error(logging_text + "Exit Exception {}".format(e))
8067 exc = e
8068 except asyncio.CancelledError:
8069 self.logger.error(
8070 logging_text + "Cancelled Exception while '{}'".format(step)
8071 )
8072 exc = "Operation was cancelled"
8073 except Exception as e:
8074 exc = traceback.format_exc()
8075 self.logger.critical(
8076 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8077 exc_info=True,
8078 )
8079 finally:
8080 if tasks_dict_info:
8081 stage[1] = "Waiting for healing pending tasks."
8082 self.logger.debug(logging_text + stage[1])
8083 exc = await self._wait_for_tasks(
8084 logging_text,
8085 tasks_dict_info,
8086 self.timeout.ns_deploy,
8087 stage,
8088 nslcmop_id,
8089 nsr_id=nsr_id,
8090 )
8091 if exc:
8092 db_nslcmop_update[
8093 "detailed-status"
8094 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
8095 nslcmop_operation_state = "FAILED"
8096 if db_nsr:
8097 db_nsr_update["operational-status"] = old_operational_status
8098 db_nsr_update["config-status"] = old_config_status
8099 db_nsr_update[
8100 "detailed-status"
8101 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
8102 for task, task_name in tasks_dict_info.items():
8103 if not task.done() or task.cancelled() or task.exception():
8104 if task_name.startswith(self.task_name_deploy_vca):
8105 # A N2VC task is pending
8106 db_nsr_update["config-status"] = "failed"
8107 else:
8108 # RO task is pending
8109 db_nsr_update["operational-status"] = "failed"
8110 else:
8111 error_description_nslcmop = None
8112 nslcmop_operation_state = "COMPLETED"
8113 db_nslcmop_update["detailed-status"] = "Done"
8114 db_nsr_update["detailed-status"] = "Done"
8115 db_nsr_update["operational-status"] = "running"
8116 db_nsr_update["config-status"] = "configured"
8117
8118 self._write_op_status(
8119 op_id=nslcmop_id,
8120 stage="",
8121 error_message=error_description_nslcmop,
8122 operation_state=nslcmop_operation_state,
8123 other_update=db_nslcmop_update,
8124 )
8125 if db_nsr:
8126 self._write_ns_status(
8127 nsr_id=nsr_id,
8128 ns_state=None,
8129 current_operation="IDLE",
8130 current_operation_id=None,
8131 other_update=db_nsr_update,
8132 )
8133
8134 if nslcmop_operation_state:
8135 try:
8136 msg = {
8137 "nsr_id": nsr_id,
8138 "nslcmop_id": nslcmop_id,
8139 "operationState": nslcmop_operation_state,
8140 }
8141 await self.msg.aiowrite("ns", "healed", msg)
8142 except Exception as e:
8143 self.logger.error(
8144 logging_text + "kafka_write notification Exception {}".format(e)
8145 )
8146 self.logger.debug(logging_text + "Exit")
8147 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8148
8149 async def heal_RO(
8150 self,
8151 logging_text,
8152 nsr_id,
8153 db_nslcmop,
8154 stage,
8155 ):
8156 """
8157 Heal at RO
8158 :param logging_text: prefix text to use at logging
8159 :param nsr_id: nsr identity
8160 :param db_nslcmop: database content of ns operation, in this case, 'heal'
8161 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8162 :return: None or exception
8163 """
8164
8165 def get_vim_account(vim_account_id):
8166 nonlocal db_vims
8167 if vim_account_id in db_vims:
8168 return db_vims[vim_account_id]
8169 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8170 db_vims[vim_account_id] = db_vim
8171 return db_vim
8172
8173 try:
8174 start_heal = time()
8175 ns_params = db_nslcmop.get("operationParams")
8176 if ns_params and ns_params.get("timeout_ns_heal"):
8177 timeout_ns_heal = ns_params["timeout_ns_heal"]
8178 else:
8179 timeout_ns_heal = self.timeout.ns_heal
8180
8181 db_vims = {}
8182
8183 nslcmop_id = db_nslcmop["_id"]
8184 target = {
8185 "action_id": nslcmop_id,
8186 }
8187 self.logger.warning(
8188 "db_nslcmop={} and timeout_ns_heal={}".format(
8189 db_nslcmop, timeout_ns_heal
8190 )
8191 )
8192 target.update(db_nslcmop.get("operationParams", {}))
8193
8194 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8195 desc = await self.RO.recreate(nsr_id, target)
8196 self.logger.debug("RO return > {}".format(desc))
8197 action_id = desc["action_id"]
8198 # wait for RO to complete; otherwise reinjecting the juju key could find the VM still in state 'Deleted' at RO
8199 await self._wait_ng_ro(
8200 nsr_id,
8201 action_id,
8202 nslcmop_id,
8203 start_heal,
8204 timeout_ns_heal,
8205 stage,
8206 operation="healing",
8207 )
8208
8209 # Updating NSR
8210 db_nsr_update = {
8211 "_admin.deployed.RO.operational-status": "running",
8212 "detailed-status": " ".join(stage),
8213 }
8214 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8215 self._write_op_status(nslcmop_id, stage)
8216 self.logger.debug(
8217 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8218 )
8219
8220 except Exception as e:
8221 stage[2] = "ERROR healing at VIM"
8222 # self.set_vnfr_at_error(db_vnfrs, str(e))
8223 self.logger.error(
8224 "Error healing at VIM {}".format(e),
8225 exc_info=not isinstance(
8226 e,
8227 (
8228 ROclient.ROClientException,
8229 LcmException,
8230 DbException,
8231 NgRoException,
8232 ),
8233 ),
8234 )
8235 raise
8236
8237 def _heal_n2vc(
8238 self,
8239 logging_text,
8240 db_nsr,
8241 db_vnfr,
8242 nslcmop_id,
8243 nsr_id,
8244 nsi_id,
8245 vnfd_id,
8246 vdu_id,
8247 kdu_name,
8248 member_vnf_index,
8249 vdu_index,
8250 vdu_name,
8251 deploy_params,
8252 descriptor_config,
8253 base_folder,
8254 task_instantiation_info,
8255 stage,
8256 ):
8257 # launch instantiate_N2VC in an asyncio task and register the task object
8258 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8259 # if not found, create one entry and update database
8260 # fill db_nsr._admin.deployed.VCA.<index>
8261
8262 self.logger.debug(
8263 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
8264 )
8265
8266 charm_name = ""
8267 get_charm_name = False
8268 if "execution-environment-list" in descriptor_config:
8269 ee_list = descriptor_config.get("execution-environment-list", [])
8270 elif "juju" in descriptor_config:
8271 ee_list = [descriptor_config] # ns charms
8272 if "execution-environment-list" not in descriptor_config:
8273 # charm name is only required for ns charms
8274 get_charm_name = True
8275 else: # other types as script are not supported
8276 ee_list = []
8277
8278 for ee_item in ee_list:
8279 self.logger.debug(
8280 logging_text
8281 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8282 ee_item.get("juju"), ee_item.get("helm-chart")
8283 )
8284 )
8285 ee_descriptor_id = ee_item.get("id")
8286 if ee_item.get("juju"):
8287 vca_name = ee_item["juju"].get("charm")
8288 if get_charm_name:
8289 charm_name = self.find_charm_name(db_nsr, str(vca_name))
8290 vca_type = (
8291 "lxc_proxy_charm"
8292 if ee_item["juju"].get("charm") is not None
8293 else "native_charm"
8294 )
8295 if ee_item["juju"].get("cloud") == "k8s":
8296 vca_type = "k8s_proxy_charm"
8297 elif ee_item["juju"].get("proxy") is False:
8298 vca_type = "native_charm"
8299 elif ee_item.get("helm-chart"):
8300 vca_name = ee_item["helm-chart"]
8301 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
8302 vca_type = "helm"
8303 else:
8304 vca_type = "helm-v3"
8305 else:
8306 self.logger.debug(
8307 logging_text + "skipping non juju neither charm configuration"
8308 )
8309 continue
8310
8311 vca_index = -1
8312 for vca_index, vca_deployed in enumerate(
8313 db_nsr["_admin"]["deployed"]["VCA"]
8314 ):
8315 if not vca_deployed:
8316 continue
8317 if (
8318 vca_deployed.get("member-vnf-index") == member_vnf_index
8319 and vca_deployed.get("vdu_id") == vdu_id
8320 and vca_deployed.get("kdu_name") == kdu_name
8321 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8322 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8323 ):
8324 break
8325 else:
8326 # not found, create one.
8327 target = (
8328 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8329 )
8330 if vdu_id:
8331 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8332 elif kdu_name:
8333 target += "/kdu/{}".format(kdu_name)
8334 vca_deployed = {
8335 "target_element": target,
8336 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8337 "member-vnf-index": member_vnf_index,
8338 "vdu_id": vdu_id,
8339 "kdu_name": kdu_name,
8340 "vdu_count_index": vdu_index,
8341 "operational-status": "init", # TODO revise
8342 "detailed-status": "", # TODO revise
8343 "step": "initial-deploy", # TODO revise
8344 "vnfd_id": vnfd_id,
8345 "vdu_name": vdu_name,
8346 "type": vca_type,
8347 "ee_descriptor_id": ee_descriptor_id,
8348 "charm_name": charm_name,
8349 }
8350 vca_index += 1
8351
8352 # create VCA and configurationStatus in db
8353 db_dict = {
8354 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8355 "configurationStatus.{}".format(vca_index): dict(),
8356 }
8357 self.update_db_2("nsrs", nsr_id, db_dict)
8358
8359 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8360
8361 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8362 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8363 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8364
8365 # Launch task
8366 task_n2vc = asyncio.ensure_future(
8367 self.heal_N2VC(
8368 logging_text=logging_text,
8369 vca_index=vca_index,
8370 nsi_id=nsi_id,
8371 db_nsr=db_nsr,
8372 db_vnfr=db_vnfr,
8373 vdu_id=vdu_id,
8374 kdu_name=kdu_name,
8375 vdu_index=vdu_index,
8376 deploy_params=deploy_params,
8377 config_descriptor=descriptor_config,
8378 base_folder=base_folder,
8379 nslcmop_id=nslcmop_id,
8380 stage=stage,
8381 vca_type=vca_type,
8382 vca_name=vca_name,
8383 ee_config_descriptor=ee_item,
8384 )
8385 )
8386 self.lcm_tasks.register(
8387 "ns",
8388 nsr_id,
8389 nslcmop_id,
8390 "instantiate_N2VC-{}".format(vca_index),
8391 task_n2vc,
8392 )
8393 task_instantiation_info[
8394 task_n2vc
8395 ] = self.task_name_deploy_vca + " {}.{}".format(
8396 member_vnf_index or "", vdu_id or ""
8397 )
8398
8399 async def heal_N2VC(
8400 self,
8401 logging_text,
8402 vca_index,
8403 nsi_id,
8404 db_nsr,
8405 db_vnfr,
8406 vdu_id,
8407 kdu_name,
8408 vdu_index,
8409 config_descriptor,
8410 deploy_params,
8411 base_folder,
8412 nslcmop_id,
8413 stage,
8414 vca_type,
8415 vca_name,
8416 ee_config_descriptor,
8417 ):
8418 nsr_id = db_nsr["_id"]
8419 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
8420 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
8421 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
8422 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
8423 db_dict = {
8424 "collection": "nsrs",
8425 "filter": {"_id": nsr_id},
8426 "path": db_update_entry,
8427 }
8428 step = ""
8429 try:
8430 element_type = "NS"
8431 element_under_configuration = nsr_id
8432
8433 vnfr_id = None
8434 if db_vnfr:
8435 vnfr_id = db_vnfr["_id"]
8436 osm_config["osm"]["vnf_id"] = vnfr_id
8437
8438 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
8439
8440 if vca_type == "native_charm":
8441 index_number = 0
8442 else:
8443 index_number = vdu_index or 0
8444
8445 if vnfr_id:
8446 element_type = "VNF"
8447 element_under_configuration = vnfr_id
8448 namespace += ".{}-{}".format(vnfr_id, index_number)
8449 if vdu_id:
8450 namespace += ".{}-{}".format(vdu_id, index_number)
8451 element_type = "VDU"
8452 element_under_configuration = "{}-{}".format(vdu_id, index_number)
8453 osm_config["osm"]["vdu_id"] = vdu_id
8454 elif kdu_name:
8455 namespace += ".{}".format(kdu_name)
8456 element_type = "KDU"
8457 element_under_configuration = kdu_name
8458 osm_config["osm"]["kdu_name"] = kdu_name
8459
8460 # Get artifact path
8461 if base_folder["pkg-dir"]:
8462 artifact_path = "{}/{}/{}/{}".format(
8463 base_folder["folder"],
8464 base_folder["pkg-dir"],
8465 "charms"
8466 if vca_type
8467 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8468 else "helm-charts",
8469 vca_name,
8470 )
8471 else:
8472 artifact_path = "{}/Scripts/{}/{}/".format(
8473 base_folder["folder"],
8474 "charms"
8475 if vca_type
8476 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8477 else "helm-charts",
8478 vca_name,
8479 )
8480
8481 self.logger.debug("Artifact path > {}".format(artifact_path))
8482
8483 # get initial_config_primitive_list that applies to this element
8484 initial_config_primitive_list = config_descriptor.get(
8485 "initial-config-primitive"
8486 )
8487
8488 self.logger.debug(
8489 "Initial config primitive list > {}".format(
8490 initial_config_primitive_list
8491 )
8492 )
8493
8494 # add config if not present for NS charm
8495 ee_descriptor_id = ee_config_descriptor.get("id")
8496 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
8497 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
8498 initial_config_primitive_list, vca_deployed, ee_descriptor_id
8499 )
8500
8501 self.logger.debug(
8502 "Initial config primitive list #2 > {}".format(
8503 initial_config_primitive_list
8504 )
8505 )
8506 # n2vc_redesign STEP 3.1
8507 # find old ee_id if exists
8508 ee_id = vca_deployed.get("ee_id")
8509
8510 vca_id = self.get_vca_id(db_vnfr, db_nsr)
8511 # create or register execution environment in VCA. Only for native charms when healing
8512 if vca_type == "native_charm":
8513 step = "Waiting to VM being up and getting IP address"
8514 self.logger.debug(logging_text + step)
8515 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8516 logging_text,
8517 nsr_id,
8518 vnfr_id,
8519 vdu_id,
8520 vdu_index,
8521 user=None,
8522 pub_key=None,
8523 )
8524 credentials = {"hostname": rw_mgmt_ip}
8525 # get username
8526 username = deep_get(
8527 config_descriptor, ("config-access", "ssh-access", "default-user")
8528 )
8529 # TODO remove this when the changes on IM regarding config-access:ssh-access:default-user are
8530 # merged. Meanwhile, get the username from initial-config-primitive
8531 if not username and initial_config_primitive_list:
8532 for config_primitive in initial_config_primitive_list:
8533 for param in config_primitive.get("parameter", ()):
8534 if param["name"] == "ssh-username":
8535 username = param["value"]
8536 break
8537 if not username:
8538 raise LcmException(
8539 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8540 "'config-access.ssh-access.default-user'"
8541 )
8542 credentials["username"] = username
8543
8544 # n2vc_redesign STEP 3.2
8545 # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
8546 self._write_configuration_status(
8547 nsr_id=nsr_id,
8548 vca_index=vca_index,
8549 status="REGISTERING",
8550 element_under_configuration=element_under_configuration,
8551 element_type=element_type,
8552 )
8553
8554 step = "register execution environment {}".format(credentials)
8555 self.logger.debug(logging_text + step)
8556 ee_id = await self.vca_map[vca_type].register_execution_environment(
8557 credentials=credentials,
8558 namespace=namespace,
8559 db_dict=db_dict,
8560 vca_id=vca_id,
8561 )
8562
8563 # update ee_id in db
8564 db_dict_ee_id = {
8565 "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
8566 }
8567 self.update_db_2("nsrs", nsr_id, db_dict_ee_id)
8568
8569 # for compatibility with MON/POL modules, they need the model and application name in the database
8570 # TODO ask MON/POL whether assuming the format "model_name.application_name" is still needed
8571 # Not sure if this needs to be done when healing
8572 """
8573 ee_id_parts = ee_id.split(".")
8574 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8575 if len(ee_id_parts) >= 2:
8576 model_name = ee_id_parts[0]
8577 application_name = ee_id_parts[1]
8578 db_nsr_update[db_update_entry + "model"] = model_name
8579 db_nsr_update[db_update_entry + "application"] = application_name
8580 """
8581
8582 # n2vc_redesign STEP 3.3
8583 # Install configuration software. Only for native charms.
8584 step = "Install configuration Software"
8585
8586 self._write_configuration_status(
8587 nsr_id=nsr_id,
8588 vca_index=vca_index,
8589 status="INSTALLING SW",
8590 element_under_configuration=element_under_configuration,
8591 element_type=element_type,
8592 # other_update=db_nsr_update,
8593 other_update=None,
8594 )
8595
8596 # TODO check if already done
8597 self.logger.debug(logging_text + step)
8598 config = None
8599 if vca_type == "native_charm":
8600 config_primitive = next(
8601 (p for p in initial_config_primitive_list if p["name"] == "config"),
8602 None,
8603 )
8604 if config_primitive:
8605 config = self._map_primitive_params(
8606 config_primitive, {}, deploy_params
8607 )
8608 await self.vca_map[vca_type].install_configuration_sw(
8609 ee_id=ee_id,
8610 artifact_path=artifact_path,
8611 db_dict=db_dict,
8612 config=config,
8613 num_units=1,
8614 vca_id=vca_id,
8615 vca_type=vca_type,
8616 )
8617
8618 # write in db flag of configuration_sw already installed
8619 self.update_db_2(
8620 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
8621 )
8622
8623 # Not sure if this needs to be done when healing
8624 """
8625 # add relations for this VCA (wait for other peers related with this VCA)
8626 await self._add_vca_relations(
8627 logging_text=logging_text,
8628 nsr_id=nsr_id,
8629 vca_type=vca_type,
8630 vca_index=vca_index,
8631 )
8632 """
8633
8634 # if SSH access is required, then get the execution environment SSH public key;
8635 # if native charm, we have already waited for the VM to be up
8636 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
8637 pub_key = None
8638 user = None
8639 # self.logger.debug("get ssh key block")
8640 if deep_get(
8641 config_descriptor, ("config-access", "ssh-access", "required")
8642 ):
8643 # self.logger.debug("ssh key needed")
8644 # Needed to inject a ssh key
8645 user = deep_get(
8646 config_descriptor,
8647 ("config-access", "ssh-access", "default-user"),
8648 )
8649 step = "Install configuration Software, getting public ssh key"
8650 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
8651 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
8652 )
8653
8654 step = "Insert public key into VM user={} ssh_key={}".format(
8655 user, pub_key
8656 )
8657 else:
8658 # self.logger.debug("no need to get ssh key")
8659 step = "Waiting to VM being up and getting IP address"
8660 self.logger.debug(logging_text + step)
8661
8662 # n2vc_redesign STEP 5.1
8663 # wait for RO (ip-address) Insert pub_key into VM
8664 # IMPORTANT: we need to wait for RO to complete the healing operation.
8665 await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
8666 if vnfr_id:
8667 if kdu_name:
8668 rw_mgmt_ip = await self.wait_kdu_up(
8669 logging_text, nsr_id, vnfr_id, kdu_name
8670 )
8671 else:
8672 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
8673 logging_text,
8674 nsr_id,
8675 vnfr_id,
8676 vdu_id,
8677 vdu_index,
8678 user=user,
8679 pub_key=pub_key,
8680 )
8681 else:
8682 rw_mgmt_ip = None # This is for a NS configuration
8683
8684 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
8685
8686 # store rw_mgmt_ip in deploy params for later replacement
8687 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
8688
8689 # Day1 operations.
8690 # get run-day1 operation parameter
8691 runDay1 = deploy_params.get("run-day1", False)
8692 self.logger.debug(
8693 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
8694 )
8695 if runDay1:
8696 # n2vc_redesign STEP 6 Execute initial config primitive
8697 step = "execute initial config primitive"
8698
8699 # wait for dependent primitives execution (NS -> VNF -> VDU)
8700 if initial_config_primitive_list:
8701 await self._wait_dependent_n2vc(
8702 nsr_id, vca_deployed_list, vca_index
8703 )
8704
8705 # stage, in function of element type: vdu, kdu, vnf or ns
8706 my_vca = vca_deployed_list[vca_index]
8707 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
8708 # VDU or KDU
8709 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
8710 elif my_vca.get("member-vnf-index"):
8711 # VNF
8712 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
8713 else:
8714 # NS
8715 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
8716
8717 self._write_configuration_status(
8718 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
8719 )
8720
8721 self._write_op_status(op_id=nslcmop_id, stage=stage)
8722
8723 check_if_terminated_needed = True
8724 for initial_config_primitive in initial_config_primitive_list:
8725 # add ns_config_info to the params if this is an NS execution environment
8726 if not vca_deployed["member-vnf-index"]:
8727 deploy_params["ns_config_info"] = json.dumps(
8728 self._get_ns_config_info(nsr_id)
8729 )
8730 # TODO check if already done
8731 primitive_params_ = self._map_primitive_params(
8732 initial_config_primitive, {}, deploy_params
8733 )
8734
8735 step = "execute primitive '{}' params '{}'".format(
8736 initial_config_primitive["name"], primitive_params_
8737 )
8738 self.logger.debug(logging_text + step)
8739 await self.vca_map[vca_type].exec_primitive(
8740 ee_id=ee_id,
8741 primitive_name=initial_config_primitive["name"],
8742 params_dict=primitive_params_,
8743 db_dict=db_dict,
8744 vca_id=vca_id,
8745 vca_type=vca_type,
8746 )
8747 # Once some primitive has been executed, record in the db whether terminate primitives need to run later
8748 if check_if_terminated_needed:
8749 if config_descriptor.get("terminate-config-primitive"):
8750 self.update_db_2(
8751 "nsrs",
8752 nsr_id,
8753 {db_update_entry + "needed_terminate": True},
8754 )
8755 check_if_terminated_needed = False
8756
8757 # TODO register in database that primitive is done
8758
8759 # STEP 7 Configure metrics
8760 # Not sure if this needs to be done when healing
8761 """
8762 if vca_type == "helm" or vca_type == "helm-v3":
8763 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8764 ee_id=ee_id,
8765 artifact_path=artifact_path,
8766 ee_config_descriptor=ee_config_descriptor,
8767 vnfr_id=vnfr_id,
8768 nsr_id=nsr_id,
8769 target_ip=rw_mgmt_ip,
8770 )
8771 if prometheus_jobs:
8772 self.update_db_2(
8773 "nsrs",
8774 nsr_id,
8775 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8776 )
8777
8778 for job in prometheus_jobs:
8779 self.db.set_one(
8780 "prometheus_jobs",
8781 {"job_name": job["job_name"]},
8782 job,
8783 upsert=True,
8784 fail_on_empty=False,
8785 )
8786
8787 """
8788 step = "instantiated at VCA"
8789 self.logger.debug(logging_text + step)
8790
8791 self._write_configuration_status(
8792 nsr_id=nsr_id, vca_index=vca_index, status="READY"
8793 )
8794
8795 except Exception as e: # TODO not use Exception but N2VC exception
8796 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8797 if not isinstance(
8798 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
8799 ):
8800 self.logger.error(
8801 "Exception while {} : {}".format(step, e), exc_info=True
8802 )
8803 self._write_configuration_status(
8804 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
8805 )
8806 raise LcmException("{} {}".format(step, e)) from e
8807
8808 async def _wait_heal_ro(
8809 self,
8810 nsr_id,
8811 timeout=600,
8812 ):
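# poll the NSR every 15 seconds until RO reports an operational-status
# other than "healing", or until the timeout expires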
8813 start_time = time()
8814 while time() <= start_time + timeout:
8815 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8816 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8817 "operational-status"
8818 ]
8819 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8820 if operational_status_ro != "healing":
8821 break
8822 await asyncio.sleep(15)
8823 else:  # timeout reached
8824 raise NgRoException("Timeout waiting for ns to heal at RO")
8825
8826 async def vertical_scale(self, nsr_id, nslcmop_id):
8827 """
8828 Vertically scale the VDUs in an NS
8829
8830 :param: nsr_id: NS Instance ID
8831 :param: nslcmop_id: nslcmop ID of vertical scale
8832
8833 """
8834 # Try to lock HA task here
8835 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8836 if not task_is_locked_by_me:
8837 return
8838 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8839 self.logger.debug(logging_text + "Enter")
8840 # get all needed from database
8841 db_nslcmop = None
8842 db_nslcmop_update = {}
8843 nslcmop_operation_state = None
8844 db_nsr_update = {}
8845 target = {}
8846 exc = None
8847 # record the start time used to compute the RO operation timeout
8848 start_deploy = time()
8849
8850 try:
8851 # wait for any previous tasks in progress
8852 step = "Waiting for previous operations to terminate"
8853 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8854
8855 self._write_ns_status(
8856 nsr_id=nsr_id,
8857 ns_state=None,
8858 current_operation="VerticalScale",
8859 current_operation_id=nslcmop_id,
8860 )
8861 step = "Getting nslcmop from database"
8862 self.logger.debug(
8863 step + " after having waited for previous tasks to be completed"
8864 )
8865 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8866 operationParams = db_nslcmop.get("operationParams")
8867 target = {}
8868 target.update(operationParams)
8869 desc = await self.RO.vertical_scale(nsr_id, target)
8870 self.logger.debug("RO return > {}".format(desc))
8871 action_id = desc["action_id"]
8872 await self._wait_ng_ro(
8873 nsr_id,
8874 action_id,
8875 nslcmop_id,
8876 start_deploy,
8877 self.timeout.verticalscale,
8878 operation="verticalscale",
8879 )
8880 except (ROclient.ROClientException, DbException, LcmException) as e:
8881 self.logger.error("Exit Exception {}".format(e))
8882 exc = e
8883 except asyncio.CancelledError:
8884 self.logger.error("Cancelled Exception while '{}'".format(step))
8885 exc = "Operation was cancelled"
8886 except Exception as e:
8887 exc = traceback.format_exc()
8888 self.logger.critical(
8889 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8890 )
8891 finally:
8892 self._write_ns_status(
8893 nsr_id=nsr_id,
8894 ns_state=None,
8895 current_operation="IDLE",
8896 current_operation_id=None,
8897 )
8898 if exc:
8899 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8900 nslcmop_operation_state = "FAILED"
8901 else:
8902 nslcmop_operation_state = "COMPLETED"
8903 db_nslcmop_update["detailed-status"] = "Done"
8904 db_nsr_update["detailed-status"] = "Done"
8905
8906 self._write_op_status(
8907 op_id=nslcmop_id,
8908 stage="",
8909 error_message="",
8910 operation_state=nslcmop_operation_state,
8911 other_update=db_nslcmop_update,
8912 )
8913 if nslcmop_operation_state:
8914 try:
8915 msg = {
8916 "nsr_id": nsr_id,
8917 "nslcmop_id": nslcmop_id,
8918 "operationState": nslcmop_operation_state,
8919 }
8920 await self.msg.aiowrite("ns", "verticalscaled", msg)
8921 except Exception as e:
8922 self.logger.error(
8923 logging_text + "kafka_write notification Exception {}".format(e)
8924 )
8925 self.logger.debug(logging_text + "Exit")
8926 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")