Update ns.py to append osm to metric_name to be BWC
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm_conn import K8sHelmConnector
102 from n2vc.k8s_helm3_conn import K8sHelm3Connector
103 from n2vc.k8s_juju_conn import K8sJujuConnector
104
105 from osm_common.dbbase import DbException
106 from osm_common.fsbase import FsException
107
108 from osm_lcm.data_utils.database.database import Database
109 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
110 from osm_lcm.data_utils.wim import (
111 get_sdn_ports,
112 get_target_wim_attrs,
113 select_feasible_wim_account,
114 )
115
116 from n2vc.n2vc_juju_conn import N2VCJujuConnector
117 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
118
119 from osm_lcm.lcm_helm_conn import LCMHelmConn
120 from osm_lcm.osm_config import OsmConfigBuilder
121 from osm_lcm.prometheus import parse_job
122
123 from copy import copy, deepcopy
124 from time import time
125 from uuid import uuid4
126
127 from random import randint
128
129 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
130
131
class NsLcm(LcmBase):
    """Network Service lifecycle manager.

    Drives NS operations through the RO client (NgRoClient), N2VC/juju and
    helm based execution environments, and k8s cluster connectors, keeping
    state in the OSM common database ("nsrs"/"vnfrs" collections).
    """

    # Sentinel status codes used by the sub-operation lookup logic
    # (presumably returned by helpers defined later in this class — confirm).
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Human-readable task label for the VCA deployment task.
    task_name_deploy_vca = "Deploying VCA"
137
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message-bus client, forwarded to LcmBase
        :param lcm_tasks: LCM task registry, stored for task bookkeeping
        :param config: LcmCfg with timeout, RO and VCA configuration sections
        :param loop: asyncio event loop shared with the connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # shared database / filesystem singletons
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        # configuration sections
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju charms); db updates routed to _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm based execution-environment connector (same db callback as N2VC)
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # k8s cluster connectors: helm v2, helm v3 and juju bundles
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju k8s connector reports status through _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # kdu deployment type -> k8s connector
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # vca (execution environment) type -> connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # LCM operation type -> RO status-polling coroutine
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
225
226 @staticmethod
227 def increment_ip_mac(ip_mac, vm_index=1):
228 if not isinstance(ip_mac, str):
229 return ip_mac
230 try:
231 # try with ipv4 look for last dot
232 i = ip_mac.rfind(".")
233 if i > 0:
234 i += 1
235 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
236 # try with ipv6 or mac look for last colon. Operate in hex
237 i = ip_mac.rfind(":")
238 if i > 0:
239 i += 1
240 # format in hex, len can be 2 for mac or 4 for ipv6
241 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
242 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
243 )
244 except Exception:
245 pass
246 return None
247
248 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback fired by N2VC when juju data changes: refresh nsr status.

        Queries the current VCA status from juju, writes it to the nsr under
        "vcaStatus", tries to reconcile the per-VCA configurationStatus, and
        when the NS is READY/DEGRADED re-derives degradation from machine
        and application statuses.

        :param table: db table reported by the caller (only the filter is used here)
        :param filter: db filter; its "_id" is the nsr id
        :param path: db path of the changed element; the number after the
            final dot is the VCA index
        :param updated_data: changed data (not inspected directly)
        :param vca_id: optional VCA identifier forwarded to n2vc.get_status
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #     .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the trailing number of the reported path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key, so both
                # assignments below raise KeyError which is swallowed by the
                # except just after — the status is probably meant to be stored
                # with a dotted key like "configurationStatus.{}.status"; confirm.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # only transition between READY and DEGRADED, never other states
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
364
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id (its "_id" field)
        :param vca_id: optional VCA identifier forwarded to status_kdu
        :param cluster_type: key of self.k8scluster_map selecting the connector
            used to query status (e.g. "juju", "helm-chart-v3")
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #     .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            # ask the matching k8s connector for the full KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus keyed by nsr id
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
404
405 @staticmethod
406 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
407 try:
408 env = Environment(
409 undefined=StrictUndefined,
410 autoescape=select_autoescape(default_for_string=True, default=True),
411 )
412 template = env.from_string(cloud_init_text)
413 return template.render(additional_params or {})
414 except UndefinedError as e:
415 raise LcmException(
416 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
417 "file, must be provided in the instantiation parameters inside the "
418 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
419 )
420 except (TemplateError, TemplateNotFound) as e:
421 raise LcmException(
422 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
423 vnfd_id, vdu_id, e
424 )
425 )
426
427 def _get_vdu_cloud_init_content(self, vdu, vnfd):
428 cloud_init_content = cloud_init_file = None
429 try:
430 if vdu.get("cloud-init-file"):
431 base_folder = vnfd["_admin"]["storage"]
432 if base_folder["pkg-dir"]:
433 cloud_init_file = "{}/{}/cloud_init/{}".format(
434 base_folder["folder"],
435 base_folder["pkg-dir"],
436 vdu["cloud-init-file"],
437 )
438 else:
439 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
440 base_folder["folder"],
441 vdu["cloud-init-file"],
442 )
443 with self.fs.file_open(cloud_init_file, "r") as ci_file:
444 cloud_init_content = ci_file.read()
445 elif vdu.get("cloud-init"):
446 cloud_init_content = vdu["cloud-init"]
447
448 return cloud_init_content
449 except FsException as e:
450 raise LcmException(
451 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
452 vnfd["id"], vdu["id"], cloud_init_file, e
453 )
454 )
455
456 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
457 vdur = next(
458 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
459 )
460 additional_params = vdur.get("additionalParams")
461 return parse_yaml_strings(additional_params)
462
463 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
464 """
465 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
466 :param vnfd: input vnfd
467 :param new_id: overrides vnf id if provided
468 :param additionalParams: Instantiation params for VNFs provided
469 :param nsrId: Id of the NSR
470 :return: copy of vnfd
471 """
472 vnfd_RO = deepcopy(vnfd)
473 # remove unused by RO configuration, monitoring, scaling and internal keys
474 vnfd_RO.pop("_id", None)
475 vnfd_RO.pop("_admin", None)
476 vnfd_RO.pop("monitoring-param", None)
477 vnfd_RO.pop("scaling-group-descriptor", None)
478 vnfd_RO.pop("kdu", None)
479 vnfd_RO.pop("k8s-cluster", None)
480 if new_id:
481 vnfd_RO["id"] = new_id
482
483 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
484 for vdu in get_iterable(vnfd_RO, "vdu"):
485 vdu.pop("cloud-init-file", None)
486 vdu.pop("cloud-init", None)
487 return vnfd_RO
488
489 @staticmethod
490 def ip_profile_2_RO(ip_profile):
491 RO_ip_profile = deepcopy(ip_profile)
492 if "dns-server" in RO_ip_profile:
493 if isinstance(RO_ip_profile["dns-server"], list):
494 RO_ip_profile["dns-address"] = []
495 for ds in RO_ip_profile.pop("dns-server"):
496 RO_ip_profile["dns-address"].append(ds["address"])
497 else:
498 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
499 if RO_ip_profile.get("ip-version") == "ipv4":
500 RO_ip_profile["ip-version"] = "IPv4"
501 if RO_ip_profile.get("ip-version") == "ipv6":
502 RO_ip_profile["ip-version"] = "IPv6"
503 if "dhcp-params" in RO_ip_profile:
504 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
505 return RO_ip_profile
506
507 def _get_ro_vim_id_for_vim_account(self, vim_account):
508 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
509 if db_vim["_admin"]["operationalState"] != "ENABLED":
510 raise LcmException(
511 "VIM={} is not available. operationalState={}".format(
512 vim_account, db_vim["_admin"]["operationalState"]
513 )
514 )
515 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
516 return RO_vim_id
517
518 def get_ro_wim_id_for_wim_account(self, wim_account):
519 if isinstance(wim_account, str):
520 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
521 if db_wim["_admin"]["operationalState"] != "ENABLED":
522 raise LcmException(
523 "WIM={} is not available. operationalState={}".format(
524 wim_account, db_wim["_admin"]["operationalState"]
525 )
526 )
527 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
528 return RO_wim_id
529 else:
530 return wim_account
531
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Add/remove vdur entries of a vnfr in the database (scale out/in).

        :param db_vnfr: vnfr record; its "vdur" list is refreshed from the
            database before returning
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, vdurs are only marked status=DELETING
            instead of being pulled from the record
        :raises LcmException: when scaling out and neither a vdur nor a
            vdur-template exists for the requested vdu
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the source vdur and reset per-instance fields
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica,
                        # dynamic ones are cleared for re-assignment
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
642
643 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
644 """
645 Updates database nsr with the RO info for the created vld
646 :param ns_update_nsr: dictionary to be filled with the updated info
647 :param db_nsr: content of db_nsr. This is also modified
648 :param nsr_desc_RO: nsr descriptor from RO
649 :return: Nothing, LcmException is raised on errors
650 """
651
652 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
653 for net_RO in get_iterable(nsr_desc_RO, "nets"):
654 if vld["id"] != net_RO.get("ns_net_osm_id"):
655 continue
656 vld["vim-id"] = net_RO.get("vim_net_id")
657 vld["name"] = net_RO.get("vim_name")
658 vld["status"] = net_RO.get("status")
659 vld["status-detailed"] = net_RO.get("error_msg")
660 ns_update_nsr["vld.{}".format(vld_index)] = vld
661 break
662 else:
663 raise LcmException(
664 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
665 )
666
667 def set_vnfr_at_error(self, db_vnfrs, error_text):
668 try:
669 for db_vnfr in db_vnfrs.values():
670 vnfr_update = {"status": "ERROR"}
671 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
672 if "status" not in vdur:
673 vdur["status"] = "ERROR"
674 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
675 if error_text:
676 vdur["status-detailed"] = str(error_text)
677 vnfr_update[
678 "vdur.{}.status-detailed".format(vdu_index)
679 ] = "ERROR"
680 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
681 except DbException as e:
682 self.logger.error("Cannot update vnf. {}".format(e))
683
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        # each inner for/else raises when the RO info lacks a matching element
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                # management ip: first address of the (";"-separated) RO list
                if vnf_RO.get("ip_address"):
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM, skip
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # match the replica by counting RO vms of this vdu
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses reported by the VIM
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
780
781 def _get_ns_config_info(self, nsr_id):
782 """
783 Generates a mapping between vnf,vdu elements and the N2VC id
784 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
785 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
786 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
787 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
788 """
789 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
790 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
791 mapping = {}
792 ns_config_info = {"osm-config-mapping": mapping}
793 for vca in vca_deployed_list:
794 if not vca["member-vnf-index"]:
795 continue
796 if not vca["vdu_id"]:
797 mapping[vca["member-vnf-index"]] = vca["application"]
798 else:
799 mapping[
800 "{}.{}.{}".format(
801 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
802 )
803 ] = vca["application"]
804 return ns_config_info
805
806 async def _instantiate_ng_ro(
807 self,
808 logging_text,
809 nsr_id,
810 nsd,
811 db_nsr,
812 db_nslcmop,
813 db_vnfrs,
814 db_vnfds,
815 n2vc_key_list,
816 stage,
817 start_deploy,
818 timeout_ns_deploy,
819 ):
820 db_vims = {}
821
822 def get_vim_account(vim_account_id):
823 nonlocal db_vims
824 if vim_account_id in db_vims:
825 return db_vims[vim_account_id]
826 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
827 db_vims[vim_account_id] = db_vim
828 return db_vim
829
830 # modify target_vld info with instantiation parameters
831 def parse_vld_instantiation_params(
832 target_vim, target_vld, vld_params, target_sdn
833 ):
834 if vld_params.get("ip-profile"):
835 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
836 vld_params["ip-profile"]
837 )
838 if vld_params.get("provider-network"):
839 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
840 "provider-network"
841 ]
842 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
843 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
844 "provider-network"
845 ]["sdn-ports"]
846
847 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
848 # if wim_account_id is specified in vld_params, validate if it is feasible.
849 wim_account_id, db_wim = select_feasible_wim_account(
850 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
851 )
852
853 if wim_account_id:
854 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
855 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
856 # update vld_params with correct WIM account Id
857 vld_params["wimAccountId"] = wim_account_id
858
859 target_wim = "wim:{}".format(wim_account_id)
860 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
861 sdn_ports = get_sdn_ports(vld_params, db_wim)
862 if len(sdn_ports) > 0:
863 target_vld["vim_info"][target_wim] = target_wim_attrs
864 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
865
866 self.logger.debug(
867 "Target VLD with WIM data: {:s}".format(str(target_vld))
868 )
869
870 for param in ("vim-network-name", "vim-network-id"):
871 if vld_params.get(param):
872 if isinstance(vld_params[param], dict):
873 for vim, vim_net in vld_params[param].items():
874 other_target_vim = "vim:" + vim
875 populate_dict(
876 target_vld["vim_info"],
877 (other_target_vim, param.replace("-", "_")),
878 vim_net,
879 )
880 else: # isinstance str
881 target_vld["vim_info"][target_vim][
882 param.replace("-", "_")
883 ] = vld_params[param]
884 if vld_params.get("common_id"):
885 target_vld["common_id"] = vld_params.get("common_id")
886
887 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
888 def update_ns_vld_target(target, ns_params):
889 for vnf_params in ns_params.get("vnf", ()):
890 if vnf_params.get("vimAccountId"):
891 target_vnf = next(
892 (
893 vnfr
894 for vnfr in db_vnfrs.values()
895 if vnf_params["member-vnf-index"]
896 == vnfr["member-vnf-index-ref"]
897 ),
898 None,
899 )
900 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
901 if not vdur:
902 return
903 for a_index, a_vld in enumerate(target["ns"]["vld"]):
904 target_vld = find_in_list(
905 get_iterable(vdur, "interfaces"),
906 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
907 )
908
909 vld_params = find_in_list(
910 get_iterable(ns_params, "vld"),
911 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
912 )
913 if target_vld:
914 if vnf_params.get("vimAccountId") not in a_vld.get(
915 "vim_info", {}
916 ):
917 target_vim_network_list = [
918 v for _, v in a_vld.get("vim_info").items()
919 ]
920 target_vim_network_name = next(
921 (
922 item.get("vim_network_name", "")
923 for item in target_vim_network_list
924 ),
925 "",
926 )
927
928 target["ns"]["vld"][a_index].get("vim_info").update(
929 {
930 "vim:{}".format(vnf_params["vimAccountId"]): {
931 "vim_network_name": target_vim_network_name,
932 }
933 }
934 )
935
936 if vld_params:
937 for param in ("vim-network-name", "vim-network-id"):
938 if vld_params.get(param) and isinstance(
939 vld_params[param], dict
940 ):
941 for vim, vim_net in vld_params[
942 param
943 ].items():
944 other_target_vim = "vim:" + vim
945 populate_dict(
946 target["ns"]["vld"][a_index].get(
947 "vim_info"
948 ),
949 (
950 other_target_vim,
951 param.replace("-", "_"),
952 ),
953 vim_net,
954 )
955
956 nslcmop_id = db_nslcmop["_id"]
957 target = {
958 "name": db_nsr["name"],
959 "ns": {"vld": []},
960 "vnf": [],
961 "image": deepcopy(db_nsr["image"]),
962 "flavor": deepcopy(db_nsr["flavor"]),
963 "action_id": nslcmop_id,
964 "cloud_init_content": {},
965 }
966 for image in target["image"]:
967 image["vim_info"] = {}
968 for flavor in target["flavor"]:
969 flavor["vim_info"] = {}
970 if db_nsr.get("affinity-or-anti-affinity-group"):
971 target["affinity-or-anti-affinity-group"] = deepcopy(
972 db_nsr["affinity-or-anti-affinity-group"]
973 )
974 for affinity_or_anti_affinity_group in target[
975 "affinity-or-anti-affinity-group"
976 ]:
977 affinity_or_anti_affinity_group["vim_info"] = {}
978
979 if db_nslcmop.get("lcmOperationType") != "instantiate":
980 # get parameters of instantiation:
981 db_nslcmop_instantiate = self.db.get_list(
982 "nslcmops",
983 {
984 "nsInstanceId": db_nslcmop["nsInstanceId"],
985 "lcmOperationType": "instantiate",
986 },
987 )[-1]
988 ns_params = db_nslcmop_instantiate.get("operationParams")
989 else:
990 ns_params = db_nslcmop.get("operationParams")
991 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
992 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
993
994 cp2target = {}
995 for vld_index, vld in enumerate(db_nsr.get("vld")):
996 target_vim = "vim:{}".format(ns_params["vimAccountId"])
997 target_vld = {
998 "id": vld["id"],
999 "name": vld["name"],
1000 "mgmt-network": vld.get("mgmt-network", False),
1001 "type": vld.get("type"),
1002 "vim_info": {
1003 target_vim: {
1004 "vim_network_name": vld.get("vim-network-name"),
1005 "vim_account_id": ns_params["vimAccountId"],
1006 }
1007 },
1008 }
1009 # check if this network needs SDN assist
1010 if vld.get("pci-interfaces"):
1011 db_vim = get_vim_account(ns_params["vimAccountId"])
1012 if vim_config := db_vim.get("config"):
1013 if sdnc_id := vim_config.get("sdn-controller"):
1014 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1015 target_sdn = "sdn:{}".format(sdnc_id)
1016 target_vld["vim_info"][target_sdn] = {
1017 "sdn": True,
1018 "target_vim": target_vim,
1019 "vlds": [sdn_vld],
1020 "type": vld.get("type"),
1021 }
1022
1023 nsd_vnf_profiles = get_vnf_profiles(nsd)
1024 for nsd_vnf_profile in nsd_vnf_profiles:
1025 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1026 if cp["virtual-link-profile-id"] == vld["id"]:
1027 cp2target[
1028 "member_vnf:{}.{}".format(
1029 cp["constituent-cpd-id"][0][
1030 "constituent-base-element-id"
1031 ],
1032 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1033 )
1034 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1035
1036 # check at nsd descriptor, if there is an ip-profile
1037 vld_params = {}
1038 nsd_vlp = find_in_list(
1039 get_virtual_link_profiles(nsd),
1040 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1041 == vld["id"],
1042 )
1043 if (
1044 nsd_vlp
1045 and nsd_vlp.get("virtual-link-protocol-data")
1046 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1047 ):
1048 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
1049 "l3-protocol-data"
1050 ]
1051
1052 # update vld_params with instantiation params
1053 vld_instantiation_params = find_in_list(
1054 get_iterable(ns_params, "vld"),
1055 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1056 )
1057 if vld_instantiation_params:
1058 vld_params.update(vld_instantiation_params)
1059 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1060 target["ns"]["vld"].append(target_vld)
1061 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1062 update_ns_vld_target(target, ns_params)
1063
1064 for vnfr in db_vnfrs.values():
1065 vnfd = find_in_list(
1066 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1067 )
1068 vnf_params = find_in_list(
1069 get_iterable(ns_params, "vnf"),
1070 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1071 )
1072 target_vnf = deepcopy(vnfr)
1073 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1074 for vld in target_vnf.get("vld", ()):
1075 # check if connected to a ns.vld, to fill target'
1076 vnf_cp = find_in_list(
1077 vnfd.get("int-virtual-link-desc", ()),
1078 lambda cpd: cpd.get("id") == vld["id"],
1079 )
1080 if vnf_cp:
1081 ns_cp = "member_vnf:{}.{}".format(
1082 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1083 )
1084 if cp2target.get(ns_cp):
1085 vld["target"] = cp2target[ns_cp]
1086
1087 vld["vim_info"] = {
1088 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1089 }
1090 # check if this network needs SDN assist
1091 target_sdn = None
1092 if vld.get("pci-interfaces"):
1093 db_vim = get_vim_account(vnfr["vim-account-id"])
1094 sdnc_id = db_vim["config"].get("sdn-controller")
1095 if sdnc_id:
1096 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1097 target_sdn = "sdn:{}".format(sdnc_id)
1098 vld["vim_info"][target_sdn] = {
1099 "sdn": True,
1100 "target_vim": target_vim,
1101 "vlds": [sdn_vld],
1102 "type": vld.get("type"),
1103 }
1104
1105 # check at vnfd descriptor, if there is an ip-profile
1106 vld_params = {}
1107 vnfd_vlp = find_in_list(
1108 get_virtual_link_profiles(vnfd),
1109 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1110 )
1111 if (
1112 vnfd_vlp
1113 and vnfd_vlp.get("virtual-link-protocol-data")
1114 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1115 ):
1116 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
1117 "l3-protocol-data"
1118 ]
1119 # update vld_params with instantiation params
1120 if vnf_params:
1121 vld_instantiation_params = find_in_list(
1122 get_iterable(vnf_params, "internal-vld"),
1123 lambda i_vld: i_vld["name"] == vld["id"],
1124 )
1125 if vld_instantiation_params:
1126 vld_params.update(vld_instantiation_params)
1127 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1128
1129 vdur_list = []
1130 for vdur in target_vnf.get("vdur", ()):
1131 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1132 continue # This vdu must not be created
1133 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1134
1135 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1136
1137 if ssh_keys_all:
1138 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1139 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1140 if (
1141 vdu_configuration
1142 and vdu_configuration.get("config-access")
1143 and vdu_configuration.get("config-access").get("ssh-access")
1144 ):
1145 vdur["ssh-keys"] = ssh_keys_all
1146 vdur["ssh-access-required"] = vdu_configuration[
1147 "config-access"
1148 ]["ssh-access"]["required"]
1149 elif (
1150 vnf_configuration
1151 and vnf_configuration.get("config-access")
1152 and vnf_configuration.get("config-access").get("ssh-access")
1153 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1154 ):
1155 vdur["ssh-keys"] = ssh_keys_all
1156 vdur["ssh-access-required"] = vnf_configuration[
1157 "config-access"
1158 ]["ssh-access"]["required"]
1159 elif ssh_keys_instantiation and find_in_list(
1160 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1161 ):
1162 vdur["ssh-keys"] = ssh_keys_instantiation
1163
1164 self.logger.debug("NS > vdur > {}".format(vdur))
1165
1166 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1167 # cloud-init
1168 if vdud.get("cloud-init-file"):
1169 vdur["cloud-init"] = "{}:file:{}".format(
1170 vnfd["_id"], vdud.get("cloud-init-file")
1171 )
1172 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1173 if vdur["cloud-init"] not in target["cloud_init_content"]:
1174 base_folder = vnfd["_admin"]["storage"]
1175 if base_folder["pkg-dir"]:
1176 cloud_init_file = "{}/{}/cloud_init/{}".format(
1177 base_folder["folder"],
1178 base_folder["pkg-dir"],
1179 vdud.get("cloud-init-file"),
1180 )
1181 else:
1182 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1183 base_folder["folder"],
1184 vdud.get("cloud-init-file"),
1185 )
1186 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1187 target["cloud_init_content"][
1188 vdur["cloud-init"]
1189 ] = ci_file.read()
1190 elif vdud.get("cloud-init"):
1191 vdur["cloud-init"] = "{}:vdu:{}".format(
1192 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1193 )
1194 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1195 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1196 "cloud-init"
1197 ]
1198 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1199 deploy_params_vdu = self._format_additional_params(
1200 vdur.get("additionalParams") or {}
1201 )
1202 deploy_params_vdu["OSM"] = get_osm_params(
1203 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1204 )
1205 vdur["additionalParams"] = deploy_params_vdu
1206
1207 # flavor
1208 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1209 if target_vim not in ns_flavor["vim_info"]:
1210 ns_flavor["vim_info"][target_vim] = {}
1211
1212 # deal with images
1213 # in case alternative images are provided we must check if they should be applied
1214 # for the vim_type, modify the vim_type taking into account
1215 ns_image_id = int(vdur["ns-image-id"])
1216 if vdur.get("alt-image-ids"):
1217 db_vim = get_vim_account(vnfr["vim-account-id"])
1218 vim_type = db_vim["vim_type"]
1219 for alt_image_id in vdur.get("alt-image-ids"):
1220 ns_alt_image = target["image"][int(alt_image_id)]
1221 if vim_type == ns_alt_image.get("vim-type"):
1222 # must use alternative image
1223 self.logger.debug(
1224 "use alternative image id: {}".format(alt_image_id)
1225 )
1226 ns_image_id = alt_image_id
1227 vdur["ns-image-id"] = ns_image_id
1228 break
1229 ns_image = target["image"][int(ns_image_id)]
1230 if target_vim not in ns_image["vim_info"]:
1231 ns_image["vim_info"][target_vim] = {}
1232
1233 # Affinity groups
1234 if vdur.get("affinity-or-anti-affinity-group-id"):
1235 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1236 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1237 if target_vim not in ns_ags["vim_info"]:
1238 ns_ags["vim_info"][target_vim] = {}
1239
1240 vdur["vim_info"] = {target_vim: {}}
1241 # instantiation parameters
1242 if vnf_params:
1243 vdu_instantiation_params = find_in_list(
1244 get_iterable(vnf_params, "vdu"),
1245 lambda i_vdu: i_vdu["id"] == vdud["id"],
1246 )
1247 if vdu_instantiation_params:
1248 # Parse the vdu_volumes from the instantiation params
1249 vdu_volumes = get_volumes_from_instantiation_params(
1250 vdu_instantiation_params, vdud
1251 )
1252 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1253 vdur["additionalParams"]["OSM"][
1254 "vim_flavor_id"
1255 ] = vdu_instantiation_params.get("vim-flavor-id")
1256 vdur_list.append(vdur)
1257 target_vnf["vdur"] = vdur_list
1258 target["vnf"].append(target_vnf)
1259
1260 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1261 desc = await self.RO.deploy(nsr_id, target)
1262 self.logger.debug("RO return > {}".format(desc))
1263 action_id = desc["action_id"]
1264 await self._wait_ng_ro(
1265 nsr_id,
1266 action_id,
1267 nslcmop_id,
1268 start_deploy,
1269 timeout_ns_deploy,
1270 stage,
1271 operation="instantiation",
1272 )
1273
1274 # Updating NSR
1275 db_nsr_update = {
1276 "_admin.deployed.RO.operational-status": "running",
1277 "detailed-status": " ".join(stage),
1278 }
1279 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1280 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1281 self._write_op_status(nslcmop_id, stage)
1282 self.logger.debug(
1283 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1284 )
1285 return
1286
1287 async def _wait_ng_ro(
1288 self,
1289 nsr_id,
1290 action_id,
1291 nslcmop_id=None,
1292 start_time=None,
1293 timeout=600,
1294 stage=None,
1295 operation=None,
1296 ):
1297 detailed_status_old = None
1298 db_nsr_update = {}
1299 start_time = start_time or time()
1300 while time() <= start_time + timeout:
1301 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1302 self.logger.debug("Wait NG RO > {}".format(desc_status))
1303 if desc_status["status"] == "FAILED":
1304 raise NgRoException(desc_status["details"])
1305 elif desc_status["status"] == "BUILD":
1306 if stage:
1307 stage[2] = "VIM: ({})".format(desc_status["details"])
1308 elif desc_status["status"] == "DONE":
1309 if stage:
1310 stage[2] = "Deployed at VIM"
1311 break
1312 else:
1313 assert False, "ROclient.check_ns_status returns unknown {}".format(
1314 desc_status["status"]
1315 )
1316 if stage and nslcmop_id and stage[2] != detailed_status_old:
1317 detailed_status_old = stage[2]
1318 db_nsr_update["detailed-status"] = " ".join(stage)
1319 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1320 self._write_op_status(nslcmop_id, stage)
1321 await asyncio.sleep(15, loop=self.loop)
1322 else: # timeout_ns_deploy
1323 raise NgRoException("Timeout waiting ns to deploy")
1324
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate the NS resources at NG-RO: deploy an "empty" target (which
        makes RO delete everything currently instantiated), wait for the delete
        action to finish, and finally remove the nsr record from RO itself.

        :param logging_text: prefix used for all log messages
        :param nsr_deployed: deployed info of the NS record (currently unused here)
        :param nsr_id: NS record identifier
        :param nslcmop_id: NS LCM operation id, also used as RO action_id seed
        :param stage: 3-item progress list; item [2] is rewritten with the result
        :raises LcmException: if any deletion step failed (details joined with ';')
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # An empty target tells NG-RO to remove every deployed item of this NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            # Classify RO errors by HTTP code: 404 means already gone (success),
            # 409 is a conflict, anything else is a genuine delete failure.
            if e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            # Catch-all: record the failure but still update DB/status below
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        # Always persist the final status, whether deletion succeeded or not
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        # Raise only after DB has been updated, so state is consistent
        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1398
1399 async def instantiate_RO(
1400 self,
1401 logging_text,
1402 nsr_id,
1403 nsd,
1404 db_nsr,
1405 db_nslcmop,
1406 db_vnfrs,
1407 db_vnfds,
1408 n2vc_key_list,
1409 stage,
1410 ):
1411 """
1412 Instantiate at RO
1413 :param logging_text: preffix text to use at logging
1414 :param nsr_id: nsr identity
1415 :param nsd: database content of ns descriptor
1416 :param db_nsr: database content of ns record
1417 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1418 :param db_vnfrs:
1419 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1420 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1421 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1422 :return: None or exception
1423 """
1424 try:
1425 start_deploy = time()
1426 ns_params = db_nslcmop.get("operationParams")
1427 if ns_params and ns_params.get("timeout_ns_deploy"):
1428 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1429 else:
1430 timeout_ns_deploy = self.timeout.ns_deploy
1431
1432 # Check for and optionally request placement optimization. Database will be updated if placement activated
1433 stage[2] = "Waiting for Placement."
1434 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1435 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1436 for vnfr in db_vnfrs.values():
1437 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1438 break
1439 else:
1440 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1441
1442 return await self._instantiate_ng_ro(
1443 logging_text,
1444 nsr_id,
1445 nsd,
1446 db_nsr,
1447 db_nslcmop,
1448 db_vnfrs,
1449 db_vnfds,
1450 n2vc_key_list,
1451 stage,
1452 start_deploy,
1453 timeout_ns_deploy,
1454 )
1455 except Exception as e:
1456 stage[2] = "ERROR deploying at VIM"
1457 self.set_vnfr_at_error(db_vnfrs, str(e))
1458 self.logger.error(
1459 "Error deploying at VIM {}".format(e),
1460 exc_info=not isinstance(
1461 e,
1462 (
1463 ROclient.ROClientException,
1464 LcmException,
1465 DbException,
1466 NgRoException,
1467 ),
1468 ),
1469 )
1470 raise
1471
1472 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1473 """
1474 Wait for kdu to be up, get ip address
1475 :param logging_text: prefix use for logging
1476 :param nsr_id:
1477 :param vnfr_id:
1478 :param kdu_name:
1479 :return: IP address, K8s services
1480 """
1481
1482 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1483 nb_tries = 0
1484
1485 while nb_tries < 360:
1486 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1487 kdur = next(
1488 (
1489 x
1490 for x in get_iterable(db_vnfr, "kdur")
1491 if x.get("kdu-name") == kdu_name
1492 ),
1493 None,
1494 )
1495 if not kdur:
1496 raise LcmException(
1497 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1498 )
1499 if kdur.get("status"):
1500 if kdur["status"] in ("READY", "ENABLED"):
1501 return kdur.get("ip-address"), kdur.get("services")
1502 else:
1503 raise LcmException(
1504 "target KDU={} is in error state".format(kdu_name)
1505 )
1506
1507 await asyncio.sleep(10, loop=self.loop)
1508 nb_tries += 1
1509 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1510
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: NS record id used for the RO key-injection action
        :param vnfr_id: VNF record id whose vdur list is inspected
        :param vdu_id: target VDU id; None means the VNF's management address is used
        :param vdu_index: count-index of the target VDU (used with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        target_vdu_id = None
        ro_retries = 0

        # Retry loop: each iteration sleeps 10 s; once the target VDU is found
        # (target_vdu_id latched), subsequent iterations only retry key injection.
        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # locate the vdur by VDU id and count-index
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are always considered up; VMs must be ACTIVE (old or NG status)
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            # not ACTIVE yet and not in error: keep polling
            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # cannot ssh-provision a physical deployment unit
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    # delegate the actual injection to NG-RO as an action deploy
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                # nothing to inject: the IP address is all the caller needs
                break

        return ip_address
1636
1637 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1638 """
1639 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1640 """
1641 my_vca = vca_deployed_list[vca_index]
1642 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1643 # vdu or kdu: no dependencies
1644 return
1645 timeout = 300
1646 while timeout >= 0:
1647 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1648 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1649 configuration_status_list = db_nsr["configurationStatus"]
1650 for index, vca_deployed in enumerate(configuration_status_list):
1651 if index == vca_index:
1652 # myself
1653 continue
1654 if not my_vca.get("member-vnf-index") or (
1655 vca_deployed.get("member-vnf-index")
1656 == my_vca.get("member-vnf-index")
1657 ):
1658 internal_status = configuration_status_list[index].get("status")
1659 if internal_status == "READY":
1660 continue
1661 elif internal_status == "BROKEN":
1662 raise LcmException(
1663 "Configuration aborted because dependent charm/s has failed"
1664 )
1665 else:
1666 break
1667 else:
1668 # no dependencies, return
1669 return
1670 await asyncio.sleep(10)
1671 timeout -= 1
1672
1673 raise LcmException("Configuration aborted because dependent charm/s timeout")
1674
1675 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1676 vca_id = None
1677 if db_vnfr:
1678 vca_id = deep_get(db_vnfr, ("vca-id",))
1679 elif db_nsr:
1680 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1681 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1682 return vca_id
1683
1684 async def instantiate_N2VC(
1685 self,
1686 logging_text,
1687 vca_index,
1688 nsi_id,
1689 db_nsr,
1690 db_vnfr,
1691 vdu_id,
1692 kdu_name,
1693 vdu_index,
1694 kdu_index,
1695 config_descriptor,
1696 deploy_params,
1697 base_folder,
1698 nslcmop_id,
1699 stage,
1700 vca_type,
1701 vca_name,
1702 ee_config_descriptor,
1703 ):
1704 nsr_id = db_nsr["_id"]
1705 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1706 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1707 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1708 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1709 db_dict = {
1710 "collection": "nsrs",
1711 "filter": {"_id": nsr_id},
1712 "path": db_update_entry,
1713 }
1714 step = ""
1715 try:
1716 element_type = "NS"
1717 element_under_configuration = nsr_id
1718
1719 vnfr_id = None
1720 if db_vnfr:
1721 vnfr_id = db_vnfr["_id"]
1722 osm_config["osm"]["vnf_id"] = vnfr_id
1723
1724 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1725
1726 if vca_type == "native_charm":
1727 index_number = 0
1728 else:
1729 index_number = vdu_index or 0
1730
1731 if vnfr_id:
1732 element_type = "VNF"
1733 element_under_configuration = vnfr_id
1734 namespace += ".{}-{}".format(vnfr_id, index_number)
1735 if vdu_id:
1736 namespace += ".{}-{}".format(vdu_id, index_number)
1737 element_type = "VDU"
1738 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1739 osm_config["osm"]["vdu_id"] = vdu_id
1740 elif kdu_name:
1741 namespace += ".{}".format(kdu_name)
1742 element_type = "KDU"
1743 element_under_configuration = kdu_name
1744 osm_config["osm"]["kdu_name"] = kdu_name
1745
1746 # Get artifact path
1747 if base_folder["pkg-dir"]:
1748 artifact_path = "{}/{}/{}/{}".format(
1749 base_folder["folder"],
1750 base_folder["pkg-dir"],
1751 "charms"
1752 if vca_type
1753 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1754 else "helm-charts",
1755 vca_name,
1756 )
1757 else:
1758 artifact_path = "{}/Scripts/{}/{}/".format(
1759 base_folder["folder"],
1760 "charms"
1761 if vca_type
1762 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1763 else "helm-charts",
1764 vca_name,
1765 )
1766
1767 self.logger.debug("Artifact path > {}".format(artifact_path))
1768
1769 # get initial_config_primitive_list that applies to this element
1770 initial_config_primitive_list = config_descriptor.get(
1771 "initial-config-primitive"
1772 )
1773
1774 self.logger.debug(
1775 "Initial config primitive list > {}".format(
1776 initial_config_primitive_list
1777 )
1778 )
1779
1780 # add config if not present for NS charm
1781 ee_descriptor_id = ee_config_descriptor.get("id")
1782 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1783 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1784 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1785 )
1786
1787 self.logger.debug(
1788 "Initial config primitive list #2 > {}".format(
1789 initial_config_primitive_list
1790 )
1791 )
1792 # n2vc_redesign STEP 3.1
1793 # find old ee_id if exists
1794 ee_id = vca_deployed.get("ee_id")
1795
1796 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1797 # create or register execution environment in VCA
1798 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1799 self._write_configuration_status(
1800 nsr_id=nsr_id,
1801 vca_index=vca_index,
1802 status="CREATING",
1803 element_under_configuration=element_under_configuration,
1804 element_type=element_type,
1805 )
1806
1807 step = "create execution environment"
1808 self.logger.debug(logging_text + step)
1809
1810 ee_id = None
1811 credentials = None
1812 if vca_type == "k8s_proxy_charm":
1813 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1814 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1815 namespace=namespace,
1816 artifact_path=artifact_path,
1817 db_dict=db_dict,
1818 vca_id=vca_id,
1819 )
1820 elif vca_type == "helm" or vca_type == "helm-v3":
1821 ee_id, credentials = await self.vca_map[
1822 vca_type
1823 ].create_execution_environment(
1824 namespace=namespace,
1825 reuse_ee_id=ee_id,
1826 db_dict=db_dict,
1827 config=osm_config,
1828 artifact_path=artifact_path,
1829 chart_model=vca_name,
1830 vca_type=vca_type,
1831 )
1832 else:
1833 ee_id, credentials = await self.vca_map[
1834 vca_type
1835 ].create_execution_environment(
1836 namespace=namespace,
1837 reuse_ee_id=ee_id,
1838 db_dict=db_dict,
1839 vca_id=vca_id,
1840 )
1841
1842 elif vca_type == "native_charm":
1843 step = "Waiting to VM being up and getting IP address"
1844 self.logger.debug(logging_text + step)
1845 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1846 logging_text,
1847 nsr_id,
1848 vnfr_id,
1849 vdu_id,
1850 vdu_index,
1851 user=None,
1852 pub_key=None,
1853 )
1854 credentials = {"hostname": rw_mgmt_ip}
1855 # get username
1856 username = deep_get(
1857 config_descriptor, ("config-access", "ssh-access", "default-user")
1858 )
1859 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1860 # merged. Meanwhile let's get username from initial-config-primitive
1861 if not username and initial_config_primitive_list:
1862 for config_primitive in initial_config_primitive_list:
1863 for param in config_primitive.get("parameter", ()):
1864 if param["name"] == "ssh-username":
1865 username = param["value"]
1866 break
1867 if not username:
1868 raise LcmException(
1869 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1870 "'config-access.ssh-access.default-user'"
1871 )
1872 credentials["username"] = username
1873 # n2vc_redesign STEP 3.2
1874
1875 self._write_configuration_status(
1876 nsr_id=nsr_id,
1877 vca_index=vca_index,
1878 status="REGISTERING",
1879 element_under_configuration=element_under_configuration,
1880 element_type=element_type,
1881 )
1882
1883 step = "register execution environment {}".format(credentials)
1884 self.logger.debug(logging_text + step)
1885 ee_id = await self.vca_map[vca_type].register_execution_environment(
1886 credentials=credentials,
1887 namespace=namespace,
1888 db_dict=db_dict,
1889 vca_id=vca_id,
1890 )
1891
1892 # for compatibility with MON/POL modules, the need model and application name at database
1893 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1894 ee_id_parts = ee_id.split(".")
1895 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1896 if len(ee_id_parts) >= 2:
1897 model_name = ee_id_parts[0]
1898 application_name = ee_id_parts[1]
1899 db_nsr_update[db_update_entry + "model"] = model_name
1900 db_nsr_update[db_update_entry + "application"] = application_name
1901
1902 # n2vc_redesign STEP 3.3
1903 step = "Install configuration Software"
1904
1905 self._write_configuration_status(
1906 nsr_id=nsr_id,
1907 vca_index=vca_index,
1908 status="INSTALLING SW",
1909 element_under_configuration=element_under_configuration,
1910 element_type=element_type,
1911 other_update=db_nsr_update,
1912 )
1913
1914 # TODO check if already done
1915 self.logger.debug(logging_text + step)
1916 config = None
1917 if vca_type == "native_charm":
1918 config_primitive = next(
1919 (p for p in initial_config_primitive_list if p["name"] == "config"),
1920 None,
1921 )
1922 if config_primitive:
1923 config = self._map_primitive_params(
1924 config_primitive, {}, deploy_params
1925 )
1926 num_units = 1
1927 if vca_type == "lxc_proxy_charm":
1928 if element_type == "NS":
1929 num_units = db_nsr.get("config-units") or 1
1930 elif element_type == "VNF":
1931 num_units = db_vnfr.get("config-units") or 1
1932 elif element_type == "VDU":
1933 for v in db_vnfr["vdur"]:
1934 if vdu_id == v["vdu-id-ref"]:
1935 num_units = v.get("config-units") or 1
1936 break
1937 if vca_type != "k8s_proxy_charm":
1938 await self.vca_map[vca_type].install_configuration_sw(
1939 ee_id=ee_id,
1940 artifact_path=artifact_path,
1941 db_dict=db_dict,
1942 config=config,
1943 num_units=num_units,
1944 vca_id=vca_id,
1945 vca_type=vca_type,
1946 )
1947
1948 # write in db flag of configuration_sw already installed
1949 self.update_db_2(
1950 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1951 )
1952
1953 # add relations for this VCA (wait for other peers related with this VCA)
1954 is_relation_added = await self._add_vca_relations(
1955 logging_text=logging_text,
1956 nsr_id=nsr_id,
1957 vca_type=vca_type,
1958 vca_index=vca_index,
1959 )
1960
1961 if not is_relation_added:
1962 raise LcmException("Relations could not be added to VCA.")
1963
1964 # if SSH access is required, then get execution environment SSH public
1965 # if native charm we have waited already to VM be UP
1966 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1967 pub_key = None
1968 user = None
1969 # self.logger.debug("get ssh key block")
1970 if deep_get(
1971 config_descriptor, ("config-access", "ssh-access", "required")
1972 ):
1973 # self.logger.debug("ssh key needed")
1974 # Needed to inject a ssh key
1975 user = deep_get(
1976 config_descriptor,
1977 ("config-access", "ssh-access", "default-user"),
1978 )
1979 step = "Install configuration Software, getting public ssh key"
1980 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1981 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1982 )
1983
1984 step = "Insert public key into VM user={} ssh_key={}".format(
1985 user, pub_key
1986 )
1987 else:
1988 # self.logger.debug("no need to get ssh key")
1989 step = "Waiting to VM being up and getting IP address"
1990 self.logger.debug(logging_text + step)
1991
1992 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1993 rw_mgmt_ip = None
1994
1995 # n2vc_redesign STEP 5.1
1996 # wait for RO (ip-address) Insert pub_key into VM
1997 if vnfr_id:
1998 if kdu_name:
1999 rw_mgmt_ip, services = await self.wait_kdu_up(
2000 logging_text, nsr_id, vnfr_id, kdu_name
2001 )
2002 vnfd = self.db.get_one(
2003 "vnfds_revisions",
2004 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2005 )
2006 kdu = get_kdu(vnfd, kdu_name)
2007 kdu_services = [
2008 service["name"] for service in get_kdu_services(kdu)
2009 ]
2010 exposed_services = []
2011 for service in services:
2012 if any(s in service["name"] for s in kdu_services):
2013 exposed_services.append(service)
2014 await self.vca_map[vca_type].exec_primitive(
2015 ee_id=ee_id,
2016 primitive_name="config",
2017 params_dict={
2018 "osm-config": json.dumps(
2019 OsmConfigBuilder(
2020 k8s={"services": exposed_services}
2021 ).build()
2022 )
2023 },
2024 vca_id=vca_id,
2025 )
2026
2027 # This verification is needed in order to avoid trying to add a public key
2028 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2029 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2030 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2031 # or it is a KNF)
2032 elif db_vnfr.get("vdur"):
2033 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2034 logging_text,
2035 nsr_id,
2036 vnfr_id,
2037 vdu_id,
2038 vdu_index,
2039 user=user,
2040 pub_key=pub_key,
2041 )
2042
2043 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2044
2045 # store rw_mgmt_ip in deploy params for later replacement
2046 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2047
2048 # n2vc_redesign STEP 6 Execute initial config primitive
2049 step = "execute initial config primitive"
2050
2051 # wait for dependent primitives execution (NS -> VNF -> VDU)
2052 if initial_config_primitive_list:
2053 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2054
2055 # stage, in function of element type: vdu, kdu, vnf or ns
2056 my_vca = vca_deployed_list[vca_index]
2057 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2058 # VDU or KDU
2059 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2060 elif my_vca.get("member-vnf-index"):
2061 # VNF
2062 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2063 else:
2064 # NS
2065 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2066
2067 self._write_configuration_status(
2068 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2069 )
2070
2071 self._write_op_status(op_id=nslcmop_id, stage=stage)
2072
2073 check_if_terminated_needed = True
2074 for initial_config_primitive in initial_config_primitive_list:
2075 # adding information on the vca_deployed if it is a NS execution environment
2076 if not vca_deployed["member-vnf-index"]:
2077 deploy_params["ns_config_info"] = json.dumps(
2078 self._get_ns_config_info(nsr_id)
2079 )
2080 # TODO check if already done
2081 primitive_params_ = self._map_primitive_params(
2082 initial_config_primitive, {}, deploy_params
2083 )
2084
2085 step = "execute primitive '{}' params '{}'".format(
2086 initial_config_primitive["name"], primitive_params_
2087 )
2088 self.logger.debug(logging_text + step)
2089 await self.vca_map[vca_type].exec_primitive(
2090 ee_id=ee_id,
2091 primitive_name=initial_config_primitive["name"],
2092 params_dict=primitive_params_,
2093 db_dict=db_dict,
2094 vca_id=vca_id,
2095 vca_type=vca_type,
2096 )
2097 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2098 if check_if_terminated_needed:
2099 if config_descriptor.get("terminate-config-primitive"):
2100 self.update_db_2(
2101 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2102 )
2103 check_if_terminated_needed = False
2104
2105 # TODO register in database that primitive is done
2106
2107 # STEP 7 Configure metrics
2108 if vca_type == "helm" or vca_type == "helm-v3":
2109 # TODO: review for those cases where the helm chart is a reference and
2110 # is not part of the NF package
2111 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2112 ee_id=ee_id,
2113 artifact_path=artifact_path,
2114 ee_config_descriptor=ee_config_descriptor,
2115 vnfr_id=vnfr_id,
2116 nsr_id=nsr_id,
2117 target_ip=rw_mgmt_ip,
2118 element_type=element_type,
2119 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2120 vdu_id=vdu_id,
2121 vdu_index=vdu_index,
2122 kdu_name=kdu_name,
2123 kdu_index=kdu_index,
2124 )
2125 if prometheus_jobs:
2126 self.update_db_2(
2127 "nsrs",
2128 nsr_id,
2129 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2130 )
2131
2132 for job in prometheus_jobs:
2133 self.db.set_one(
2134 "prometheus_jobs",
2135 {"job_name": job["job_name"]},
2136 job,
2137 upsert=True,
2138 fail_on_empty=False,
2139 )
2140
2141 step = "instantiated at VCA"
2142 self.logger.debug(logging_text + step)
2143
2144 self._write_configuration_status(
2145 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2146 )
2147
2148 except Exception as e: # TODO not use Exception but N2VC exception
2149 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2150 if not isinstance(
2151 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2152 ):
2153 self.logger.error(
2154 "Exception while {} : {}".format(step, e), exc_info=True
2155 )
2156 self._write_configuration_status(
2157 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2158 )
2159 raise LcmException("{}. {}".format(step, e)) from e
2160
2161 def _write_ns_status(
2162 self,
2163 nsr_id: str,
2164 ns_state: str,
2165 current_operation: str,
2166 current_operation_id: str,
2167 error_description: str = None,
2168 error_detail: str = None,
2169 other_update: dict = None,
2170 ):
2171 """
2172 Update db_nsr fields.
2173 :param nsr_id:
2174 :param ns_state:
2175 :param current_operation:
2176 :param current_operation_id:
2177 :param error_description:
2178 :param error_detail:
2179 :param other_update: Other required changes at database if provided, will be cleared
2180 :return:
2181 """
2182 try:
2183 db_dict = other_update or {}
2184 db_dict[
2185 "_admin.nslcmop"
2186 ] = current_operation_id # for backward compatibility
2187 db_dict["_admin.current-operation"] = current_operation_id
2188 db_dict["_admin.operation-type"] = (
2189 current_operation if current_operation != "IDLE" else None
2190 )
2191 db_dict["currentOperation"] = current_operation
2192 db_dict["currentOperationID"] = current_operation_id
2193 db_dict["errorDescription"] = error_description
2194 db_dict["errorDetail"] = error_detail
2195
2196 if ns_state:
2197 db_dict["nsState"] = ns_state
2198 self.update_db_2("nsrs", nsr_id, db_dict)
2199 except DbException as e:
2200 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2201
2202 def _write_op_status(
2203 self,
2204 op_id: str,
2205 stage: list = None,
2206 error_message: str = None,
2207 queuePosition: int = 0,
2208 operation_state: str = None,
2209 other_update: dict = None,
2210 ):
2211 try:
2212 db_dict = other_update or {}
2213 db_dict["queuePosition"] = queuePosition
2214 if isinstance(stage, list):
2215 db_dict["stage"] = stage[0]
2216 db_dict["detailed-status"] = " ".join(stage)
2217 elif stage is not None:
2218 db_dict["stage"] = str(stage)
2219
2220 if error_message is not None:
2221 db_dict["errorMessage"] = error_message
2222 if operation_state is not None:
2223 db_dict["operationState"] = operation_state
2224 db_dict["statusEnteredTime"] = time()
2225 self.update_db_2("nslcmops", op_id, db_dict)
2226 except DbException as e:
2227 self.logger.warn(
2228 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2229 )
2230
2231 def _write_all_config_status(self, db_nsr: dict, status: str):
2232 try:
2233 nsr_id = db_nsr["_id"]
2234 # configurationStatus
2235 config_status = db_nsr.get("configurationStatus")
2236 if config_status:
2237 db_nsr_update = {
2238 "configurationStatus.{}.status".format(index): status
2239 for index, v in enumerate(config_status)
2240 if v
2241 }
2242 # update status
2243 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2244
2245 except DbException as e:
2246 self.logger.warn(
2247 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2248 )
2249
2250 def _write_configuration_status(
2251 self,
2252 nsr_id: str,
2253 vca_index: int,
2254 status: str = None,
2255 element_under_configuration: str = None,
2256 element_type: str = None,
2257 other_update: dict = None,
2258 ):
2259 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2260 # .format(vca_index, status))
2261
2262 try:
2263 db_path = "configurationStatus.{}.".format(vca_index)
2264 db_dict = other_update or {}
2265 if status:
2266 db_dict[db_path + "status"] = status
2267 if element_under_configuration:
2268 db_dict[
2269 db_path + "elementUnderConfiguration"
2270 ] = element_under_configuration
2271 if element_type:
2272 db_dict[db_path + "elementType"] = element_type
2273 self.update_db_2("nsrs", nsr_id, db_dict)
2274 except DbException as e:
2275 self.logger.warn(
2276 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2277 status, nsr_id, vca_index, e
2278 )
2279 )
2280
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # ask the PLA module (via kafka) to compute the placement; the answer is
            # written at nslcmops _admin.pla by whichever LCM worker receives it
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database every 5 seconds, up to 10 attempts (~50s total)
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                # re-read the whole operation record; _admin.pla appears once PLA answers
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # apply the vim account decided by PLA to each affected vnfr, both at
            # database and at the in-memory db_vnfrs dict used by the caller
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2329
2330 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2331 alerts = []
2332 nsr_id = vnfr["nsr-id-ref"]
2333 df = vnfd.get("df", [{}])[0]
2334 # Checking for auto-healing configuration
2335 if "healing-aspect" in df:
2336 healing_aspects = df["healing-aspect"]
2337 for healing in healing_aspects:
2338 for healing_policy in healing.get("healing-policy", ()):
2339 vdu_id = healing_policy["vdu-id"]
2340 vdur = next(
2341 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2342 {},
2343 )
2344 if not vdur:
2345 continue
2346 metric_name = "vm_status"
2347 vdu_name = vdur.get("name")
2348 vnf_member_index = vnfr["member-vnf-index-ref"]
2349 uuid = str(uuid4())
2350 name = f"healing_{uuid}"
2351 action = healing_policy
2352 # action_on_recovery = healing.get("action-on-recovery")
2353 # cooldown_time = healing.get("cooldown-time")
2354 # day1 = healing.get("day1")
2355 alert = {
2356 "uuid": uuid,
2357 "name": name,
2358 "metric": metric_name,
2359 "tags": {
2360 "ns_id": nsr_id,
2361 "vnf_member_index": vnf_member_index,
2362 "vdu_name": vdu_name,
2363 },
2364 "alarm_status": "ok",
2365 "action_type": "healing",
2366 "action": action,
2367 }
2368 alerts.append(alert)
2369 return alerts
2370
2371 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2372 alerts = []
2373 nsr_id = vnfr["nsr-id-ref"]
2374 df = vnfd.get("df", [{}])[0]
2375 # Checking for auto-scaling configuration
2376 if "scaling-aspect" in df:
2377 rel_operation_types = {
2378 "GE": ">=",
2379 "LE": "<=",
2380 "GT": ">",
2381 "LT": "<",
2382 "EQ": "==",
2383 "NE": "!=",
2384 }
2385 scaling_aspects = df["scaling-aspect"]
2386 all_vnfd_monitoring_params = {}
2387 for ivld in vnfd.get("int-virtual-link-desc", ()):
2388 for mp in ivld.get("monitoring-parameters", ()):
2389 all_vnfd_monitoring_params[mp.get("id")] = mp
2390 for vdu in vnfd.get("vdu", ()):
2391 for mp in vdu.get("monitoring-parameter", ()):
2392 all_vnfd_monitoring_params[mp.get("id")] = mp
2393 for df in vnfd.get("df", ()):
2394 for mp in df.get("monitoring-parameter", ()):
2395 all_vnfd_monitoring_params[mp.get("id")] = mp
2396 for scaling_aspect in scaling_aspects:
2397 scaling_group_name = scaling_aspect.get("name", "")
2398 # Get monitored VDUs
2399 all_monitored_vdus = set()
2400 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2401 "deltas", ()
2402 ):
2403 for vdu_delta in delta.get("vdu-delta", ()):
2404 all_monitored_vdus.add(vdu_delta.get("id"))
2405 monitored_vdurs = list(
2406 filter(
2407 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2408 vnfr["vdur"],
2409 )
2410 )
2411 if not monitored_vdurs:
2412 self.logger.error(
2413 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2414 )
2415 continue
2416 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2417 if scaling_policy["scaling-type"] != "automatic":
2418 continue
2419 threshold_time = scaling_policy.get("threshold-time", "1")
2420 cooldown_time = scaling_policy.get("cooldown-time", "0")
2421 for scaling_criteria in scaling_policy["scaling-criteria"]:
2422 monitoring_param_ref = scaling_criteria.get(
2423 "vnf-monitoring-param-ref"
2424 )
2425 vnf_monitoring_param = all_vnfd_monitoring_params[
2426 monitoring_param_ref
2427 ]
2428 for vdur in monitored_vdurs:
2429 vdu_id = vdur["vdu-id-ref"]
2430 metric_name = vnf_monitoring_param.get("performance-metric")
2431 metric_name = f"osm_{metric_name}"
2432 vnf_member_index = vnfr["member-vnf-index-ref"]
2433 scalein_threshold = scaling_criteria.get(
2434 "scale-in-threshold"
2435 )
2436 scaleout_threshold = scaling_criteria.get(
2437 "scale-out-threshold"
2438 )
2439 # Looking for min/max-number-of-instances
2440 instances_min_number = 1
2441 instances_max_number = 1
2442 vdu_profile = df["vdu-profile"]
2443 if vdu_profile:
2444 profile = next(
2445 item for item in vdu_profile if item["id"] == vdu_id
2446 )
2447 instances_min_number = profile.get(
2448 "min-number-of-instances", 1
2449 )
2450 instances_max_number = profile.get(
2451 "max-number-of-instances", 1
2452 )
2453
2454 if scalein_threshold:
2455 uuid = str(uuid4())
2456 name = f"scalein_{uuid}"
2457 operation = scaling_criteria[
2458 "scale-in-relational-operation"
2459 ]
2460 rel_operator = rel_operation_types.get(operation, "<=")
2461 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2462 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2463 labels = {
2464 "ns_id": nsr_id,
2465 "vnf_member_index": vnf_member_index,
2466 "vdu_id": vdu_id,
2467 }
2468 prom_cfg = {
2469 "alert": name,
2470 "expr": expression,
2471 "for": str(threshold_time) + "m",
2472 "labels": labels,
2473 }
2474 action = scaling_policy
2475 action = {
2476 "scaling-group": scaling_group_name,
2477 "cooldown-time": cooldown_time,
2478 }
2479 alert = {
2480 "uuid": uuid,
2481 "name": name,
2482 "metric": metric_name,
2483 "tags": {
2484 "ns_id": nsr_id,
2485 "vnf_member_index": vnf_member_index,
2486 "vdu_id": vdu_id,
2487 },
2488 "alarm_status": "ok",
2489 "action_type": "scale_in",
2490 "action": action,
2491 "prometheus_config": prom_cfg,
2492 }
2493 alerts.append(alert)
2494
2495 if scaleout_threshold:
2496 uuid = str(uuid4())
2497 name = f"scaleout_{uuid}"
2498 operation = scaling_criteria[
2499 "scale-out-relational-operation"
2500 ]
2501 rel_operator = rel_operation_types.get(operation, "<=")
2502 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2503 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2504 labels = {
2505 "ns_id": nsr_id,
2506 "vnf_member_index": vnf_member_index,
2507 "vdu_id": vdu_id,
2508 }
2509 prom_cfg = {
2510 "alert": name,
2511 "expr": expression,
2512 "for": str(threshold_time) + "m",
2513 "labels": labels,
2514 }
2515 action = scaling_policy
2516 action = {
2517 "scaling-group": scaling_group_name,
2518 "cooldown-time": cooldown_time,
2519 }
2520 alert = {
2521 "uuid": uuid,
2522 "name": name,
2523 "metric": metric_name,
2524 "tags": {
2525 "ns_id": nsr_id,
2526 "vnf_member_index": vnf_member_index,
2527 "vdu_id": vdu_id,
2528 },
2529 "alarm_status": "ok",
2530 "action_type": "scale_out",
2531 "action": action,
2532 "prometheus_config": prom_cfg,
2533 }
2534 alerts.append(alert)
2535 return alerts
2536
2537 def update_nsrs_with_pla_result(self, params):
2538 try:
2539 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2540 self.update_db_2(
2541 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2542 )
2543 except Exception as e:
2544 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2545
2546 async def instantiate(self, nsr_id, nslcmop_id):
2547 """
2548
2549 :param nsr_id: ns instance to deploy
2550 :param nslcmop_id: operation to run
2551 :return:
2552 """
2553
2554 # Try to lock HA task here
2555 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2556 if not task_is_locked_by_me:
2557 self.logger.debug(
2558 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2559 )
2560 return
2561
2562 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2563 self.logger.debug(logging_text + "Enter")
2564
2565 # get all needed from database
2566
2567 # database nsrs record
2568 db_nsr = None
2569
2570 # database nslcmops record
2571 db_nslcmop = None
2572
2573 # update operation on nsrs
2574 db_nsr_update = {}
2575 # update operation on nslcmops
2576 db_nslcmop_update = {}
2577
2578 timeout_ns_deploy = self.timeout.ns_deploy
2579
2580 nslcmop_operation_state = None
2581 db_vnfrs = {} # vnf's info indexed by member-index
2582 # n2vc_info = {}
2583 tasks_dict_info = {} # from task to info text
2584 exc = None
2585 error_list = []
2586 stage = [
2587 "Stage 1/5: preparation of the environment.",
2588 "Waiting for previous operations to terminate.",
2589 "",
2590 ]
2591 # ^ stage, step, VIM progress
2592 try:
2593 # wait for any previous tasks in process
2594 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2595
2596 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2597 stage[1] = "Reading from database."
2598 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2599 db_nsr_update["detailed-status"] = "creating"
2600 db_nsr_update["operational-status"] = "init"
2601 self._write_ns_status(
2602 nsr_id=nsr_id,
2603 ns_state="BUILDING",
2604 current_operation="INSTANTIATING",
2605 current_operation_id=nslcmop_id,
2606 other_update=db_nsr_update,
2607 )
2608 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2609
2610 # read from db: operation
2611 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2612 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2613 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2614 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2615 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2616 )
2617 ns_params = db_nslcmop.get("operationParams")
2618 if ns_params and ns_params.get("timeout_ns_deploy"):
2619 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2620
2621 # read from db: ns
2622 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2623 self.logger.debug(logging_text + stage[1])
2624 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2625 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2626 self.logger.debug(logging_text + stage[1])
2627 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2628 self.fs.sync(db_nsr["nsd-id"])
2629 db_nsr["nsd"] = nsd
2630 # nsr_name = db_nsr["name"] # TODO short-name??
2631
2632 # read from db: vnf's of this ns
2633 stage[1] = "Getting vnfrs from db."
2634 self.logger.debug(logging_text + stage[1])
2635 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2636
2637 # read from db: vnfd's for every vnf
2638 db_vnfds = [] # every vnfd data
2639
2640 # for each vnf in ns, read vnfd
2641 for vnfr in db_vnfrs_list:
2642 if vnfr.get("kdur"):
2643 kdur_list = []
2644 for kdur in vnfr["kdur"]:
2645 if kdur.get("additionalParams"):
2646 kdur["additionalParams"] = json.loads(
2647 kdur["additionalParams"]
2648 )
2649 kdur_list.append(kdur)
2650 vnfr["kdur"] = kdur_list
2651
2652 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2653 vnfd_id = vnfr["vnfd-id"]
2654 vnfd_ref = vnfr["vnfd-ref"]
2655 self.fs.sync(vnfd_id)
2656
2657 # if we haven't this vnfd, read it from db
2658 if vnfd_id not in db_vnfds:
2659 # read from db
2660 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2661 vnfd_id, vnfd_ref
2662 )
2663 self.logger.debug(logging_text + stage[1])
2664 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2665
2666 # store vnfd
2667 db_vnfds.append(vnfd)
2668
2669 # Get or generates the _admin.deployed.VCA list
2670 vca_deployed_list = None
2671 if db_nsr["_admin"].get("deployed"):
2672 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2673 if vca_deployed_list is None:
2674 vca_deployed_list = []
2675 configuration_status_list = []
2676 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2677 db_nsr_update["configurationStatus"] = configuration_status_list
2678 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2679 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2680 elif isinstance(vca_deployed_list, dict):
2681 # maintain backward compatibility. Change a dict to list at database
2682 vca_deployed_list = list(vca_deployed_list.values())
2683 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2684 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2685
2686 if not isinstance(
2687 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2688 ):
2689 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2690 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2691
2692 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2693 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2694 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2695 self.db.set_list(
2696 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2697 )
2698
2699 # n2vc_redesign STEP 2 Deploy Network Scenario
2700 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2701 self._write_op_status(op_id=nslcmop_id, stage=stage)
2702
2703 stage[1] = "Deploying KDUs."
2704 # self.logger.debug(logging_text + "Before deploy_kdus")
2705 # Call to deploy_kdus in case exists the "vdu:kdu" param
2706 await self.deploy_kdus(
2707 logging_text=logging_text,
2708 nsr_id=nsr_id,
2709 nslcmop_id=nslcmop_id,
2710 db_vnfrs=db_vnfrs,
2711 db_vnfds=db_vnfds,
2712 task_instantiation_info=tasks_dict_info,
2713 )
2714
2715 stage[1] = "Getting VCA public key."
2716 # n2vc_redesign STEP 1 Get VCA public ssh-key
2717 # feature 1429. Add n2vc public key to needed VMs
2718 n2vc_key = self.n2vc.get_public_key()
2719 n2vc_key_list = [n2vc_key]
2720 if self.vca_config.public_key:
2721 n2vc_key_list.append(self.vca_config.public_key)
2722
2723 stage[1] = "Deploying NS at VIM."
2724 task_ro = asyncio.ensure_future(
2725 self.instantiate_RO(
2726 logging_text=logging_text,
2727 nsr_id=nsr_id,
2728 nsd=nsd,
2729 db_nsr=db_nsr,
2730 db_nslcmop=db_nslcmop,
2731 db_vnfrs=db_vnfrs,
2732 db_vnfds=db_vnfds,
2733 n2vc_key_list=n2vc_key_list,
2734 stage=stage,
2735 )
2736 )
2737 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2738 tasks_dict_info[task_ro] = "Deploying at VIM"
2739
2740 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2741 stage[1] = "Deploying Execution Environments."
2742 self.logger.debug(logging_text + stage[1])
2743
2744 # create namespace and certificate if any helm based EE is present in the NS
2745 if check_helm_ee_in_ns(db_vnfds):
2746 # TODO: create EE namespace
2747 # create TLS certificates
2748 await self.vca_map["helm-v3"].create_tls_certificate(
2749 secret_name="ee-tls-{}".format(nsr_id),
2750 dns_prefix="*",
2751 nsr_id=nsr_id,
2752 usage="server auth",
2753 )
2754
2755 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2756 for vnf_profile in get_vnf_profiles(nsd):
2757 vnfd_id = vnf_profile["vnfd-id"]
2758 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2759 member_vnf_index = str(vnf_profile["id"])
2760 db_vnfr = db_vnfrs[member_vnf_index]
2761 base_folder = vnfd["_admin"]["storage"]
2762 vdu_id = None
2763 vdu_index = 0
2764 vdu_name = None
2765 kdu_name = None
2766 kdu_index = None
2767
2768 # Get additional parameters
2769 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2770 if db_vnfr.get("additionalParamsForVnf"):
2771 deploy_params.update(
2772 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2773 )
2774
2775 descriptor_config = get_configuration(vnfd, vnfd["id"])
2776 if descriptor_config:
2777 self._deploy_n2vc(
2778 logging_text=logging_text
2779 + "member_vnf_index={} ".format(member_vnf_index),
2780 db_nsr=db_nsr,
2781 db_vnfr=db_vnfr,
2782 nslcmop_id=nslcmop_id,
2783 nsr_id=nsr_id,
2784 nsi_id=nsi_id,
2785 vnfd_id=vnfd_id,
2786 vdu_id=vdu_id,
2787 kdu_name=kdu_name,
2788 member_vnf_index=member_vnf_index,
2789 vdu_index=vdu_index,
2790 kdu_index=kdu_index,
2791 vdu_name=vdu_name,
2792 deploy_params=deploy_params,
2793 descriptor_config=descriptor_config,
2794 base_folder=base_folder,
2795 task_instantiation_info=tasks_dict_info,
2796 stage=stage,
2797 )
2798
2799 # Deploy charms for each VDU that supports one.
2800 for vdud in get_vdu_list(vnfd):
2801 vdu_id = vdud["id"]
2802 descriptor_config = get_configuration(vnfd, vdu_id)
2803 vdur = find_in_list(
2804 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2805 )
2806
2807 if vdur.get("additionalParams"):
2808 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2809 else:
2810 deploy_params_vdu = deploy_params
2811 deploy_params_vdu["OSM"] = get_osm_params(
2812 db_vnfr, vdu_id, vdu_count_index=0
2813 )
2814 vdud_count = get_number_of_instances(vnfd, vdu_id)
2815
2816 self.logger.debug("VDUD > {}".format(vdud))
2817 self.logger.debug(
2818 "Descriptor config > {}".format(descriptor_config)
2819 )
2820 if descriptor_config:
2821 vdu_name = None
2822 kdu_name = None
2823 kdu_index = None
2824 for vdu_index in range(vdud_count):
2825 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2826 self._deploy_n2vc(
2827 logging_text=logging_text
2828 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2829 member_vnf_index, vdu_id, vdu_index
2830 ),
2831 db_nsr=db_nsr,
2832 db_vnfr=db_vnfr,
2833 nslcmop_id=nslcmop_id,
2834 nsr_id=nsr_id,
2835 nsi_id=nsi_id,
2836 vnfd_id=vnfd_id,
2837 vdu_id=vdu_id,
2838 kdu_name=kdu_name,
2839 kdu_index=kdu_index,
2840 member_vnf_index=member_vnf_index,
2841 vdu_index=vdu_index,
2842 vdu_name=vdu_name,
2843 deploy_params=deploy_params_vdu,
2844 descriptor_config=descriptor_config,
2845 base_folder=base_folder,
2846 task_instantiation_info=tasks_dict_info,
2847 stage=stage,
2848 )
2849 for kdud in get_kdu_list(vnfd):
2850 kdu_name = kdud["name"]
2851 descriptor_config = get_configuration(vnfd, kdu_name)
2852 if descriptor_config:
2853 vdu_id = None
2854 vdu_index = 0
2855 vdu_name = None
2856 kdu_index, kdur = next(
2857 x
2858 for x in enumerate(db_vnfr["kdur"])
2859 if x[1]["kdu-name"] == kdu_name
2860 )
2861 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2862 if kdur.get("additionalParams"):
2863 deploy_params_kdu.update(
2864 parse_yaml_strings(kdur["additionalParams"].copy())
2865 )
2866
2867 self._deploy_n2vc(
2868 logging_text=logging_text,
2869 db_nsr=db_nsr,
2870 db_vnfr=db_vnfr,
2871 nslcmop_id=nslcmop_id,
2872 nsr_id=nsr_id,
2873 nsi_id=nsi_id,
2874 vnfd_id=vnfd_id,
2875 vdu_id=vdu_id,
2876 kdu_name=kdu_name,
2877 member_vnf_index=member_vnf_index,
2878 vdu_index=vdu_index,
2879 kdu_index=kdu_index,
2880 vdu_name=vdu_name,
2881 deploy_params=deploy_params_kdu,
2882 descriptor_config=descriptor_config,
2883 base_folder=base_folder,
2884 task_instantiation_info=tasks_dict_info,
2885 stage=stage,
2886 )
2887
2888 # Check if this NS has a charm configuration
2889 descriptor_config = nsd.get("ns-configuration")
2890 if descriptor_config and descriptor_config.get("juju"):
2891 vnfd_id = None
2892 db_vnfr = None
2893 member_vnf_index = None
2894 vdu_id = None
2895 kdu_name = None
2896 kdu_index = None
2897 vdu_index = 0
2898 vdu_name = None
2899
2900 # Get additional parameters
2901 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2902 if db_nsr.get("additionalParamsForNs"):
2903 deploy_params.update(
2904 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2905 )
2906 base_folder = nsd["_admin"]["storage"]
2907 self._deploy_n2vc(
2908 logging_text=logging_text,
2909 db_nsr=db_nsr,
2910 db_vnfr=db_vnfr,
2911 nslcmop_id=nslcmop_id,
2912 nsr_id=nsr_id,
2913 nsi_id=nsi_id,
2914 vnfd_id=vnfd_id,
2915 vdu_id=vdu_id,
2916 kdu_name=kdu_name,
2917 member_vnf_index=member_vnf_index,
2918 vdu_index=vdu_index,
2919 kdu_index=kdu_index,
2920 vdu_name=vdu_name,
2921 deploy_params=deploy_params,
2922 descriptor_config=descriptor_config,
2923 base_folder=base_folder,
2924 task_instantiation_info=tasks_dict_info,
2925 stage=stage,
2926 )
2927
2928 # rest of staff will be done at finally
2929
2930 except (
2931 ROclient.ROClientException,
2932 DbException,
2933 LcmException,
2934 N2VCException,
2935 ) as e:
2936 self.logger.error(
2937 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2938 )
2939 exc = e
2940 except asyncio.CancelledError:
2941 self.logger.error(
2942 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2943 )
2944 exc = "Operation was cancelled"
2945 except Exception as e:
2946 exc = traceback.format_exc()
2947 self.logger.critical(
2948 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2949 exc_info=True,
2950 )
2951 finally:
2952 if exc:
2953 error_list.append(str(exc))
2954 try:
2955 # wait for pending tasks
2956 if tasks_dict_info:
2957 stage[1] = "Waiting for instantiate pending tasks."
2958 self.logger.debug(logging_text + stage[1])
2959 error_list += await self._wait_for_tasks(
2960 logging_text,
2961 tasks_dict_info,
2962 timeout_ns_deploy,
2963 stage,
2964 nslcmop_id,
2965 nsr_id=nsr_id,
2966 )
2967 stage[1] = stage[2] = ""
2968 except asyncio.CancelledError:
2969 error_list.append("Cancelled")
2970 # TODO cancel all tasks
2971 except Exception as exc:
2972 error_list.append(str(exc))
2973
2974 # update operation-status
2975 db_nsr_update["operational-status"] = "running"
2976 # let's begin with VCA 'configured' status (later we can change it)
2977 db_nsr_update["config-status"] = "configured"
2978 for task, task_name in tasks_dict_info.items():
2979 if not task.done() or task.cancelled() or task.exception():
2980 if task_name.startswith(self.task_name_deploy_vca):
2981 # A N2VC task is pending
2982 db_nsr_update["config-status"] = "failed"
2983 else:
2984 # RO or KDU task is pending
2985 db_nsr_update["operational-status"] = "failed"
2986
2987 # update status at database
2988 if error_list:
2989 error_detail = ". ".join(error_list)
2990 self.logger.error(logging_text + error_detail)
2991 error_description_nslcmop = "{} Detail: {}".format(
2992 stage[0], error_detail
2993 )
2994 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2995 nslcmop_id, stage[0]
2996 )
2997
2998 db_nsr_update["detailed-status"] = (
2999 error_description_nsr + " Detail: " + error_detail
3000 )
3001 db_nslcmop_update["detailed-status"] = error_detail
3002 nslcmop_operation_state = "FAILED"
3003 ns_state = "BROKEN"
3004 else:
3005 error_detail = None
3006 error_description_nsr = error_description_nslcmop = None
3007 ns_state = "READY"
3008 db_nsr_update["detailed-status"] = "Done"
3009 db_nslcmop_update["detailed-status"] = "Done"
3010 nslcmop_operation_state = "COMPLETED"
3011 # Gather auto-healing and auto-scaling alerts for each vnfr
3012 healing_alerts = []
3013 scaling_alerts = []
3014 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3015 vnfd = next(
3016 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3017 )
3018 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3019 for alert in healing_alerts:
3020 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3021 self.db.create("alerts", alert)
3022
3023 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3024 for alert in scaling_alerts:
3025 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3026 self.db.create("alerts", alert)
3027
3028 if db_nsr:
3029 self._write_ns_status(
3030 nsr_id=nsr_id,
3031 ns_state=ns_state,
3032 current_operation="IDLE",
3033 current_operation_id=None,
3034 error_description=error_description_nsr,
3035 error_detail=error_detail,
3036 other_update=db_nsr_update,
3037 )
3038 self._write_op_status(
3039 op_id=nslcmop_id,
3040 stage="",
3041 error_message=error_description_nslcmop,
3042 operation_state=nslcmop_operation_state,
3043 other_update=db_nslcmop_update,
3044 )
3045
3046 if nslcmop_operation_state:
3047 try:
3048 await self.msg.aiowrite(
3049 "ns",
3050 "instantiated",
3051 {
3052 "nsr_id": nsr_id,
3053 "nslcmop_id": nslcmop_id,
3054 "operationState": nslcmop_operation_state,
3055 },
3056 loop=self.loop,
3057 )
3058 except Exception as e:
3059 self.logger.error(
3060 logging_text + "kafka_write notification Exception {}".format(e)
3061 )
3062
3063 self.logger.debug(logging_text + "Exit")
3064 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3065
3066 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3067 if vnfd_id not in cached_vnfds:
3068 cached_vnfds[vnfd_id] = self.db.get_one(
3069 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3070 )
3071 return cached_vnfds[vnfd_id]
3072
3073 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3074 if vnf_profile_id not in cached_vnfrs:
3075 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3076 "vnfrs",
3077 {
3078 "member-vnf-index-ref": vnf_profile_id,
3079 "nsr-id-ref": nsr_id,
3080 },
3081 )
3082 return cached_vnfrs[vnf_profile_id]
3083
3084 def _is_deployed_vca_in_relation(
3085 self, vca: DeployedVCA, relation: Relation
3086 ) -> bool:
3087 found = False
3088 for endpoint in (relation.provider, relation.requirer):
3089 if endpoint["kdu-resource-profile-id"]:
3090 continue
3091 found = (
3092 vca.vnf_profile_id == endpoint.vnf_profile_id
3093 and vca.vdu_profile_id == endpoint.vdu_profile_id
3094 and vca.execution_environment_ref == endpoint.execution_environment_ref
3095 )
3096 if found:
3097 break
3098 return found
3099
    def _update_ee_relation_data_with_implicit_data(
        self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
    ):
        """Complete an EE relation-endpoint dict with implicitly derivable data.

        Normalizes ``ee_relation_data`` through ``safe_get_ee_relation`` and,
        for VNF/VDU level endpoints that lack an "execution-environment-ref",
        resolves the ref from the juju execution environment declared for that
        entity in the corresponding VNFD.

        :param nsr_id: id of the NS record the relation belongs to
        :param nsd: NS descriptor, used to resolve vnf profiles and project
        :param ee_relation_data: endpoint dict to be completed
        :param cached_vnfds: memo dict used by self._get_vnfd to avoid re-reads
        :param vnf_profile_id: default vnf-profile for endpoints that omit it
        :return: the completed endpoint dict
        :raises Exception: if no execution environment is found in the VNFD
        """
        ee_relation_data = safe_get_ee_relation(
            nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
        )
        ee_relation_level = EELevel.get_level(ee_relation_data)
        # Only VNF/VDU endpoints can carry an implicit execution-environment-ref
        if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
            "execution-environment-ref"
        ]:
            vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
            vnfd_id = vnf_profile["vnfd-id"]
            project = nsd["_admin"]["projects_read"][0]
            db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
            # VNF-level endpoints reference the vnfd itself; VDU-level ones the
            # vdu profile inside it
            entity_id = (
                vnfd_id
                if ee_relation_level == EELevel.VNF
                else ee_relation_data["vdu-profile-id"]
            )
            ee = get_juju_ee_ref(db_vnfd, entity_id)
            if not ee:
                raise Exception(
                    f"not execution environments found for ee_relation {ee_relation_data}"
                )
            ee_relation_data["execution-environment-ref"] = ee["id"]
        return ee_relation_data
3126
3127 def _get_ns_relations(
3128 self,
3129 nsr_id: str,
3130 nsd: Dict[str, Any],
3131 vca: DeployedVCA,
3132 cached_vnfds: Dict[str, Any],
3133 ) -> List[Relation]:
3134 relations = []
3135 db_ns_relations = get_ns_configuration_relation_list(nsd)
3136 for r in db_ns_relations:
3137 provider_dict = None
3138 requirer_dict = None
3139 if all(key in r for key in ("provider", "requirer")):
3140 provider_dict = r["provider"]
3141 requirer_dict = r["requirer"]
3142 elif "entities" in r:
3143 provider_id = r["entities"][0]["id"]
3144 provider_dict = {
3145 "nsr-id": nsr_id,
3146 "endpoint": r["entities"][0]["endpoint"],
3147 }
3148 if provider_id != nsd["id"]:
3149 provider_dict["vnf-profile-id"] = provider_id
3150 requirer_id = r["entities"][1]["id"]
3151 requirer_dict = {
3152 "nsr-id": nsr_id,
3153 "endpoint": r["entities"][1]["endpoint"],
3154 }
3155 if requirer_id != nsd["id"]:
3156 requirer_dict["vnf-profile-id"] = requirer_id
3157 else:
3158 raise Exception(
3159 "provider/requirer or entities must be included in the relation."
3160 )
3161 relation_provider = self._update_ee_relation_data_with_implicit_data(
3162 nsr_id, nsd, provider_dict, cached_vnfds
3163 )
3164 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3165 nsr_id, nsd, requirer_dict, cached_vnfds
3166 )
3167 provider = EERelation(relation_provider)
3168 requirer = EERelation(relation_requirer)
3169 relation = Relation(r["name"], provider, requirer)
3170 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3171 if vca_in_relation:
3172 relations.append(relation)
3173 return relations
3174
3175 def _get_vnf_relations(
3176 self,
3177 nsr_id: str,
3178 nsd: Dict[str, Any],
3179 vca: DeployedVCA,
3180 cached_vnfds: Dict[str, Any],
3181 ) -> List[Relation]:
3182 relations = []
3183 if vca.target_element == "ns":
3184 self.logger.debug("VCA is a NS charm, not a VNF.")
3185 return relations
3186 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3187 vnf_profile_id = vnf_profile["id"]
3188 vnfd_id = vnf_profile["vnfd-id"]
3189 project = nsd["_admin"]["projects_read"][0]
3190 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3191 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3192 for r in db_vnf_relations:
3193 provider_dict = None
3194 requirer_dict = None
3195 if all(key in r for key in ("provider", "requirer")):
3196 provider_dict = r["provider"]
3197 requirer_dict = r["requirer"]
3198 elif "entities" in r:
3199 provider_id = r["entities"][0]["id"]
3200 provider_dict = {
3201 "nsr-id": nsr_id,
3202 "vnf-profile-id": vnf_profile_id,
3203 "endpoint": r["entities"][0]["endpoint"],
3204 }
3205 if provider_id != vnfd_id:
3206 provider_dict["vdu-profile-id"] = provider_id
3207 requirer_id = r["entities"][1]["id"]
3208 requirer_dict = {
3209 "nsr-id": nsr_id,
3210 "vnf-profile-id": vnf_profile_id,
3211 "endpoint": r["entities"][1]["endpoint"],
3212 }
3213 if requirer_id != vnfd_id:
3214 requirer_dict["vdu-profile-id"] = requirer_id
3215 else:
3216 raise Exception(
3217 "provider/requirer or entities must be included in the relation."
3218 )
3219 relation_provider = self._update_ee_relation_data_with_implicit_data(
3220 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3221 )
3222 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3223 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3224 )
3225 provider = EERelation(relation_provider)
3226 requirer = EERelation(relation_requirer)
3227 relation = Relation(r["name"], provider, requirer)
3228 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3229 if vca_in_relation:
3230 relations.append(relation)
3231 return relations
3232
    def _get_kdu_resource_data(
        self,
        ee_relation: EERelation,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
    ) -> DeployedK8sResource:
        """Resolve the deployed KDU resource referenced by a KDU-level endpoint.

        Looks up the VNFD of the endpoint's vnf-profile, reads the kdu resource
        profile from it, finds the matching deployed KDU in the NS record and
        annotates it with the concrete resource name.

        :param ee_relation: KDU-level relation endpoint
        :param db_nsr: full NS record (provides the nsd and _admin.deployed)
        :param cached_vnfds: memo dict used by self._get_vnfd
        :return: the deployed KDU dict, updated with "resource-name"
        """
        nsd = get_nsd(db_nsr)
        vnf_profiles = get_vnf_profiles(nsd)
        # vnfd of the vnf-profile this endpoint belongs to
        vnfd_id = find_in_list(
            vnf_profiles,
            lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
        )["vnfd-id"]
        project = nsd["_admin"]["projects_read"][0]
        db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
        kdu_resource_profile = get_kdu_resource_profile(
            db_vnfd, ee_relation.kdu_resource_profile_id
        )
        kdu_name = kdu_resource_profile["kdu-name"]
        deployed_kdu, _ = get_deployed_kdu(
            db_nsr.get("_admin", ()).get("deployed", ()),
            kdu_name,
            ee_relation.vnf_profile_id,
        )
        # the relation logic needs the concrete k8s resource name as well
        deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
        return deployed_kdu
3258
3259 def _get_deployed_component(
3260 self,
3261 ee_relation: EERelation,
3262 db_nsr: Dict[str, Any],
3263 cached_vnfds: Dict[str, Any],
3264 ) -> DeployedComponent:
3265 nsr_id = db_nsr["_id"]
3266 deployed_component = None
3267 ee_level = EELevel.get_level(ee_relation)
3268 if ee_level == EELevel.NS:
3269 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3270 if vca:
3271 deployed_component = DeployedVCA(nsr_id, vca)
3272 elif ee_level == EELevel.VNF:
3273 vca = get_deployed_vca(
3274 db_nsr,
3275 {
3276 "vdu_id": None,
3277 "member-vnf-index": ee_relation.vnf_profile_id,
3278 "ee_descriptor_id": ee_relation.execution_environment_ref,
3279 },
3280 )
3281 if vca:
3282 deployed_component = DeployedVCA(nsr_id, vca)
3283 elif ee_level == EELevel.VDU:
3284 vca = get_deployed_vca(
3285 db_nsr,
3286 {
3287 "vdu_id": ee_relation.vdu_profile_id,
3288 "member-vnf-index": ee_relation.vnf_profile_id,
3289 "ee_descriptor_id": ee_relation.execution_environment_ref,
3290 },
3291 )
3292 if vca:
3293 deployed_component = DeployedVCA(nsr_id, vca)
3294 elif ee_level == EELevel.KDU:
3295 kdu_resource_data = self._get_kdu_resource_data(
3296 ee_relation, db_nsr, cached_vnfds
3297 )
3298 if kdu_resource_data:
3299 deployed_component = DeployedK8sResource(kdu_resource_data)
3300 return deployed_component
3301
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to establish one charm relation between its two deployed endpoints.

        The relation is only added when both endpoints are deployed and have
        their configuration software installed; otherwise False is returned so
        the caller can retry later.

        :param relation: relation (name + provider/requirer endpoints) to add
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param db_nsr: current NS record
        :param cached_vnfds: memo dict used when resolving endpoint components
        :param cached_vnfrs: memo dict used when resolving endpoint vnfrs
        :return: True if the relation was added, False if peers are not ready
        :raises LcmException: if the VCA connector fails to add the relation
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints carry no vnf-profile, hence no vnfr lookup
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            try:
                await self.vca_map[vca_type].add_relation(
                    provider=provider_relation_endpoint,
                    requirer=requirer_relation_endpoint,
                )
            except N2VCException as exception:
                self.logger.error(exception)
                raise LcmException(exception)
            return True
        return False
3362
3363 async def _add_vca_relations(
3364 self,
3365 logging_text,
3366 nsr_id,
3367 vca_type: str,
3368 vca_index: int,
3369 timeout: int = 3600,
3370 ) -> bool:
3371 # steps:
3372 # 1. find all relations for this VCA
3373 # 2. wait for other peers related
3374 # 3. add relations
3375
3376 try:
3377 # STEP 1: find all relations for this VCA
3378
3379 # read nsr record
3380 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3381 nsd = get_nsd(db_nsr)
3382
3383 # this VCA data
3384 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3385 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3386
3387 cached_vnfds = {}
3388 cached_vnfrs = {}
3389 relations = []
3390 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3391 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3392
3393 # if no relations, terminate
3394 if not relations:
3395 self.logger.debug(logging_text + " No relations")
3396 return True
3397
3398 self.logger.debug(logging_text + " adding relations {}".format(relations))
3399
3400 # add all relations
3401 start = time()
3402 while True:
3403 # check timeout
3404 now = time()
3405 if now - start >= timeout:
3406 self.logger.error(logging_text + " : timeout adding relations")
3407 return False
3408
3409 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3410 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3411
3412 # for each relation, find the VCA's related
3413 for relation in relations.copy():
3414 added = await self._add_relation(
3415 relation,
3416 vca_type,
3417 db_nsr,
3418 cached_vnfds,
3419 cached_vnfrs,
3420 )
3421 if added:
3422 relations.remove(relation)
3423
3424 if not relations:
3425 self.logger.debug("Relations added")
3426 break
3427 await asyncio.sleep(5.0)
3428
3429 return True
3430
3431 except Exception as e:
3432 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3433 return False
3434
3435 async def _install_kdu(
3436 self,
3437 nsr_id: str,
3438 nsr_db_path: str,
3439 vnfr_data: dict,
3440 kdu_index: int,
3441 kdud: dict,
3442 vnfd: dict,
3443 k8s_instance_info: dict,
3444 k8params: dict = None,
3445 timeout: int = 600,
3446 vca_id: str = None,
3447 ):
3448 try:
3449 k8sclustertype = k8s_instance_info["k8scluster-type"]
3450 # Instantiate kdu
3451 db_dict_install = {
3452 "collection": "nsrs",
3453 "filter": {"_id": nsr_id},
3454 "path": nsr_db_path,
3455 }
3456
3457 if k8s_instance_info.get("kdu-deployment-name"):
3458 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3459 else:
3460 kdu_instance = self.k8scluster_map[
3461 k8sclustertype
3462 ].generate_kdu_instance_name(
3463 db_dict=db_dict_install,
3464 kdu_model=k8s_instance_info["kdu-model"],
3465 kdu_name=k8s_instance_info["kdu-name"],
3466 )
3467
3468 # Update the nsrs table with the kdu-instance value
3469 self.update_db_2(
3470 item="nsrs",
3471 _id=nsr_id,
3472 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3473 )
3474
3475 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3476 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3477 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3478 # namespace, this first verification could be removed, and the next step would be done for any kind
3479 # of KNF.
3480 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3481 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3482 if k8sclustertype in ("juju", "juju-bundle"):
3483 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3484 # that the user passed a namespace which he wants its KDU to be deployed in)
3485 if (
3486 self.db.count(
3487 table="nsrs",
3488 q_filter={
3489 "_id": nsr_id,
3490 "_admin.projects_write": k8s_instance_info["namespace"],
3491 "_admin.projects_read": k8s_instance_info["namespace"],
3492 },
3493 )
3494 > 0
3495 ):
3496 self.logger.debug(
3497 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3498 )
3499 self.update_db_2(
3500 item="nsrs",
3501 _id=nsr_id,
3502 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3503 )
3504 k8s_instance_info["namespace"] = kdu_instance
3505
3506 await self.k8scluster_map[k8sclustertype].install(
3507 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3508 kdu_model=k8s_instance_info["kdu-model"],
3509 atomic=True,
3510 params=k8params,
3511 db_dict=db_dict_install,
3512 timeout=timeout,
3513 kdu_name=k8s_instance_info["kdu-name"],
3514 namespace=k8s_instance_info["namespace"],
3515 kdu_instance=kdu_instance,
3516 vca_id=vca_id,
3517 )
3518
3519 # Obtain services to obtain management service ip
3520 services = await self.k8scluster_map[k8sclustertype].get_services(
3521 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3522 kdu_instance=kdu_instance,
3523 namespace=k8s_instance_info["namespace"],
3524 )
3525
3526 # Obtain management service info (if exists)
3527 vnfr_update_dict = {}
3528 kdu_config = get_configuration(vnfd, kdud["name"])
3529 if kdu_config:
3530 target_ee_list = kdu_config.get("execution-environment-list", [])
3531 else:
3532 target_ee_list = []
3533
3534 if services:
3535 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3536 mgmt_services = [
3537 service
3538 for service in kdud.get("service", [])
3539 if service.get("mgmt-service")
3540 ]
3541 for mgmt_service in mgmt_services:
3542 for service in services:
3543 if service["name"].startswith(mgmt_service["name"]):
3544 # Mgmt service found, Obtain service ip
3545 ip = service.get("external_ip", service.get("cluster_ip"))
3546 if isinstance(ip, list) and len(ip) == 1:
3547 ip = ip[0]
3548
3549 vnfr_update_dict[
3550 "kdur.{}.ip-address".format(kdu_index)
3551 ] = ip
3552
3553 # Check if must update also mgmt ip at the vnf
3554 service_external_cp = mgmt_service.get(
3555 "external-connection-point-ref"
3556 )
3557 if service_external_cp:
3558 if (
3559 deep_get(vnfd, ("mgmt-interface", "cp"))
3560 == service_external_cp
3561 ):
3562 vnfr_update_dict["ip-address"] = ip
3563
3564 if find_in_list(
3565 target_ee_list,
3566 lambda ee: ee.get(
3567 "external-connection-point-ref", ""
3568 )
3569 == service_external_cp,
3570 ):
3571 vnfr_update_dict[
3572 "kdur.{}.ip-address".format(kdu_index)
3573 ] = ip
3574 break
3575 else:
3576 self.logger.warn(
3577 "Mgmt service name: {} not found".format(
3578 mgmt_service["name"]
3579 )
3580 )
3581
3582 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3583 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3584
3585 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3586 if (
3587 kdu_config
3588 and kdu_config.get("initial-config-primitive")
3589 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3590 ):
3591 initial_config_primitive_list = kdu_config.get(
3592 "initial-config-primitive"
3593 )
3594 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3595
3596 for initial_config_primitive in initial_config_primitive_list:
3597 primitive_params_ = self._map_primitive_params(
3598 initial_config_primitive, {}, {}
3599 )
3600
3601 await asyncio.wait_for(
3602 self.k8scluster_map[k8sclustertype].exec_primitive(
3603 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3604 kdu_instance=kdu_instance,
3605 primitive_name=initial_config_primitive["name"],
3606 params=primitive_params_,
3607 db_dict=db_dict_install,
3608 vca_id=vca_id,
3609 ),
3610 timeout=timeout,
3611 )
3612
3613 except Exception as e:
3614 # Prepare update db with error and raise exception
3615 try:
3616 self.update_db_2(
3617 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3618 )
3619 self.update_db_2(
3620 "vnfrs",
3621 vnfr_data.get("_id"),
3622 {"kdur.{}.status".format(kdu_index): "ERROR"},
3623 )
3624 except Exception:
3625 # ignore to keep original exception
3626 pass
3627 # reraise original error
3628 raise
3629
3630 return kdu_instance
3631
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU declared in the vnfrs of this NS.

        For each kdur of each vnfr: resolves the k8s cluster and its internal
        uuid (initializing helm-v3 info for old clusters when missing),
        synchronizes helm repos once per cluster, stores the deployment entry
        under _admin.deployed.K8s in the nsr and spawns one _install_kdu task,
        registering it in self.lcm_tasks and task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS lcm operation id (for task registration)
        :param db_vnfrs: dict of vnfr records of this NS
        :param db_vnfds: list of vnfd records referenced by the vnfrs
        :param task_instantiation_info: dict task->description, filled with the
            created asyncio tasks
        :raises LcmException: on any error while preparing the deployments
        """
        # Launch kdus if present in the descriptor

        # cache: k8s cluster id -> internal uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Return (and memoize) the internal uuid of cluster_id for cluster_type."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3903
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of descriptor_config.

        For each juju/helm ee-item of the configuration descriptor: finds (or
        creates) the matching entry in <nsrs>._admin.deployed.VCA, then spawns
        instantiate_N2VC as an asyncio task, registering it in self.lcm_tasks
        and in task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # determine VCA type from the ee-item contents; items that are
            # neither juju nor helm are skipped
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # for/else: no matching VCA entry found in the nsr record
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4067
4068 @staticmethod
4069 def _create_nslcmop(nsr_id, operation, params):
4070 """
4071 Creates a ns-lcm-opp content to be stored at database.
4072 :param nsr_id: internal id of the instance
4073 :param operation: instantiate, terminate, scale, action, ...
4074 :param params: user parameters for the operation
4075 :return: dictionary following SOL005 format
4076 """
4077 # Raise exception if invalid arguments
4078 if not (nsr_id and operation and params):
4079 raise LcmException(
4080 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4081 )
4082 now = time()
4083 _id = str(uuid4())
4084 nslcmop = {
4085 "id": _id,
4086 "_id": _id,
4087 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4088 "operationState": "PROCESSING",
4089 "statusEnteredTime": now,
4090 "nsInstanceId": nsr_id,
4091 "lcmOperationType": operation,
4092 "startTime": now,
4093 "isAutomaticInvocation": False,
4094 "operationParams": params,
4095 "isCancelPending": False,
4096 "links": {
4097 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
4098 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
4099 },
4100 }
4101 return nslcmop
4102
4103 def _format_additional_params(self, params):
4104 params = params or {}
4105 for key, value in params.items():
4106 if str(value).startswith("!!yaml "):
4107 params[key] = yaml.safe_load(value[7:])
4108 return params
4109
4110 def _get_terminate_primitive_params(self, seq, vnf_index):
4111 primitive = seq.get("name")
4112 primitive_params = {}
4113 params = {
4114 "member_vnf_index": vnf_index,
4115 "primitive": primitive,
4116 "primitive_params": primitive_params,
4117 }
4118 desc_params = {}
4119 return self._map_primitive_params(seq, params, desc_params)
4120
4121 # sub-operations
4122
4123 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4124 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4125 if op.get("operationState") == "COMPLETED":
4126 # b. Skip sub-operation
4127 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4128 return self.SUBOPERATION_STATUS_SKIP
4129 else:
4130 # c. retry executing sub-operation
4131 # The sub-operation exists, and operationState != 'COMPLETED'
4132 # Update operationState = 'PROCESSING' to indicate a retry.
4133 operationState = "PROCESSING"
4134 detailed_status = "In progress"
4135 self._update_suboperation_status(
4136 db_nslcmop, op_index, operationState, detailed_status
4137 )
4138 # Return the sub-operation index
4139 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4140 # with arguments extracted from the sub-operation
4141 return op_index
4142
4143 # Find a sub-operation where all keys in a matching dictionary must match
4144 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4145 def _find_suboperation(self, db_nslcmop, match):
4146 if db_nslcmop and match:
4147 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4148 for i, op in enumerate(op_list):
4149 if all(op.get(k) == match[k] for k in match):
4150 return i
4151 return self.SUBOPERATION_STATUS_NOT_FOUND
4152
4153 # Update status for a sub-operation given its index
4154 def _update_suboperation_status(
4155 self, db_nslcmop, op_index, operationState, detailed_status
4156 ):
4157 # Update DB for HA tasks
4158 q_filter = {"_id": db_nslcmop["_id"]}
4159 update_dict = {
4160 "_admin.operations.{}.operationState".format(op_index): operationState,
4161 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4162 }
4163 self.db.set_one(
4164 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4165 )
4166
4167 # Add sub-operation, return the index of the added sub-operation
4168 # Optionally, set operationState, detailed-status, and operationType
4169 # Status and type are currently set for 'scale' sub-operations:
4170 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4171 # 'detailed-status' : status message
4172 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4173 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4174 def _add_suboperation(
4175 self,
4176 db_nslcmop,
4177 vnf_index,
4178 vdu_id,
4179 vdu_count_index,
4180 vdu_name,
4181 primitive,
4182 mapped_primitive_params,
4183 operationState=None,
4184 detailed_status=None,
4185 operationType=None,
4186 RO_nsr_id=None,
4187 RO_scaling_info=None,
4188 ):
4189 if not db_nslcmop:
4190 return self.SUBOPERATION_STATUS_NOT_FOUND
4191 # Get the "_admin.operations" list, if it exists
4192 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4193 op_list = db_nslcmop_admin.get("operations")
4194 # Create or append to the "_admin.operations" list
4195 new_op = {
4196 "member_vnf_index": vnf_index,
4197 "vdu_id": vdu_id,
4198 "vdu_count_index": vdu_count_index,
4199 "primitive": primitive,
4200 "primitive_params": mapped_primitive_params,
4201 }
4202 if operationState:
4203 new_op["operationState"] = operationState
4204 if detailed_status:
4205 new_op["detailed-status"] = detailed_status
4206 if operationType:
4207 new_op["lcmOperationType"] = operationType
4208 if RO_nsr_id:
4209 new_op["RO_nsr_id"] = RO_nsr_id
4210 if RO_scaling_info:
4211 new_op["RO_scaling_info"] = RO_scaling_info
4212 if not op_list:
4213 # No existing operations, create key 'operations' with current operation as first list element
4214 db_nslcmop_admin.update({"operations": [new_op]})
4215 op_list = db_nslcmop_admin.get("operations")
4216 else:
4217 # Existing operations, append operation to list
4218 op_list.append(new_op)
4219
4220 db_nslcmop_update = {"_admin.operations": op_list}
4221 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4222 op_index = len(op_list) - 1
4223 return op_index
4224
4225 # Helper methods for scale() sub-operations
4226
4227 # pre-scale/post-scale:
4228 # Check for 3 different cases:
4229 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4230 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4231 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4232 def _check_or_add_scale_suboperation(
4233 self,
4234 db_nslcmop,
4235 vnf_index,
4236 vnf_config_primitive,
4237 primitive_params,
4238 operationType,
4239 RO_nsr_id=None,
4240 RO_scaling_info=None,
4241 ):
4242 # Find this sub-operation
4243 if RO_nsr_id and RO_scaling_info:
4244 operationType = "SCALE-RO"
4245 match = {
4246 "member_vnf_index": vnf_index,
4247 "RO_nsr_id": RO_nsr_id,
4248 "RO_scaling_info": RO_scaling_info,
4249 }
4250 else:
4251 match = {
4252 "member_vnf_index": vnf_index,
4253 "primitive": vnf_config_primitive,
4254 "primitive_params": primitive_params,
4255 "lcmOperationType": operationType,
4256 }
4257 op_index = self._find_suboperation(db_nslcmop, match)
4258 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4259 # a. New sub-operation
4260 # The sub-operation does not exist, add it.
4261 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4262 # The following parameters are set to None for all kind of scaling:
4263 vdu_id = None
4264 vdu_count_index = None
4265 vdu_name = None
4266 if RO_nsr_id and RO_scaling_info:
4267 vnf_config_primitive = None
4268 primitive_params = None
4269 else:
4270 RO_nsr_id = None
4271 RO_scaling_info = None
4272 # Initial status for sub-operation
4273 operationState = "PROCESSING"
4274 detailed_status = "In progress"
4275 # Add sub-operation for pre/post-scaling (zero or more operations)
4276 self._add_suboperation(
4277 db_nslcmop,
4278 vnf_index,
4279 vdu_id,
4280 vdu_count_index,
4281 vdu_name,
4282 vnf_config_primitive,
4283 primitive_params,
4284 operationState,
4285 detailed_status,
4286 operationType,
4287 RO_nsr_id,
4288 RO_scaling_info,
4289 )
4290 return self.SUBOPERATION_STATUS_NEW
4291 else:
4292 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4293 # or op_index (operationState != 'COMPLETED')
4294 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4295
4296 # Function to return execution_environment id
4297
4298 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4299 # TODO vdu_index_count
4300 for vca in vca_deployed_list:
4301 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4302 return vca["ee_id"]
4303
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True).
        :param logging_text: prefix for every log line of this task
        :param db_nslcmop: nslcmop database record driving this termination
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier passed through to the VCA connector, if any
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type kept for backward compatibility with pre-"type" records
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {}  for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4409
4410 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4411 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4412 namespace = "." + db_nsr["_id"]
4413 try:
4414 await self.n2vc.delete_namespace(
4415 namespace=namespace,
4416 total_timeout=self.timeout.charm_delete,
4417 vca_id=vca_id,
4418 )
4419 except N2VCNotFound: # already deleted. Skip
4420 pass
4421 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4422
    async def terminate(self, nsr_id, nslcmop_id):
        """NS termination task (run as an asyncio task registered in lcm_tasks).

        Stage 1/3 takes the HA lock and loads the nsr/nslcmop records from
        the database; stage 2/3 executes terminate config-primitives,
        destroying the individual execution environments that require it;
        stage 3/3 deletes the remaining execution environments, the KDUs and
        the RO/VIM deployment. The finally block always writes the final
        status to nsrs/nslcmops and publishes a "terminated" kafka message.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmop record driving this termination
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed; the finally block still writes final status
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching the VCA target level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
            self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4754
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Await a group of asyncio tasks, reporting progress and errors.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping task -> human-readable description
        :param timeout: overall timeout in seconds for the whole group
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is updated
            in place with "<done>/<total>." progress (plus errors, if any)
        :param nslcmop_id: nslcmop record where progress is written
        :param nsr_id: when provided, errorDescription/errorDetail are also
            written to the nsr record on failure
        :return: list of error-detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global budget
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types are logged as one line; anything
                    # else gets a full traceback for debugging
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4831
4832 @staticmethod
4833 def _map_primitive_params(primitive_desc, params, instantiation_params):
4834 """
4835 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4836 The default-value is used. If it is between < > it look for a value at instantiation_params
4837 :param primitive_desc: portion of VNFD/NSD that describes primitive
4838 :param params: Params provided by user
4839 :param instantiation_params: Instantiation params provided by user
4840 :return: a dictionary with the calculated params
4841 """
4842 calculated_params = {}
4843 for parameter in primitive_desc.get("parameter", ()):
4844 param_name = parameter["name"]
4845 if param_name in params:
4846 calculated_params[param_name] = params[param_name]
4847 elif "default-value" in parameter or "value" in parameter:
4848 if "value" in parameter:
4849 calculated_params[param_name] = parameter["value"]
4850 else:
4851 calculated_params[param_name] = parameter["default-value"]
4852 if (
4853 isinstance(calculated_params[param_name], str)
4854 and calculated_params[param_name].startswith("<")
4855 and calculated_params[param_name].endswith(">")
4856 ):
4857 if calculated_params[param_name][1:-1] in instantiation_params:
4858 calculated_params[param_name] = instantiation_params[
4859 calculated_params[param_name][1:-1]
4860 ]
4861 else:
4862 raise LcmException(
4863 "Parameter {} needed to execute primitive {} not provided".format(
4864 calculated_params[param_name], primitive_desc["name"]
4865 )
4866 )
4867 else:
4868 raise LcmException(
4869 "Parameter {} needed to execute primitive {} not provided".format(
4870 param_name, primitive_desc["name"]
4871 )
4872 )
4873
4874 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4875 calculated_params[param_name] = yaml.safe_dump(
4876 calculated_params[param_name], default_flow_style=True, width=256
4877 )
4878 elif isinstance(calculated_params[param_name], str) and calculated_params[
4879 param_name
4880 ].startswith("!!yaml "):
4881 calculated_params[param_name] = calculated_params[param_name][7:]
4882 if parameter.get("data-type") == "INTEGER":
4883 try:
4884 calculated_params[param_name] = int(calculated_params[param_name])
4885 except ValueError: # error converting string to int
4886 raise LcmException(
4887 "Parameter {} of primitive {} must be integer".format(
4888 param_name, primitive_desc["name"]
4889 )
4890 )
4891 elif parameter.get("data-type") == "BOOLEAN":
4892 calculated_params[param_name] = not (
4893 (str(calculated_params[param_name])).lower() == "false"
4894 )
4895
4896 # add always ns_config_info if primitive name is config
4897 if primitive_desc["name"] == "config":
4898 if "ns_config_info" in instantiation_params:
4899 calculated_params["ns_config_info"] = instantiation_params[
4900 "ns_config_info"
4901 ]
4902 return calculated_params
4903
4904 def _look_for_deployed_vca(
4905 self,
4906 deployed_vca,
4907 member_vnf_index,
4908 vdu_id,
4909 vdu_count_index,
4910 kdu_name=None,
4911 ee_descriptor_id=None,
4912 ):
4913 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4914 for vca in deployed_vca:
4915 if not vca:
4916 continue
4917 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4918 continue
4919 if (
4920 vdu_count_index is not None
4921 and vdu_count_index != vca["vdu_count_index"]
4922 ):
4923 continue
4924 if kdu_name and kdu_name != vca["kdu_name"]:
4925 continue
4926 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4927 continue
4928 break
4929 else:
4930 # vca_deployed not found
4931 raise LcmException(
4932 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4933 " is not deployed".format(
4934 member_vnf_index,
4935 vdu_id,
4936 vdu_count_index,
4937 kdu_name,
4938 ee_descriptor_id,
4939 )
4940 )
4941 # get ee_id
4942 ee_id = vca.get("ee_id")
4943 vca_type = vca.get(
4944 "type", "lxc_proxy_charm"
4945 ) # default value for backward compatibility - proxy charm
4946 if not ee_id:
4947 raise LcmException(
4948 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4949 "execution environment".format(
4950 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4951 )
4952 )
4953 return ee_id, vca_type
4954
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on an execution environment, retrying on failure.

        Args:
            ee_id: execution environment id the primitive runs on
            primitive: primitive (action) name; for "config" the params are
                wrapped in a {"params": ...} envelope before execution
            primitive_params: dict of parameters passed to the primitive
            retries: number of extra attempts after a failed execution
            retries_interval: seconds to wait between attempts
            timeout: per-attempt timeout; defaults to self.timeout.primitive
            vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
            db_dict: DB location forwarded to the connector for status updates
            vca_id: VCA id forwarded to the connector

        Returns:
            Tuple (operation_state, detail): ("COMPLETED", output) on success,
            otherwise a failure state and an error message.
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout.progress_primitive,
                            total_timeout=self.timeout.primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout.primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        # NOTE(review): the explicit loop= kwarg was removed from
                        # asyncio.sleep in Python 3.10 — confirm target interpreter.
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # NOTE(review): "FAIL" differs from the "FAILED" state returned above —
            # verify that callers comparing against "FAILED" handle this value too.
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5015
5016 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5017 """
5018 Updating the vca_status with latest juju information in nsrs record
5019 :param: nsr_id: Id of the nsr
5020 :param: nslcmop_id: Id of the nslcmop
5021 :return: None
5022 """
5023
5024 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5025 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5026 vca_id = self.get_vca_id({}, db_nsr)
5027 if db_nsr["_admin"]["deployed"]["K8s"]:
5028 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5029 cluster_uuid, kdu_instance, cluster_type = (
5030 k8s["k8scluster-uuid"],
5031 k8s["kdu-instance"],
5032 k8s["k8scluster-type"],
5033 )
5034 await self._on_update_k8s_db(
5035 cluster_uuid=cluster_uuid,
5036 kdu_instance=kdu_instance,
5037 filter={"_id": nsr_id},
5038 vca_id=vca_id,
5039 cluster_type=cluster_type,
5040 )
5041 else:
5042 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5043 table, filter = "nsrs", {"_id": nsr_id}
5044 path = "_admin.deployed.VCA.{}.".format(vca_index)
5045 await self._on_update_n2vc_db(table, filter, path, {})
5046
5047 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5048 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5049
    async def action(self, nsr_id, nslcmop_id):
        """Run a day-2 primitive (action) on a NS, VNF, VDU or KDU.

        Reads the nslcmop operation parameters from the DB, locates the
        primitive in the corresponding descriptor configuration and executes it
        either through the K8s cluster connector (KDU) or through the deployed
        VCA execution environment (charm). Operation/NS status is persisted and
        a kafka "actioned" message is emitted in the finally block.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id (nslcmops record)
        :return: (operation_state, detailed_status) — also written to the DB
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # KDU additionalParams are JSON-encoded strings; decode each one
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound inside the vnf_index branch
            # above; an NS-level action (no member_vnf_index) would raise
            # UnboundLocalError here — confirm against callers.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive in the descriptor configuration matching the target
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU built-in operations are allowed even without a declared primitive
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve the descriptor-level additional params for the target entity
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops rebind the outer 'primitive' variable;
                # harmless today since only primitive_name is used afterwards.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # KDU path: run the operation directly through the K8s cluster connector
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                    if kdu_model.count("/") < 2:  # helm chart is not embedded
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # Charm path: locate the deployed VCA and run the primitive on it
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # Persist final status and notify, whether the action succeeded or not
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
        return nslcmop_operation_state, detailed_status
5422
5423 async def terminate_vdus(
5424 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5425 ):
5426 """This method terminates VDUs
5427
5428 Args:
5429 db_vnfr: VNF instance record
5430 member_vnf_index: VNF index to identify the VDUs to be removed
5431 db_nsr: NS instance record
5432 update_db_nslcmops: Nslcmop update record
5433 """
5434 vca_scaling_info = []
5435 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5436 scaling_info["scaling_direction"] = "IN"
5437 scaling_info["vdu-delete"] = {}
5438 scaling_info["kdu-delete"] = {}
5439 db_vdur = db_vnfr.get("vdur")
5440 vdur_list = copy(db_vdur)
5441 count_index = 0
5442 for index, vdu in enumerate(vdur_list):
5443 vca_scaling_info.append(
5444 {
5445 "osm_vdu_id": vdu["vdu-id-ref"],
5446 "member-vnf-index": member_vnf_index,
5447 "type": "delete",
5448 "vdu_index": count_index,
5449 }
5450 )
5451 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5452 scaling_info["vdu"].append(
5453 {
5454 "name": vdu.get("name") or vdu.get("vdu-name"),
5455 "vdu_id": vdu["vdu-id-ref"],
5456 "interface": [],
5457 }
5458 )
5459 for interface in vdu["interfaces"]:
5460 scaling_info["vdu"][index]["interface"].append(
5461 {
5462 "name": interface["name"],
5463 "ip_address": interface["ip-address"],
5464 "mac_address": interface.get("mac-address"),
5465 }
5466 )
5467 self.logger.info("NS update scaling info{}".format(scaling_info))
5468 stage[2] = "Terminating VDUs"
5469 if scaling_info.get("vdu-delete"):
5470 # scale_process = "RO"
5471 if self.ro_config.ng:
5472 await self._scale_ng_ro(
5473 logging_text,
5474 db_nsr,
5475 update_db_nslcmops,
5476 db_vnfr,
5477 scaling_info,
5478 stage,
5479 )
5480
5481 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5482 """This method is to Remove VNF instances from NS.
5483
5484 Args:
5485 nsr_id: NS instance id
5486 nslcmop_id: nslcmop id of update
5487 vnf_instance_id: id of the VNF instance to be removed
5488
5489 Returns:
5490 result: (str, str) COMPLETED/FAILED, details
5491 """
5492 try:
5493 db_nsr_update = {}
5494 logging_text = "Task ns={} update ".format(nsr_id)
5495 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5496 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5497 if check_vnfr_count > 1:
5498 stage = ["", "", ""]
5499 step = "Getting nslcmop from database"
5500 self.logger.debug(
5501 step + " after having waited for previous tasks to be completed"
5502 )
5503 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5504 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5505 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5506 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5507 """ db_vnfr = self.db.get_one(
5508 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5509
5510 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5511 await self.terminate_vdus(
5512 db_vnfr,
5513 member_vnf_index,
5514 db_nsr,
5515 update_db_nslcmops,
5516 stage,
5517 logging_text,
5518 )
5519
5520 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5521 constituent_vnfr.remove(db_vnfr.get("_id"))
5522 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5523 "constituent-vnfr-ref"
5524 )
5525 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5526 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5527 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5528 return "COMPLETED", "Done"
5529 else:
5530 step = "Terminate VNF Failed with"
5531 raise LcmException(
5532 "{} Cannot terminate the last VNF in this NS.".format(
5533 vnf_instance_id
5534 )
5535 )
5536 except (LcmException, asyncio.CancelledError):
5537 raise
5538 except Exception as e:
5539 self.logger.debug("Error removing VNF {}".format(e))
5540 return "FAILED", "Error removing VNF {}".format(e)
5541
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the vnfr (connection
        points, vdur from operationParams["newVdur"], latest descriptor
        revision) and scales OUT new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild connection points from the descriptor's external cpds
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # New vdur list is supplied by the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): vca_scaling_info is filled but never consumed below;
            # likewise cloud_init_list is reset each iteration and never read —
            # confirm whether this is vestigial.
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index is never incremented, so all VDUs
                # are created with index 0 — confirm this is intended.
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5667
5668 async def _ns_charm_upgrade(
5669 self,
5670 ee_id,
5671 charm_id,
5672 charm_type,
5673 path,
5674 timeout: float = None,
5675 ) -> (str, str):
5676 """This method upgrade charms in VNF instances
5677
5678 Args:
5679 ee_id: Execution environment id
5680 path: Local path to the charm
5681 charm_id: charm-id
5682 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5683 timeout: (Float) Timeout for the ns update operation
5684
5685 Returns:
5686 result: (str, str) COMPLETED/FAILED, details
5687 """
5688 try:
5689 charm_type = charm_type or "lxc_proxy_charm"
5690 output = await self.vca_map[charm_type].upgrade_charm(
5691 ee_id=ee_id,
5692 path=path,
5693 charm_id=charm_id,
5694 charm_type=charm_type,
5695 timeout=timeout or self.timeout.ns_update,
5696 )
5697
5698 if output:
5699 return "COMPLETED", output
5700
5701 except (LcmException, asyncio.CancelledError):
5702 raise
5703
5704 except Exception as e:
5705 self.logger.debug("Error upgrading charm {}".format(path))
5706
5707 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5708
5709 async def update(self, nsr_id, nslcmop_id):
5710 """Update NS according to different update types
5711
5712 This method performs upgrade of VNF instances then updates the revision
5713 number in VNF record
5714
5715 Args:
5716 nsr_id: Network service will be updated
5717 nslcmop_id: ns lcm operation id
5718
5719 Returns:
5720 It may raise DbException, LcmException, N2VCException, K8sException
5721
5722 """
5723 # Try to lock HA task here
5724 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5725 if not task_is_locked_by_me:
5726 return
5727
5728 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5729 self.logger.debug(logging_text + "Enter")
5730
5731 # Set the required variables to be filled up later
5732 db_nsr = None
5733 db_nslcmop_update = {}
5734 vnfr_update = {}
5735 nslcmop_operation_state = None
5736 db_nsr_update = {}
5737 error_description_nslcmop = ""
5738 exc = None
5739 change_type = "updated"
5740 detailed_status = ""
5741 member_vnf_index = None
5742
5743 try:
5744 # wait for any previous tasks in process
5745 step = "Waiting for previous operations to terminate"
5746 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5747 self._write_ns_status(
5748 nsr_id=nsr_id,
5749 ns_state=None,
5750 current_operation="UPDATING",
5751 current_operation_id=nslcmop_id,
5752 )
5753
5754 step = "Getting nslcmop from database"
5755 db_nslcmop = self.db.get_one(
5756 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5757 )
5758 update_type = db_nslcmop["operationParams"]["updateType"]
5759
5760 step = "Getting nsr from database"
5761 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5762 old_operational_status = db_nsr["operational-status"]
5763 db_nsr_update["operational-status"] = "updating"
5764 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5765 nsr_deployed = db_nsr["_admin"].get("deployed")
5766
5767 if update_type == "CHANGE_VNFPKG":
5768 # Get the input parameters given through update request
5769 vnf_instance_id = db_nslcmop["operationParams"][
5770 "changeVnfPackageData"
5771 ].get("vnfInstanceId")
5772
5773 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5774 "vnfdId"
5775 )
5776 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5777
5778 step = "Getting vnfr from database"
5779 db_vnfr = self.db.get_one(
5780 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5781 )
5782
5783 step = "Getting vnfds from database"
5784 # Latest VNFD
5785 latest_vnfd = self.db.get_one(
5786 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5787 )
5788 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5789
5790 # Current VNFD
5791 current_vnf_revision = db_vnfr.get("revision", 1)
5792 current_vnfd = self.db.get_one(
5793 "vnfds_revisions",
5794 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5795 fail_on_empty=False,
5796 )
5797 # Charm artifact paths will be filled up later
5798 (
5799 current_charm_artifact_path,
5800 target_charm_artifact_path,
5801 charm_artifact_paths,
5802 helm_artifacts,
5803 ) = ([], [], [], [])
5804
5805 step = "Checking if revision has changed in VNFD"
5806 if current_vnf_revision != latest_vnfd_revision:
5807 change_type = "policy_updated"
5808
5809 # There is new revision of VNFD, update operation is required
5810 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5811 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5812
5813 step = "Removing the VNFD packages if they exist in the local path"
5814 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5815 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5816
5817 step = "Get the VNFD packages from FSMongo"
5818 self.fs.sync(from_path=latest_vnfd_path)
5819 self.fs.sync(from_path=current_vnfd_path)
5820
5821 step = (
5822 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5823 )
5824 current_base_folder = current_vnfd["_admin"]["storage"]
5825 latest_base_folder = latest_vnfd["_admin"]["storage"]
5826
5827 for vca_index, vca_deployed in enumerate(
5828 get_iterable(nsr_deployed, "VCA")
5829 ):
5830 vnf_index = db_vnfr.get("member-vnf-index-ref")
5831
5832 # Getting charm-id and charm-type
5833 if vca_deployed.get("member-vnf-index") == vnf_index:
5834 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5835 vca_type = vca_deployed.get("type")
5836 vdu_count_index = vca_deployed.get("vdu_count_index")
5837
5838 # Getting ee-id
5839 ee_id = vca_deployed.get("ee_id")
5840
5841 step = "Getting descriptor config"
5842 if current_vnfd.get("kdu"):
5843 search_key = "kdu_name"
5844 else:
5845 search_key = "vnfd_id"
5846
5847 entity_id = vca_deployed.get(search_key)
5848
5849 descriptor_config = get_configuration(
5850 current_vnfd, entity_id
5851 )
5852
5853 if "execution-environment-list" in descriptor_config:
5854 ee_list = descriptor_config.get(
5855 "execution-environment-list", []
5856 )
5857 else:
5858 ee_list = []
5859
5860 # There could be several charm used in the same VNF
5861 for ee_item in ee_list:
5862 if ee_item.get("juju"):
5863 step = "Getting charm name"
5864 charm_name = ee_item["juju"].get("charm")
5865
5866 step = "Setting Charm artifact paths"
5867 current_charm_artifact_path.append(
5868 get_charm_artifact_path(
5869 current_base_folder,
5870 charm_name,
5871 vca_type,
5872 current_vnf_revision,
5873 )
5874 )
5875 target_charm_artifact_path.append(
5876 get_charm_artifact_path(
5877 latest_base_folder,
5878 charm_name,
5879 vca_type,
5880 latest_vnfd_revision,
5881 )
5882 )
5883 elif ee_item.get("helm-chart"):
5884 # add chart to list and all parameters
5885 step = "Getting helm chart name"
5886 chart_name = ee_item.get("helm-chart")
5887 if (
5888 ee_item.get("helm-version")
5889 and ee_item.get("helm-version") == "v2"
5890 ):
5891 vca_type = "helm"
5892 else:
5893 vca_type = "helm-v3"
5894 step = "Setting Helm chart artifact paths"
5895
5896 helm_artifacts.append(
5897 {
5898 "current_artifact_path": get_charm_artifact_path(
5899 current_base_folder,
5900 chart_name,
5901 vca_type,
5902 current_vnf_revision,
5903 ),
5904 "target_artifact_path": get_charm_artifact_path(
5905 latest_base_folder,
5906 chart_name,
5907 vca_type,
5908 latest_vnfd_revision,
5909 ),
5910 "ee_id": ee_id,
5911 "vca_index": vca_index,
5912 "vdu_index": vdu_count_index,
5913 }
5914 )
5915
5916 charm_artifact_paths = zip(
5917 current_charm_artifact_path, target_charm_artifact_path
5918 )
5919
5920 step = "Checking if software version has changed in VNFD"
5921 if find_software_version(current_vnfd) != find_software_version(
5922 latest_vnfd
5923 ):
5924 step = "Checking if existing VNF has charm"
5925 for current_charm_path, target_charm_path in list(
5926 charm_artifact_paths
5927 ):
5928 if current_charm_path:
5929 raise LcmException(
5930 "Software version change is not supported as VNF instance {} has charm.".format(
5931 vnf_instance_id
5932 )
5933 )
5934
5935 # There is no change in the charm package, then redeploy the VNF
5936 # based on new descriptor
5937 step = "Redeploying VNF"
5938 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5939 (result, detailed_status) = await self._ns_redeploy_vnf(
5940 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5941 )
5942 if result == "FAILED":
5943 nslcmop_operation_state = result
5944 error_description_nslcmop = detailed_status
5945 db_nslcmop_update["detailed-status"] = detailed_status
5946 self.logger.debug(
5947 logging_text
5948 + " step {} Done with result {} {}".format(
5949 step, nslcmop_operation_state, detailed_status
5950 )
5951 )
5952
5953 else:
5954 step = "Checking if any charm package has changed or not"
5955 for current_charm_path, target_charm_path in list(
5956 charm_artifact_paths
5957 ):
5958 if (
5959 current_charm_path
5960 and target_charm_path
5961 and self.check_charm_hash_changed(
5962 current_charm_path, target_charm_path
5963 )
5964 ):
5965 step = "Checking whether VNF uses juju bundle"
5966 if check_juju_bundle_existence(current_vnfd):
5967 raise LcmException(
5968 "Charm upgrade is not supported for the instance which"
5969 " uses juju-bundle: {}".format(
5970 check_juju_bundle_existence(current_vnfd)
5971 )
5972 )
5973
5974 step = "Upgrading Charm"
5975 (
5976 result,
5977 detailed_status,
5978 ) = await self._ns_charm_upgrade(
5979 ee_id=ee_id,
5980 charm_id=vca_id,
5981 charm_type=vca_type,
5982 path=self.fs.path + target_charm_path,
5983 timeout=timeout_seconds,
5984 )
5985
5986 if result == "FAILED":
5987 nslcmop_operation_state = result
5988 error_description_nslcmop = detailed_status
5989
5990 db_nslcmop_update["detailed-status"] = detailed_status
5991 self.logger.debug(
5992 logging_text
5993 + " step {} Done with result {} {}".format(
5994 step, nslcmop_operation_state, detailed_status
5995 )
5996 )
5997
5998 step = "Updating policies"
5999 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6000 result = "COMPLETED"
6001 detailed_status = "Done"
6002 db_nslcmop_update["detailed-status"] = "Done"
6003
6004 # helm base EE
6005 for item in helm_artifacts:
6006 if not (
6007 item["current_artifact_path"]
6008 and item["target_artifact_path"]
6009 and self.check_charm_hash_changed(
6010 item["current_artifact_path"],
6011 item["target_artifact_path"],
6012 )
6013 ):
6014 continue
6015 db_update_entry = "_admin.deployed.VCA.{}.".format(
6016 item["vca_index"]
6017 )
6018 vnfr_id = db_vnfr["_id"]
6019 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6020 db_dict = {
6021 "collection": "nsrs",
6022 "filter": {"_id": nsr_id},
6023 "path": db_update_entry,
6024 }
6025 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6026 await self.vca_map[vca_type].upgrade_execution_environment(
6027 namespace=namespace,
6028 helm_id=helm_id,
6029 db_dict=db_dict,
6030 config=osm_config,
6031 artifact_path=item["target_artifact_path"],
6032 vca_type=vca_type,
6033 )
6034 vnf_id = db_vnfr.get("vnfd-ref")
6035 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6036 self.logger.debug("get ssh key block")
6037 rw_mgmt_ip = None
6038 if deep_get(
6039 config_descriptor,
6040 ("config-access", "ssh-access", "required"),
6041 ):
6042 # Needed to inject a ssh key
6043 user = deep_get(
6044 config_descriptor,
6045 ("config-access", "ssh-access", "default-user"),
6046 )
6047 step = (
6048 "Install configuration Software, getting public ssh key"
6049 )
6050 pub_key = await self.vca_map[
6051 vca_type
6052 ].get_ee_ssh_public__key(
6053 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6054 )
6055
6056 step = (
6057 "Insert public key into VM user={} ssh_key={}".format(
6058 user, pub_key
6059 )
6060 )
6061 self.logger.debug(logging_text + step)
6062
6063 # wait for RO (ip-address) Insert pub_key into VM
6064 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6065 logging_text,
6066 nsr_id,
6067 vnfr_id,
6068 None,
6069 item["vdu_index"],
6070 user=user,
6071 pub_key=pub_key,
6072 )
6073
6074 initial_config_primitive_list = config_descriptor.get(
6075 "initial-config-primitive"
6076 )
6077 config_primitive = next(
6078 (
6079 p
6080 for p in initial_config_primitive_list
6081 if p["name"] == "config"
6082 ),
6083 None,
6084 )
6085 if not config_primitive:
6086 continue
6087
6088 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6089 if rw_mgmt_ip:
6090 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6091 if db_vnfr.get("additionalParamsForVnf"):
6092 deploy_params.update(
6093 parse_yaml_strings(
6094 db_vnfr["additionalParamsForVnf"].copy()
6095 )
6096 )
6097 primitive_params_ = self._map_primitive_params(
6098 config_primitive, {}, deploy_params
6099 )
6100
6101 step = "execute primitive '{}' params '{}'".format(
6102 config_primitive["name"], primitive_params_
6103 )
6104 self.logger.debug(logging_text + step)
6105 await self.vca_map[vca_type].exec_primitive(
6106 ee_id=ee_id,
6107 primitive_name=config_primitive["name"],
6108 params_dict=primitive_params_,
6109 db_dict=db_dict,
6110 vca_id=vca_id,
6111 vca_type=vca_type,
6112 )
6113
6114 step = "Updating policies"
6115 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6116 detailed_status = "Done"
6117 db_nslcmop_update["detailed-status"] = "Done"
6118
6119 # If nslcmop_operation_state is None, so any operation is not failed.
6120 if not nslcmop_operation_state:
6121 nslcmop_operation_state = "COMPLETED"
6122
6123 # If update CHANGE_VNFPKG nslcmop_operation is successful
6124 # vnf revision need to be updated
6125 vnfr_update["revision"] = latest_vnfd_revision
6126 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6127
6128 self.logger.debug(
6129 logging_text
6130 + " task Done with result {} {}".format(
6131 nslcmop_operation_state, detailed_status
6132 )
6133 )
6134 elif update_type == "REMOVE_VNF":
6135 # This part is included in https://osm.etsi.org/gerrit/11876
6136 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6137 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6138 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6139 step = "Removing VNF"
6140 (result, detailed_status) = await self.remove_vnf(
6141 nsr_id, nslcmop_id, vnf_instance_id
6142 )
6143 if result == "FAILED":
6144 nslcmop_operation_state = result
6145 error_description_nslcmop = detailed_status
6146 db_nslcmop_update["detailed-status"] = detailed_status
6147 change_type = "vnf_terminated"
6148 if not nslcmop_operation_state:
6149 nslcmop_operation_state = "COMPLETED"
6150 self.logger.debug(
6151 logging_text
6152 + " task Done with result {} {}".format(
6153 nslcmop_operation_state, detailed_status
6154 )
6155 )
6156
6157 elif update_type == "OPERATE_VNF":
6158 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6159 "vnfInstanceId"
6160 ]
6161 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6162 "changeStateTo"
6163 ]
6164 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6165 "additionalParam"
6166 ]
6167 (result, detailed_status) = await self.rebuild_start_stop(
6168 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6169 )
6170 if result == "FAILED":
6171 nslcmop_operation_state = result
6172 error_description_nslcmop = detailed_status
6173 db_nslcmop_update["detailed-status"] = detailed_status
6174 if not nslcmop_operation_state:
6175 nslcmop_operation_state = "COMPLETED"
6176 self.logger.debug(
6177 logging_text
6178 + " task Done with result {} {}".format(
6179 nslcmop_operation_state, detailed_status
6180 )
6181 )
6182
6183 # If nslcmop_operation_state is None, so any operation is not failed.
6184 # All operations are executed in overall.
6185 if not nslcmop_operation_state:
6186 nslcmop_operation_state = "COMPLETED"
6187 db_nsr_update["operational-status"] = old_operational_status
6188
6189 except (DbException, LcmException, N2VCException, K8sException) as e:
6190 self.logger.error(logging_text + "Exit Exception {}".format(e))
6191 exc = e
6192 except asyncio.CancelledError:
6193 self.logger.error(
6194 logging_text + "Cancelled Exception while '{}'".format(step)
6195 )
6196 exc = "Operation was cancelled"
6197 except asyncio.TimeoutError:
6198 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6199 exc = "Timeout"
6200 except Exception as e:
6201 exc = traceback.format_exc()
6202 self.logger.critical(
6203 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6204 exc_info=True,
6205 )
6206 finally:
6207 if exc:
6208 db_nslcmop_update[
6209 "detailed-status"
6210 ] = (
6211 detailed_status
6212 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6213 nslcmop_operation_state = "FAILED"
6214 db_nsr_update["operational-status"] = old_operational_status
6215 if db_nsr:
6216 self._write_ns_status(
6217 nsr_id=nsr_id,
6218 ns_state=db_nsr["nsState"],
6219 current_operation="IDLE",
6220 current_operation_id=None,
6221 other_update=db_nsr_update,
6222 )
6223
6224 self._write_op_status(
6225 op_id=nslcmop_id,
6226 stage="",
6227 error_message=error_description_nslcmop,
6228 operation_state=nslcmop_operation_state,
6229 other_update=db_nslcmop_update,
6230 )
6231
6232 if nslcmop_operation_state:
6233 try:
6234 msg = {
6235 "nsr_id": nsr_id,
6236 "nslcmop_id": nslcmop_id,
6237 "operationState": nslcmop_operation_state,
6238 }
6239 if (
6240 change_type in ("vnf_terminated", "policy_updated")
6241 and member_vnf_index
6242 ):
6243 msg.update({"vnf_member_index": member_vnf_index})
6244 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6245 except Exception as e:
6246 self.logger.error(
6247 logging_text + "kafka_write notification Exception {}".format(e)
6248 )
6249 self.logger.debug(logging_text + "Exit")
6250 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6251 return nslcmop_operation_state, detailed_status
6252
6253 async def scale(self, nsr_id, nslcmop_id):
6254 # Try to lock HA task here
6255 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6256 if not task_is_locked_by_me:
6257 return
6258
6259 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6260 stage = ["", "", ""]
6261 tasks_dict_info = {}
6262 # ^ stage, step, VIM progress
6263 self.logger.debug(logging_text + "Enter")
6264 # get all needed from database
6265 db_nsr = None
6266 db_nslcmop_update = {}
6267 db_nsr_update = {}
6268 exc = None
6269 # in case of error, indicates what part of scale was failed to put nsr at error status
6270 scale_process = None
6271 old_operational_status = ""
6272 old_config_status = ""
6273 nsi_id = None
6274 try:
6275 # wait for any previous tasks in process
6276 step = "Waiting for previous operations to terminate"
6277 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6278 self._write_ns_status(
6279 nsr_id=nsr_id,
6280 ns_state=None,
6281 current_operation="SCALING",
6282 current_operation_id=nslcmop_id,
6283 )
6284
6285 step = "Getting nslcmop from database"
6286 self.logger.debug(
6287 step + " after having waited for previous tasks to be completed"
6288 )
6289 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6290
6291 step = "Getting nsr from database"
6292 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6293 old_operational_status = db_nsr["operational-status"]
6294 old_config_status = db_nsr["config-status"]
6295
6296 step = "Parsing scaling parameters"
6297 db_nsr_update["operational-status"] = "scaling"
6298 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6299 nsr_deployed = db_nsr["_admin"].get("deployed")
6300
6301 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6302 "scaleByStepData"
6303 ]["member-vnf-index"]
6304 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6305 "scaleByStepData"
6306 ]["scaling-group-descriptor"]
6307 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6308 # for backward compatibility
6309 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6310 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6311 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6312 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6313
6314 step = "Getting vnfr from database"
6315 db_vnfr = self.db.get_one(
6316 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6317 )
6318
6319 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6320
6321 step = "Getting vnfd from database"
6322 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6323
6324 base_folder = db_vnfd["_admin"]["storage"]
6325
6326 step = "Getting scaling-group-descriptor"
6327 scaling_descriptor = find_in_list(
6328 get_scaling_aspect(db_vnfd),
6329 lambda scale_desc: scale_desc["name"] == scaling_group,
6330 )
6331 if not scaling_descriptor:
6332 raise LcmException(
6333 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6334 "at vnfd:scaling-group-descriptor".format(scaling_group)
6335 )
6336
6337 step = "Sending scale order to VIM"
6338 # TODO check if ns is in a proper status
6339 nb_scale_op = 0
6340 if not db_nsr["_admin"].get("scaling-group"):
6341 self.update_db_2(
6342 "nsrs",
6343 nsr_id,
6344 {
6345 "_admin.scaling-group": [
6346 {"name": scaling_group, "nb-scale-op": 0}
6347 ]
6348 },
6349 )
6350 admin_scale_index = 0
6351 else:
6352 for admin_scale_index, admin_scale_info in enumerate(
6353 db_nsr["_admin"]["scaling-group"]
6354 ):
6355 if admin_scale_info["name"] == scaling_group:
6356 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6357 break
6358 else: # not found, set index one plus last element and add new entry with the name
6359 admin_scale_index += 1
6360 db_nsr_update[
6361 "_admin.scaling-group.{}.name".format(admin_scale_index)
6362 ] = scaling_group
6363
6364 vca_scaling_info = []
6365 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6366 if scaling_type == "SCALE_OUT":
6367 if "aspect-delta-details" not in scaling_descriptor:
6368 raise LcmException(
6369 "Aspect delta details not fount in scaling descriptor {}".format(
6370 scaling_descriptor["name"]
6371 )
6372 )
6373 # count if max-instance-count is reached
6374 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6375
6376 scaling_info["scaling_direction"] = "OUT"
6377 scaling_info["vdu-create"] = {}
6378 scaling_info["kdu-create"] = {}
6379 for delta in deltas:
6380 for vdu_delta in delta.get("vdu-delta", {}):
6381 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6382 # vdu_index also provides the number of instance of the targeted vdu
6383 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6384 cloud_init_text = self._get_vdu_cloud_init_content(
6385 vdud, db_vnfd
6386 )
6387 if cloud_init_text:
6388 additional_params = (
6389 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6390 or {}
6391 )
6392 cloud_init_list = []
6393
6394 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6395 max_instance_count = 10
6396 if vdu_profile and "max-number-of-instances" in vdu_profile:
6397 max_instance_count = vdu_profile.get(
6398 "max-number-of-instances", 10
6399 )
6400
6401 default_instance_num = get_number_of_instances(
6402 db_vnfd, vdud["id"]
6403 )
6404 instances_number = vdu_delta.get("number-of-instances", 1)
6405 nb_scale_op += instances_number
6406
6407 new_instance_count = nb_scale_op + default_instance_num
6408 # Control if new count is over max and vdu count is less than max.
6409 # Then assign new instance count
6410 if new_instance_count > max_instance_count > vdu_count:
6411 instances_number = new_instance_count - max_instance_count
6412 else:
6413 instances_number = instances_number
6414
6415 if new_instance_count > max_instance_count:
6416 raise LcmException(
6417 "reached the limit of {} (max-instance-count) "
6418 "scaling-out operations for the "
6419 "scaling-group-descriptor '{}'".format(
6420 nb_scale_op, scaling_group
6421 )
6422 )
6423 for x in range(vdu_delta.get("number-of-instances", 1)):
6424 if cloud_init_text:
6425 # TODO Information of its own ip is not available because db_vnfr is not updated.
6426 additional_params["OSM"] = get_osm_params(
6427 db_vnfr, vdu_delta["id"], vdu_index + x
6428 )
6429 cloud_init_list.append(
6430 self._parse_cloud_init(
6431 cloud_init_text,
6432 additional_params,
6433 db_vnfd["id"],
6434 vdud["id"],
6435 )
6436 )
6437 vca_scaling_info.append(
6438 {
6439 "osm_vdu_id": vdu_delta["id"],
6440 "member-vnf-index": vnf_index,
6441 "type": "create",
6442 "vdu_index": vdu_index + x,
6443 }
6444 )
6445 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6446 for kdu_delta in delta.get("kdu-resource-delta", {}):
6447 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6448 kdu_name = kdu_profile["kdu-name"]
6449 resource_name = kdu_profile.get("resource-name", "")
6450
6451 # Might have different kdus in the same delta
6452 # Should have list for each kdu
6453 if not scaling_info["kdu-create"].get(kdu_name, None):
6454 scaling_info["kdu-create"][kdu_name] = []
6455
6456 kdur = get_kdur(db_vnfr, kdu_name)
6457 if kdur.get("helm-chart"):
6458 k8s_cluster_type = "helm-chart-v3"
6459 self.logger.debug("kdur: {}".format(kdur))
6460 if (
6461 kdur.get("helm-version")
6462 and kdur.get("helm-version") == "v2"
6463 ):
6464 k8s_cluster_type = "helm-chart"
6465 elif kdur.get("juju-bundle"):
6466 k8s_cluster_type = "juju-bundle"
6467 else:
6468 raise LcmException(
6469 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6470 "juju-bundle. Maybe an old NBI version is running".format(
6471 db_vnfr["member-vnf-index-ref"], kdu_name
6472 )
6473 )
6474
6475 max_instance_count = 10
6476 if kdu_profile and "max-number-of-instances" in kdu_profile:
6477 max_instance_count = kdu_profile.get(
6478 "max-number-of-instances", 10
6479 )
6480
6481 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6482 deployed_kdu, _ = get_deployed_kdu(
6483 nsr_deployed, kdu_name, vnf_index
6484 )
6485 if deployed_kdu is None:
6486 raise LcmException(
6487 "KDU '{}' for vnf '{}' not deployed".format(
6488 kdu_name, vnf_index
6489 )
6490 )
6491 kdu_instance = deployed_kdu.get("kdu-instance")
6492 instance_num = await self.k8scluster_map[
6493 k8s_cluster_type
6494 ].get_scale_count(
6495 resource_name,
6496 kdu_instance,
6497 vca_id=vca_id,
6498 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6499 kdu_model=deployed_kdu.get("kdu-model"),
6500 )
6501 kdu_replica_count = instance_num + kdu_delta.get(
6502 "number-of-instances", 1
6503 )
6504
6505 # Control if new count is over max and instance_num is less than max.
6506 # Then assign max instance number to kdu replica count
6507 if kdu_replica_count > max_instance_count > instance_num:
6508 kdu_replica_count = max_instance_count
6509 if kdu_replica_count > max_instance_count:
6510 raise LcmException(
6511 "reached the limit of {} (max-instance-count) "
6512 "scaling-out operations for the "
6513 "scaling-group-descriptor '{}'".format(
6514 instance_num, scaling_group
6515 )
6516 )
6517
6518 for x in range(kdu_delta.get("number-of-instances", 1)):
6519 vca_scaling_info.append(
6520 {
6521 "osm_kdu_id": kdu_name,
6522 "member-vnf-index": vnf_index,
6523 "type": "create",
6524 "kdu_index": instance_num + x - 1,
6525 }
6526 )
6527 scaling_info["kdu-create"][kdu_name].append(
6528 {
6529 "member-vnf-index": vnf_index,
6530 "type": "create",
6531 "k8s-cluster-type": k8s_cluster_type,
6532 "resource-name": resource_name,
6533 "scale": kdu_replica_count,
6534 }
6535 )
6536 elif scaling_type == "SCALE_IN":
6537 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6538
6539 scaling_info["scaling_direction"] = "IN"
6540 scaling_info["vdu-delete"] = {}
6541 scaling_info["kdu-delete"] = {}
6542
6543 for delta in deltas:
6544 for vdu_delta in delta.get("vdu-delta", {}):
6545 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6546 min_instance_count = 0
6547 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6548 if vdu_profile and "min-number-of-instances" in vdu_profile:
6549 min_instance_count = vdu_profile["min-number-of-instances"]
6550
6551 default_instance_num = get_number_of_instances(
6552 db_vnfd, vdu_delta["id"]
6553 )
6554 instance_num = vdu_delta.get("number-of-instances", 1)
6555 nb_scale_op -= instance_num
6556
6557 new_instance_count = nb_scale_op + default_instance_num
6558
6559 if new_instance_count < min_instance_count < vdu_count:
6560 instances_number = min_instance_count - new_instance_count
6561 else:
6562 instances_number = instance_num
6563
6564 if new_instance_count < min_instance_count:
6565 raise LcmException(
6566 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6567 "scaling-group-descriptor '{}'".format(
6568 nb_scale_op, scaling_group
6569 )
6570 )
6571 for x in range(vdu_delta.get("number-of-instances", 1)):
6572 vca_scaling_info.append(
6573 {
6574 "osm_vdu_id": vdu_delta["id"],
6575 "member-vnf-index": vnf_index,
6576 "type": "delete",
6577 "vdu_index": vdu_index - 1 - x,
6578 }
6579 )
6580 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6581 for kdu_delta in delta.get("kdu-resource-delta", {}):
6582 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6583 kdu_name = kdu_profile["kdu-name"]
6584 resource_name = kdu_profile.get("resource-name", "")
6585
6586 if not scaling_info["kdu-delete"].get(kdu_name, None):
6587 scaling_info["kdu-delete"][kdu_name] = []
6588
6589 kdur = get_kdur(db_vnfr, kdu_name)
6590 if kdur.get("helm-chart"):
6591 k8s_cluster_type = "helm-chart-v3"
6592 self.logger.debug("kdur: {}".format(kdur))
6593 if (
6594 kdur.get("helm-version")
6595 and kdur.get("helm-version") == "v2"
6596 ):
6597 k8s_cluster_type = "helm-chart"
6598 elif kdur.get("juju-bundle"):
6599 k8s_cluster_type = "juju-bundle"
6600 else:
6601 raise LcmException(
6602 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6603 "juju-bundle. Maybe an old NBI version is running".format(
6604 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6605 )
6606 )
6607
6608 min_instance_count = 0
6609 if kdu_profile and "min-number-of-instances" in kdu_profile:
6610 min_instance_count = kdu_profile["min-number-of-instances"]
6611
6612 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6613 deployed_kdu, _ = get_deployed_kdu(
6614 nsr_deployed, kdu_name, vnf_index
6615 )
6616 if deployed_kdu is None:
6617 raise LcmException(
6618 "KDU '{}' for vnf '{}' not deployed".format(
6619 kdu_name, vnf_index
6620 )
6621 )
6622 kdu_instance = deployed_kdu.get("kdu-instance")
6623 instance_num = await self.k8scluster_map[
6624 k8s_cluster_type
6625 ].get_scale_count(
6626 resource_name,
6627 kdu_instance,
6628 vca_id=vca_id,
6629 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6630 kdu_model=deployed_kdu.get("kdu-model"),
6631 )
6632 kdu_replica_count = instance_num - kdu_delta.get(
6633 "number-of-instances", 1
6634 )
6635
6636 if kdu_replica_count < min_instance_count < instance_num:
6637 kdu_replica_count = min_instance_count
6638 if kdu_replica_count < min_instance_count:
6639 raise LcmException(
6640 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6641 "scaling-group-descriptor '{}'".format(
6642 instance_num, scaling_group
6643 )
6644 )
6645
6646 for x in range(kdu_delta.get("number-of-instances", 1)):
6647 vca_scaling_info.append(
6648 {
6649 "osm_kdu_id": kdu_name,
6650 "member-vnf-index": vnf_index,
6651 "type": "delete",
6652 "kdu_index": instance_num - x - 1,
6653 }
6654 )
6655 scaling_info["kdu-delete"][kdu_name].append(
6656 {
6657 "member-vnf-index": vnf_index,
6658 "type": "delete",
6659 "k8s-cluster-type": k8s_cluster_type,
6660 "resource-name": resource_name,
6661 "scale": kdu_replica_count,
6662 }
6663 )
6664
6665 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6666 vdu_delete = copy(scaling_info.get("vdu-delete"))
6667 if scaling_info["scaling_direction"] == "IN":
6668 for vdur in reversed(db_vnfr["vdur"]):
6669 if vdu_delete.get(vdur["vdu-id-ref"]):
6670 vdu_delete[vdur["vdu-id-ref"]] -= 1
6671 scaling_info["vdu"].append(
6672 {
6673 "name": vdur.get("name") or vdur.get("vdu-name"),
6674 "vdu_id": vdur["vdu-id-ref"],
6675 "interface": [],
6676 }
6677 )
6678 for interface in vdur["interfaces"]:
6679 scaling_info["vdu"][-1]["interface"].append(
6680 {
6681 "name": interface["name"],
6682 "ip_address": interface["ip-address"],
6683 "mac_address": interface.get("mac-address"),
6684 }
6685 )
6686 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6687
6688 # PRE-SCALE BEGIN
6689 step = "Executing pre-scale vnf-config-primitive"
6690 if scaling_descriptor.get("scaling-config-action"):
6691 for scaling_config_action in scaling_descriptor[
6692 "scaling-config-action"
6693 ]:
6694 if (
6695 scaling_config_action.get("trigger") == "pre-scale-in"
6696 and scaling_type == "SCALE_IN"
6697 ) or (
6698 scaling_config_action.get("trigger") == "pre-scale-out"
6699 and scaling_type == "SCALE_OUT"
6700 ):
6701 vnf_config_primitive = scaling_config_action[
6702 "vnf-config-primitive-name-ref"
6703 ]
6704 step = db_nslcmop_update[
6705 "detailed-status"
6706 ] = "executing pre-scale scaling-config-action '{}'".format(
6707 vnf_config_primitive
6708 )
6709
6710 # look for primitive
6711 for config_primitive in (
6712 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6713 ).get("config-primitive", ()):
6714 if config_primitive["name"] == vnf_config_primitive:
6715 break
6716 else:
6717 raise LcmException(
6718 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6719 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6720 "primitive".format(scaling_group, vnf_config_primitive)
6721 )
6722
6723 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6724 if db_vnfr.get("additionalParamsForVnf"):
6725 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6726
6727 scale_process = "VCA"
6728 db_nsr_update["config-status"] = "configuring pre-scaling"
6729 primitive_params = self._map_primitive_params(
6730 config_primitive, {}, vnfr_params
6731 )
6732
6733 # Pre-scale retry check: Check if this sub-operation has been executed before
6734 op_index = self._check_or_add_scale_suboperation(
6735 db_nslcmop,
6736 vnf_index,
6737 vnf_config_primitive,
6738 primitive_params,
6739 "PRE-SCALE",
6740 )
6741 if op_index == self.SUBOPERATION_STATUS_SKIP:
6742 # Skip sub-operation
6743 result = "COMPLETED"
6744 result_detail = "Done"
6745 self.logger.debug(
6746 logging_text
6747 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6748 vnf_config_primitive, result, result_detail
6749 )
6750 )
6751 else:
6752 if op_index == self.SUBOPERATION_STATUS_NEW:
6753 # New sub-operation: Get index of this sub-operation
6754 op_index = (
6755 len(db_nslcmop.get("_admin", {}).get("operations"))
6756 - 1
6757 )
6758 self.logger.debug(
6759 logging_text
6760 + "vnf_config_primitive={} New sub-operation".format(
6761 vnf_config_primitive
6762 )
6763 )
6764 else:
6765 # retry: Get registered params for this existing sub-operation
6766 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6767 op_index
6768 ]
6769 vnf_index = op.get("member_vnf_index")
6770 vnf_config_primitive = op.get("primitive")
6771 primitive_params = op.get("primitive_params")
6772 self.logger.debug(
6773 logging_text
6774 + "vnf_config_primitive={} Sub-operation retry".format(
6775 vnf_config_primitive
6776 )
6777 )
6778 # Execute the primitive, either with new (first-time) or registered (reintent) args
6779 ee_descriptor_id = config_primitive.get(
6780 "execution-environment-ref"
6781 )
6782 primitive_name = config_primitive.get(
6783 "execution-environment-primitive", vnf_config_primitive
6784 )
6785 ee_id, vca_type = self._look_for_deployed_vca(
6786 nsr_deployed["VCA"],
6787 member_vnf_index=vnf_index,
6788 vdu_id=None,
6789 vdu_count_index=None,
6790 ee_descriptor_id=ee_descriptor_id,
6791 )
6792 result, result_detail = await self._ns_execute_primitive(
6793 ee_id,
6794 primitive_name,
6795 primitive_params,
6796 vca_type=vca_type,
6797 vca_id=vca_id,
6798 )
6799 self.logger.debug(
6800 logging_text
6801 + "vnf_config_primitive={} Done with result {} {}".format(
6802 vnf_config_primitive, result, result_detail
6803 )
6804 )
6805 # Update operationState = COMPLETED | FAILED
6806 self._update_suboperation_status(
6807 db_nslcmop, op_index, result, result_detail
6808 )
6809
6810 if result == "FAILED":
6811 raise LcmException(result_detail)
6812 db_nsr_update["config-status"] = old_config_status
6813 scale_process = None
6814 # PRE-SCALE END
6815
6816 db_nsr_update[
6817 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6818 ] = nb_scale_op
6819 db_nsr_update[
6820 "_admin.scaling-group.{}.time".format(admin_scale_index)
6821 ] = time()
6822
6823 # SCALE-IN VCA - BEGIN
6824 if vca_scaling_info:
6825 step = db_nslcmop_update[
6826 "detailed-status"
6827 ] = "Deleting the execution environments"
6828 scale_process = "VCA"
6829 for vca_info in vca_scaling_info:
6830 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6831 member_vnf_index = str(vca_info["member-vnf-index"])
6832 self.logger.debug(
6833 logging_text + "vdu info: {}".format(vca_info)
6834 )
6835 if vca_info.get("osm_vdu_id"):
6836 vdu_id = vca_info["osm_vdu_id"]
6837 vdu_index = int(vca_info["vdu_index"])
6838 stage[
6839 1
6840 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6841 member_vnf_index, vdu_id, vdu_index
6842 )
6843 stage[2] = step = "Scaling in VCA"
6844 self._write_op_status(op_id=nslcmop_id, stage=stage)
6845 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6846 config_update = db_nsr["configurationStatus"]
6847 for vca_index, vca in enumerate(vca_update):
6848 if (
6849 (vca or vca.get("ee_id"))
6850 and vca["member-vnf-index"] == member_vnf_index
6851 and vca["vdu_count_index"] == vdu_index
6852 ):
6853 if vca.get("vdu_id"):
6854 config_descriptor = get_configuration(
6855 db_vnfd, vca.get("vdu_id")
6856 )
6857 elif vca.get("kdu_name"):
6858 config_descriptor = get_configuration(
6859 db_vnfd, vca.get("kdu_name")
6860 )
6861 else:
6862 config_descriptor = get_configuration(
6863 db_vnfd, db_vnfd["id"]
6864 )
6865 operation_params = (
6866 db_nslcmop.get("operationParams") or {}
6867 )
6868 exec_terminate_primitives = not operation_params.get(
6869 "skip_terminate_primitives"
6870 ) and vca.get("needed_terminate")
6871 task = asyncio.ensure_future(
6872 asyncio.wait_for(
6873 self.destroy_N2VC(
6874 logging_text,
6875 db_nslcmop,
6876 vca,
6877 config_descriptor,
6878 vca_index,
6879 destroy_ee=True,
6880 exec_primitives=exec_terminate_primitives,
6881 scaling_in=True,
6882 vca_id=vca_id,
6883 ),
6884 timeout=self.timeout.charm_delete,
6885 )
6886 )
6887 tasks_dict_info[task] = "Terminating VCA {}".format(
6888 vca.get("ee_id")
6889 )
6890 del vca_update[vca_index]
6891 del config_update[vca_index]
6892 # wait for pending tasks of terminate primitives
6893 if tasks_dict_info:
6894 self.logger.debug(
6895 logging_text
6896 + "Waiting for tasks {}".format(
6897 list(tasks_dict_info.keys())
6898 )
6899 )
6900 error_list = await self._wait_for_tasks(
6901 logging_text,
6902 tasks_dict_info,
6903 min(
6904 self.timeout.charm_delete, self.timeout.ns_terminate
6905 ),
6906 stage,
6907 nslcmop_id,
6908 )
6909 tasks_dict_info.clear()
6910 if error_list:
6911 raise LcmException("; ".join(error_list))
6912
6913 db_vca_and_config_update = {
6914 "_admin.deployed.VCA": vca_update,
6915 "configurationStatus": config_update,
6916 }
6917 self.update_db_2(
6918 "nsrs", db_nsr["_id"], db_vca_and_config_update
6919 )
6920 scale_process = None
6921 # SCALE-IN VCA - END
6922
6923 # SCALE RO - BEGIN
6924 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6925 scale_process = "RO"
6926 if self.ro_config.ng:
6927 await self._scale_ng_ro(
6928 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6929 )
6930 scaling_info.pop("vdu-create", None)
6931 scaling_info.pop("vdu-delete", None)
6932
6933 scale_process = None
6934 # SCALE RO - END
6935
6936 # SCALE KDU - BEGIN
6937 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6938 scale_process = "KDU"
6939 await self._scale_kdu(
6940 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6941 )
6942 scaling_info.pop("kdu-create", None)
6943 scaling_info.pop("kdu-delete", None)
6944
6945 scale_process = None
6946 # SCALE KDU - END
6947
6948 if db_nsr_update:
6949 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6950
6951 # SCALE-UP VCA - BEGIN
6952 if vca_scaling_info:
6953 step = db_nslcmop_update[
6954 "detailed-status"
6955 ] = "Creating new execution environments"
6956 scale_process = "VCA"
6957 for vca_info in vca_scaling_info:
6958 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6959 member_vnf_index = str(vca_info["member-vnf-index"])
6960 self.logger.debug(
6961 logging_text + "vdu info: {}".format(vca_info)
6962 )
6963 vnfd_id = db_vnfr["vnfd-ref"]
6964 if vca_info.get("osm_vdu_id"):
6965 vdu_index = int(vca_info["vdu_index"])
6966 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6967 if db_vnfr.get("additionalParamsForVnf"):
6968 deploy_params.update(
6969 parse_yaml_strings(
6970 db_vnfr["additionalParamsForVnf"].copy()
6971 )
6972 )
6973 descriptor_config = get_configuration(
6974 db_vnfd, db_vnfd["id"]
6975 )
6976 if descriptor_config:
6977 vdu_id = None
6978 vdu_name = None
6979 kdu_name = None
6980 kdu_index = None
6981 self._deploy_n2vc(
6982 logging_text=logging_text
6983 + "member_vnf_index={} ".format(member_vnf_index),
6984 db_nsr=db_nsr,
6985 db_vnfr=db_vnfr,
6986 nslcmop_id=nslcmop_id,
6987 nsr_id=nsr_id,
6988 nsi_id=nsi_id,
6989 vnfd_id=vnfd_id,
6990 vdu_id=vdu_id,
6991 kdu_name=kdu_name,
6992 kdu_index=kdu_index,
6993 member_vnf_index=member_vnf_index,
6994 vdu_index=vdu_index,
6995 vdu_name=vdu_name,
6996 deploy_params=deploy_params,
6997 descriptor_config=descriptor_config,
6998 base_folder=base_folder,
6999 task_instantiation_info=tasks_dict_info,
7000 stage=stage,
7001 )
7002 vdu_id = vca_info["osm_vdu_id"]
7003 vdur = find_in_list(
7004 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7005 )
7006 descriptor_config = get_configuration(db_vnfd, vdu_id)
7007 if vdur.get("additionalParams"):
7008 deploy_params_vdu = parse_yaml_strings(
7009 vdur["additionalParams"]
7010 )
7011 else:
7012 deploy_params_vdu = deploy_params
7013 deploy_params_vdu["OSM"] = get_osm_params(
7014 db_vnfr, vdu_id, vdu_count_index=vdu_index
7015 )
7016 if descriptor_config:
7017 vdu_name = None
7018 kdu_name = None
7019 kdu_index = None
7020 stage[
7021 1
7022 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7023 member_vnf_index, vdu_id, vdu_index
7024 )
7025 stage[2] = step = "Scaling out VCA"
7026 self._write_op_status(op_id=nslcmop_id, stage=stage)
7027 self._deploy_n2vc(
7028 logging_text=logging_text
7029 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7030 member_vnf_index, vdu_id, vdu_index
7031 ),
7032 db_nsr=db_nsr,
7033 db_vnfr=db_vnfr,
7034 nslcmop_id=nslcmop_id,
7035 nsr_id=nsr_id,
7036 nsi_id=nsi_id,
7037 vnfd_id=vnfd_id,
7038 vdu_id=vdu_id,
7039 kdu_name=kdu_name,
7040 member_vnf_index=member_vnf_index,
7041 vdu_index=vdu_index,
7042 kdu_index=kdu_index,
7043 vdu_name=vdu_name,
7044 deploy_params=deploy_params_vdu,
7045 descriptor_config=descriptor_config,
7046 base_folder=base_folder,
7047 task_instantiation_info=tasks_dict_info,
7048 stage=stage,
7049 )
7050 # SCALE-UP VCA - END
7051 scale_process = None
7052
7053 # POST-SCALE BEGIN
7054 # execute primitive service POST-SCALING
7055 step = "Executing post-scale vnf-config-primitive"
7056 if scaling_descriptor.get("scaling-config-action"):
7057 for scaling_config_action in scaling_descriptor[
7058 "scaling-config-action"
7059 ]:
7060 if (
7061 scaling_config_action.get("trigger") == "post-scale-in"
7062 and scaling_type == "SCALE_IN"
7063 ) or (
7064 scaling_config_action.get("trigger") == "post-scale-out"
7065 and scaling_type == "SCALE_OUT"
7066 ):
7067 vnf_config_primitive = scaling_config_action[
7068 "vnf-config-primitive-name-ref"
7069 ]
7070 step = db_nslcmop_update[
7071 "detailed-status"
7072 ] = "executing post-scale scaling-config-action '{}'".format(
7073 vnf_config_primitive
7074 )
7075
7076 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7077 if db_vnfr.get("additionalParamsForVnf"):
7078 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7079
7080 # look for primitive
7081 for config_primitive in (
7082 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7083 ).get("config-primitive", ()):
7084 if config_primitive["name"] == vnf_config_primitive:
7085 break
7086 else:
7087 raise LcmException(
7088 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7089 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7090 "config-primitive".format(
7091 scaling_group, vnf_config_primitive
7092 )
7093 )
7094 scale_process = "VCA"
7095 db_nsr_update["config-status"] = "configuring post-scaling"
7096 primitive_params = self._map_primitive_params(
7097 config_primitive, {}, vnfr_params
7098 )
7099
7100 # Post-scale retry check: Check if this sub-operation has been executed before
7101 op_index = self._check_or_add_scale_suboperation(
7102 db_nslcmop,
7103 vnf_index,
7104 vnf_config_primitive,
7105 primitive_params,
7106 "POST-SCALE",
7107 )
7108 if op_index == self.SUBOPERATION_STATUS_SKIP:
7109 # Skip sub-operation
7110 result = "COMPLETED"
7111 result_detail = "Done"
7112 self.logger.debug(
7113 logging_text
7114 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7115 vnf_config_primitive, result, result_detail
7116 )
7117 )
7118 else:
7119 if op_index == self.SUBOPERATION_STATUS_NEW:
7120 # New sub-operation: Get index of this sub-operation
7121 op_index = (
7122 len(db_nslcmop.get("_admin", {}).get("operations"))
7123 - 1
7124 )
7125 self.logger.debug(
7126 logging_text
7127 + "vnf_config_primitive={} New sub-operation".format(
7128 vnf_config_primitive
7129 )
7130 )
7131 else:
7132 # retry: Get registered params for this existing sub-operation
7133 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7134 op_index
7135 ]
7136 vnf_index = op.get("member_vnf_index")
7137 vnf_config_primitive = op.get("primitive")
7138 primitive_params = op.get("primitive_params")
7139 self.logger.debug(
7140 logging_text
7141 + "vnf_config_primitive={} Sub-operation retry".format(
7142 vnf_config_primitive
7143 )
7144 )
7145 # Execute the primitive, either with new (first-time) or registered (reintent) args
7146 ee_descriptor_id = config_primitive.get(
7147 "execution-environment-ref"
7148 )
7149 primitive_name = config_primitive.get(
7150 "execution-environment-primitive", vnf_config_primitive
7151 )
7152 ee_id, vca_type = self._look_for_deployed_vca(
7153 nsr_deployed["VCA"],
7154 member_vnf_index=vnf_index,
7155 vdu_id=None,
7156 vdu_count_index=None,
7157 ee_descriptor_id=ee_descriptor_id,
7158 )
7159 result, result_detail = await self._ns_execute_primitive(
7160 ee_id,
7161 primitive_name,
7162 primitive_params,
7163 vca_type=vca_type,
7164 vca_id=vca_id,
7165 )
7166 self.logger.debug(
7167 logging_text
7168 + "vnf_config_primitive={} Done with result {} {}".format(
7169 vnf_config_primitive, result, result_detail
7170 )
7171 )
7172 # Update operationState = COMPLETED | FAILED
7173 self._update_suboperation_status(
7174 db_nslcmop, op_index, result, result_detail
7175 )
7176
7177 if result == "FAILED":
7178 raise LcmException(result_detail)
7179 db_nsr_update["config-status"] = old_config_status
7180 scale_process = None
7181 # POST-SCALE END
7182
7183 db_nsr_update[
7184 "detailed-status"
7185 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7186 db_nsr_update["operational-status"] = (
7187 "running"
7188 if old_operational_status == "failed"
7189 else old_operational_status
7190 )
7191 db_nsr_update["config-status"] = old_config_status
7192 return
7193 except (
7194 ROclient.ROClientException,
7195 DbException,
7196 LcmException,
7197 NgRoException,
7198 ) as e:
7199 self.logger.error(logging_text + "Exit Exception {}".format(e))
7200 exc = e
7201 except asyncio.CancelledError:
7202 self.logger.error(
7203 logging_text + "Cancelled Exception while '{}'".format(step)
7204 )
7205 exc = "Operation was cancelled"
7206 except Exception as e:
7207 exc = traceback.format_exc()
7208 self.logger.critical(
7209 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7210 exc_info=True,
7211 )
7212 finally:
7213 self._write_ns_status(
7214 nsr_id=nsr_id,
7215 ns_state=None,
7216 current_operation="IDLE",
7217 current_operation_id=None,
7218 )
7219 if tasks_dict_info:
7220 stage[1] = "Waiting for instantiate pending tasks."
7221 self.logger.debug(logging_text + stage[1])
7222 exc = await self._wait_for_tasks(
7223 logging_text,
7224 tasks_dict_info,
7225 self.timeout.ns_deploy,
7226 stage,
7227 nslcmop_id,
7228 nsr_id=nsr_id,
7229 )
7230 if exc:
7231 db_nslcmop_update[
7232 "detailed-status"
7233 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7234 nslcmop_operation_state = "FAILED"
7235 if db_nsr:
7236 db_nsr_update["operational-status"] = old_operational_status
7237 db_nsr_update["config-status"] = old_config_status
7238 db_nsr_update["detailed-status"] = ""
7239 if scale_process:
7240 if "VCA" in scale_process:
7241 db_nsr_update["config-status"] = "failed"
7242 if "RO" in scale_process:
7243 db_nsr_update["operational-status"] = "failed"
7244 db_nsr_update[
7245 "detailed-status"
7246 ] = "FAILED scaling nslcmop={} {}: {}".format(
7247 nslcmop_id, step, exc
7248 )
7249 else:
7250 error_description_nslcmop = None
7251 nslcmop_operation_state = "COMPLETED"
7252 db_nslcmop_update["detailed-status"] = "Done"
7253
7254 self._write_op_status(
7255 op_id=nslcmop_id,
7256 stage="",
7257 error_message=error_description_nslcmop,
7258 operation_state=nslcmop_operation_state,
7259 other_update=db_nslcmop_update,
7260 )
7261 if db_nsr:
7262 self._write_ns_status(
7263 nsr_id=nsr_id,
7264 ns_state=None,
7265 current_operation="IDLE",
7266 current_operation_id=None,
7267 other_update=db_nsr_update,
7268 )
7269
7270 if nslcmop_operation_state:
7271 try:
7272 msg = {
7273 "nsr_id": nsr_id,
7274 "nslcmop_id": nslcmop_id,
7275 "operationState": nslcmop_operation_state,
7276 }
7277 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7278 except Exception as e:
7279 self.logger.error(
7280 logging_text + "kafka_write notification Exception {}".format(e)
7281 )
7282 self.logger.debug(logging_text + "Exit")
7283 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7284
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale KDU replicas up or down on the corresponding K8s cluster.

        For every KDU referenced in ``scaling_info`` ("kdu-create" or
        "kdu-delete"), runs the descriptor's terminate-config-primitives
        before a scale-in, performs the scale through the matching K8s
        connector, and runs the initial-config-primitives after a
        scale-out.  Config primitives are executed here only when the KDU
        has no juju execution environment reference (otherwise they are
        handled through N2VC).

        :param logging_text: prefix for log messages
        :param nsr_id: NS record _id, used to address the "nsrs" DB document
        :param nsr_deployed: nsr _admin.deployed section with the K8s deployment info
        :param db_vnfd: VNF descriptor of the VNF owning the KDUs
        :param vca_id: VCA id, passed through to the K8s connector
        :param scaling_info: dict holding "kdu-create"/"kdu-delete" scaling data
        """
        # only one of the two keys is expected to be populated per operation
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # DB location where the K8s connector writes its detailed status
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run terminate primitives only when no juju EE owns them
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung connector call
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # actual replica change; atomic=True asks the connector to
                # roll back on failure
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run initial primitives only when no juju EE owns them
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): hard-coded 600 s here, unlike the
                            # configurable timeout used for the terminate
                            # primitives above — confirm this asymmetry is
                            # intentional
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7394
7395 async def _scale_ng_ro(
7396 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7397 ):
7398 nsr_id = db_nslcmop["nsInstanceId"]
7399 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7400 db_vnfrs = {}
7401
7402 # read from db: vnfd's for every vnf
7403 db_vnfds = []
7404
7405 # for each vnf in ns, read vnfd
7406 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7407 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7408 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7409 # if we haven't this vnfd, read it from db
7410 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7411 # read from db
7412 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7413 db_vnfds.append(vnfd)
7414 n2vc_key = self.n2vc.get_public_key()
7415 n2vc_key_list = [n2vc_key]
7416 self.scale_vnfr(
7417 db_vnfr,
7418 vdu_scaling_info.get("vdu-create"),
7419 vdu_scaling_info.get("vdu-delete"),
7420 mark_delete=True,
7421 )
7422 # db_vnfr has been updated, update db_vnfrs to use it
7423 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7424 await self._instantiate_ng_ro(
7425 logging_text,
7426 nsr_id,
7427 db_nsd,
7428 db_nsr,
7429 db_nslcmop,
7430 db_vnfrs,
7431 db_vnfds,
7432 n2vc_key_list,
7433 stage=stage,
7434 start_deploy=time(),
7435 timeout_ns_deploy=self.timeout.ns_deploy,
7436 )
7437 if vdu_scaling_info.get("vdu-delete"):
7438 self.scale_vnfr(
7439 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7440 )
7441
7442 async def extract_prometheus_scrape_jobs(
7443 self,
7444 ee_id: str,
7445 artifact_path: str,
7446 ee_config_descriptor: dict,
7447 vnfr_id: str,
7448 nsr_id: str,
7449 target_ip: str,
7450 element_type: str,
7451 vnf_member_index: str = "",
7452 vdu_id: str = "",
7453 vdu_index: int = None,
7454 kdu_name: str = "",
7455 kdu_index: int = None,
7456 ) -> dict:
7457 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7458 This method will wait until the corresponding VDU or KDU is fully instantiated
7459
7460 Args:
7461 ee_id (str): Execution Environment ID
7462 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7463 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7464 vnfr_id (str): VNFR ID where this EE applies
7465 nsr_id (str): NSR ID where this EE applies
7466 target_ip (str): VDU/KDU instance IP address
7467 element_type (str): NS or VNF or VDU or KDU
7468 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7469 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7470 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7471 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7472 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7473
7474 Raises:
7475 LcmException: When the VDU or KDU instance was not found in an hour
7476
7477 Returns:
7478 _type_: Prometheus jobs
7479 """
7480 # default the vdur and kdur names to an empty string, to avoid any later
7481 # problem with Prometheus when the element type is not VDU or KDU
7482 vdur_name = ""
7483 kdur_name = ""
7484
7485 # look if exist a file called 'prometheus*.j2' and
7486 artifact_content = self.fs.dir_ls(artifact_path)
7487 job_file = next(
7488 (
7489 f
7490 for f in artifact_content
7491 if f.startswith("prometheus") and f.endswith(".j2")
7492 ),
7493 None,
7494 )
7495 if not job_file:
7496 return
7497 with self.fs.file_open((artifact_path, job_file), "r") as f:
7498 job_data = f.read()
7499
7500 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7501 if element_type in ("VDU", "KDU"):
7502 for _ in range(360):
7503 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7504 if vdu_id and vdu_index is not None:
7505 vdur = next(
7506 (
7507 x
7508 for x in get_iterable(db_vnfr, "vdur")
7509 if (
7510 x.get("vdu-id-ref") == vdu_id
7511 and x.get("count-index") == vdu_index
7512 )
7513 ),
7514 {},
7515 )
7516 if vdur.get("name"):
7517 vdur_name = vdur.get("name")
7518 break
7519 if kdu_name and kdu_index is not None:
7520 kdur = next(
7521 (
7522 x
7523 for x in get_iterable(db_vnfr, "kdur")
7524 if (
7525 x.get("kdu-name") == kdu_name
7526 and x.get("count-index") == kdu_index
7527 )
7528 ),
7529 {},
7530 )
7531 if kdur.get("name"):
7532 kdur_name = kdur.get("name")
7533 break
7534
7535 await asyncio.sleep(10, loop=self.loop)
7536 else:
7537 if vdu_id and vdu_index is not None:
7538 raise LcmException(
7539 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7540 )
7541 if kdu_name and kdu_index is not None:
7542 raise LcmException(
7543 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7544 )
7545
7546 # TODO get_service
7547 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7548 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7549 host_port = "80"
7550 vnfr_id = vnfr_id.replace("-", "")
7551 variables = {
7552 "JOB_NAME": vnfr_id,
7553 "TARGET_IP": target_ip,
7554 "EXPORTER_POD_IP": host_name,
7555 "EXPORTER_POD_PORT": host_port,
7556 "NSR_ID": nsr_id,
7557 "VNF_MEMBER_INDEX": vnf_member_index,
7558 "VDUR_NAME": vdur_name,
7559 "KDUR_NAME": kdur_name,
7560 "ELEMENT_TYPE": element_type,
7561 }
7562 job_list = parse_job(job_data, variables)
7563 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7564 for job in job_list:
7565 if (
7566 not isinstance(job.get("job_name"), str)
7567 or vnfr_id not in job["job_name"]
7568 ):
7569 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7570 job["nsr_id"] = nsr_id
7571 job["vnfr_id"] = vnfr_id
7572 return job_list
7573
7574 async def rebuild_start_stop(
7575 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7576 ):
7577 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7578 self.logger.info(logging_text + "Enter")
7579 stage = ["Preparing the environment", ""]
7580 # database nsrs record
7581 db_nsr_update = {}
7582 vdu_vim_name = None
7583 vim_vm_id = None
7584 # in case of error, indicates what part of scale was failed to put nsr at error status
7585 start_deploy = time()
7586 try:
7587 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7588 vim_account_id = db_vnfr.get("vim-account-id")
7589 vim_info_key = "vim:" + vim_account_id
7590 vdu_id = additional_param["vdu_id"]
7591 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7592 vdur = find_in_list(
7593 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7594 )
7595 if vdur:
7596 vdu_vim_name = vdur["name"]
7597 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7598 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7599 else:
7600 raise LcmException("Target vdu is not found")
7601 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7602 # wait for any previous tasks in process
7603 stage[1] = "Waiting for previous operations to terminate"
7604 self.logger.info(stage[1])
7605 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7606
7607 stage[1] = "Reading from database."
7608 self.logger.info(stage[1])
7609 self._write_ns_status(
7610 nsr_id=nsr_id,
7611 ns_state=None,
7612 current_operation=operation_type.upper(),
7613 current_operation_id=nslcmop_id,
7614 )
7615 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7616
7617 # read from db: ns
7618 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7619 db_nsr_update["operational-status"] = operation_type
7620 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7621 # Payload for RO
7622 desc = {
7623 operation_type: {
7624 "vim_vm_id": vim_vm_id,
7625 "vnf_id": vnf_id,
7626 "vdu_index": additional_param["count-index"],
7627 "vdu_id": vdur["id"],
7628 "target_vim": target_vim,
7629 "vim_account_id": vim_account_id,
7630 }
7631 }
7632 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7633 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7634 self.logger.info("ro nsr id: {}".format(nsr_id))
7635 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7636 self.logger.info("response from RO: {}".format(result_dict))
7637 action_id = result_dict["action_id"]
7638 await self._wait_ng_ro(
7639 nsr_id,
7640 action_id,
7641 nslcmop_id,
7642 start_deploy,
7643 self.timeout.operate,
7644 None,
7645 "start_stop_rebuild",
7646 )
7647 return "COMPLETED", "Done"
7648 except (ROclient.ROClientException, DbException, LcmException) as e:
7649 self.logger.error("Exit Exception {}".format(e))
7650 exc = e
7651 except asyncio.CancelledError:
7652 self.logger.error("Cancelled Exception while '{}'".format(stage))
7653 exc = "Operation was cancelled"
7654 except Exception as e:
7655 exc = traceback.format_exc()
7656 self.logger.critical(
7657 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7658 )
7659 return "FAILED", "Error in operate VNF {}".format(exc)
7660
7661 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7662 """
7663 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7664
7665 :param: vim_account_id: VIM Account ID
7666
7667 :return: (cloud_name, cloud_credential)
7668 """
7669 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7670 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7671
7672 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7673 """
7674 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7675
7676 :param: vim_account_id: VIM Account ID
7677
7678 :return: (cloud_name, cloud_credential)
7679 """
7680 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7681 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7682
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        The operation parameters of the nslcmop are forwarded verbatim to RO,
        which performs the actual migration; this method only tracks status
        in the DB and notifies the result on the "ns" kafka topic.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance is handling this operation
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below ("detailed-status": "Done")
        # but never written to the DB nor passed to _write_ns_status — confirm
        # whether it should be persisted
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # forward the operation parameters to RO as-is
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the migrate action finished (or timeout)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always release the "MIGRATING" state, record the final operation
            # status and notify the result over kafka
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    # notification failure must not fail the operation itself
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7785
7786 async def heal(self, nsr_id, nslcmop_id):
7787 """
7788 Heal NS
7789
7790 :param nsr_id: ns instance to heal
7791 :param nslcmop_id: operation to run
7792 :return:
7793 """
7794
7795 # Try to lock HA task here
7796 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7797 if not task_is_locked_by_me:
7798 return
7799
7800 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7801 stage = ["", "", ""]
7802 tasks_dict_info = {}
7803 # ^ stage, step, VIM progress
7804 self.logger.debug(logging_text + "Enter")
7805 # get all needed from database
7806 db_nsr = None
7807 db_nslcmop_update = {}
7808 db_nsr_update = {}
7809 db_vnfrs = {} # vnf's info indexed by _id
7810 exc = None
7811 old_operational_status = ""
7812 old_config_status = ""
7813 nsi_id = None
7814 try:
7815 # wait for any previous tasks in process
7816 step = "Waiting for previous operations to terminate"
7817 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7818 self._write_ns_status(
7819 nsr_id=nsr_id,
7820 ns_state=None,
7821 current_operation="HEALING",
7822 current_operation_id=nslcmop_id,
7823 )
7824
7825 step = "Getting nslcmop from database"
7826 self.logger.debug(
7827 step + " after having waited for previous tasks to be completed"
7828 )
7829 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7830
7831 step = "Getting nsr from database"
7832 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7833 old_operational_status = db_nsr["operational-status"]
7834 old_config_status = db_nsr["config-status"]
7835
7836 db_nsr_update = {
7837 "_admin.deployed.RO.operational-status": "healing",
7838 }
7839 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7840
7841 step = "Sending heal order to VIM"
7842 await self.heal_RO(
7843 logging_text=logging_text,
7844 nsr_id=nsr_id,
7845 db_nslcmop=db_nslcmop,
7846 stage=stage,
7847 )
7848 # VCA tasks
7849 # read from db: nsd
7850 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7851 self.logger.debug(logging_text + stage[1])
7852 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7853 self.fs.sync(db_nsr["nsd-id"])
7854 db_nsr["nsd"] = nsd
7855 # read from db: vnfr's of this ns
7856 step = "Getting vnfrs from db"
7857 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7858 for vnfr in db_vnfrs_list:
7859 db_vnfrs[vnfr["_id"]] = vnfr
7860 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7861
7862 # Check for each target VNF
7863 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7864 for target_vnf in target_list:
7865 # Find this VNF in the list from DB
7866 vnfr_id = target_vnf.get("vnfInstanceId", None)
7867 if vnfr_id:
7868 db_vnfr = db_vnfrs[vnfr_id]
7869 vnfd_id = db_vnfr.get("vnfd-id")
7870 vnfd_ref = db_vnfr.get("vnfd-ref")
7871 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7872 base_folder = vnfd["_admin"]["storage"]
7873 vdu_id = None
7874 vdu_index = 0
7875 vdu_name = None
7876 kdu_name = None
7877 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7878 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7879
7880 # Check each target VDU and deploy N2VC
7881 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7882 "vdu", []
7883 )
7884 if not target_vdu_list:
7885 # Codigo nuevo para crear diccionario
7886 target_vdu_list = []
7887 for existing_vdu in db_vnfr.get("vdur"):
7888 vdu_name = existing_vdu.get("vdu-name", None)
7889 vdu_index = existing_vdu.get("count-index", 0)
7890 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7891 "run-day1", False
7892 )
7893 vdu_to_be_healed = {
7894 "vdu-id": vdu_name,
7895 "count-index": vdu_index,
7896 "run-day1": vdu_run_day1,
7897 }
7898 target_vdu_list.append(vdu_to_be_healed)
7899 for target_vdu in target_vdu_list:
7900 deploy_params_vdu = target_vdu
7901 # Set run-day1 vnf level value if not vdu level value exists
7902 if not deploy_params_vdu.get("run-day1") and target_vnf[
7903 "additionalParams"
7904 ].get("run-day1"):
7905 deploy_params_vdu["run-day1"] = target_vnf[
7906 "additionalParams"
7907 ].get("run-day1")
7908 vdu_name = target_vdu.get("vdu-id", None)
7909 # TODO: Get vdu_id from vdud.
7910 vdu_id = vdu_name
7911 # For multi instance VDU count-index is mandatory
7912 # For single session VDU count-indes is 0
7913 vdu_index = target_vdu.get("count-index", 0)
7914
7915 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7916 stage[1] = "Deploying Execution Environments."
7917 self.logger.debug(logging_text + stage[1])
7918
7919 # VNF Level charm. Normal case when proxy charms.
7920 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7921 descriptor_config = get_configuration(vnfd, vnfd_ref)
7922 if descriptor_config:
7923 # Continue if healed machine is management machine
7924 vnf_ip_address = db_vnfr.get("ip-address")
7925 target_instance = None
7926 for instance in db_vnfr.get("vdur", None):
7927 if (
7928 instance["vdu-name"] == vdu_name
7929 and instance["count-index"] == vdu_index
7930 ):
7931 target_instance = instance
7932 break
7933 if vnf_ip_address == target_instance.get("ip-address"):
7934 self._heal_n2vc(
7935 logging_text=logging_text
7936 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7937 member_vnf_index, vdu_name, vdu_index
7938 ),
7939 db_nsr=db_nsr,
7940 db_vnfr=db_vnfr,
7941 nslcmop_id=nslcmop_id,
7942 nsr_id=nsr_id,
7943 nsi_id=nsi_id,
7944 vnfd_id=vnfd_ref,
7945 vdu_id=None,
7946 kdu_name=None,
7947 member_vnf_index=member_vnf_index,
7948 vdu_index=0,
7949 vdu_name=None,
7950 deploy_params=deploy_params_vdu,
7951 descriptor_config=descriptor_config,
7952 base_folder=base_folder,
7953 task_instantiation_info=tasks_dict_info,
7954 stage=stage,
7955 )
7956
7957 # VDU Level charm. Normal case with native charms.
7958 descriptor_config = get_configuration(vnfd, vdu_name)
7959 if descriptor_config:
7960 self._heal_n2vc(
7961 logging_text=logging_text
7962 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7963 member_vnf_index, vdu_name, vdu_index
7964 ),
7965 db_nsr=db_nsr,
7966 db_vnfr=db_vnfr,
7967 nslcmop_id=nslcmop_id,
7968 nsr_id=nsr_id,
7969 nsi_id=nsi_id,
7970 vnfd_id=vnfd_ref,
7971 vdu_id=vdu_id,
7972 kdu_name=kdu_name,
7973 member_vnf_index=member_vnf_index,
7974 vdu_index=vdu_index,
7975 vdu_name=vdu_name,
7976 deploy_params=deploy_params_vdu,
7977 descriptor_config=descriptor_config,
7978 base_folder=base_folder,
7979 task_instantiation_info=tasks_dict_info,
7980 stage=stage,
7981 )
7982
7983 except (
7984 ROclient.ROClientException,
7985 DbException,
7986 LcmException,
7987 NgRoException,
7988 ) as e:
7989 self.logger.error(logging_text + "Exit Exception {}".format(e))
7990 exc = e
7991 except asyncio.CancelledError:
7992 self.logger.error(
7993 logging_text + "Cancelled Exception while '{}'".format(step)
7994 )
7995 exc = "Operation was cancelled"
7996 except Exception as e:
7997 exc = traceback.format_exc()
7998 self.logger.critical(
7999 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
8000 exc_info=True,
8001 )
8002 finally:
8003 if tasks_dict_info:
8004 stage[1] = "Waiting for healing pending tasks."
8005 self.logger.debug(logging_text + stage[1])
8006 exc = await self._wait_for_tasks(
8007 logging_text,
8008 tasks_dict_info,
8009 self.timeout.ns_deploy,
8010 stage,
8011 nslcmop_id,
8012 nsr_id=nsr_id,
8013 )
8014 if exc:
8015 db_nslcmop_update[
8016 "detailed-status"
8017 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
8018 nslcmop_operation_state = "FAILED"
8019 if db_nsr:
8020 db_nsr_update["operational-status"] = old_operational_status
8021 db_nsr_update["config-status"] = old_config_status
8022 db_nsr_update[
8023 "detailed-status"
8024 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
8025 for task, task_name in tasks_dict_info.items():
8026 if not task.done() or task.cancelled() or task.exception():
8027 if task_name.startswith(self.task_name_deploy_vca):
8028 # A N2VC task is pending
8029 db_nsr_update["config-status"] = "failed"
8030 else:
8031 # RO task is pending
8032 db_nsr_update["operational-status"] = "failed"
8033 else:
8034 error_description_nslcmop = None
8035 nslcmop_operation_state = "COMPLETED"
8036 db_nslcmop_update["detailed-status"] = "Done"
8037 db_nsr_update["detailed-status"] = "Done"
8038 db_nsr_update["operational-status"] = "running"
8039 db_nsr_update["config-status"] = "configured"
8040
8041 self._write_op_status(
8042 op_id=nslcmop_id,
8043 stage="",
8044 error_message=error_description_nslcmop,
8045 operation_state=nslcmop_operation_state,
8046 other_update=db_nslcmop_update,
8047 )
8048 if db_nsr:
8049 self._write_ns_status(
8050 nsr_id=nsr_id,
8051 ns_state=None,
8052 current_operation="IDLE",
8053 current_operation_id=None,
8054 other_update=db_nsr_update,
8055 )
8056
8057 if nslcmop_operation_state:
8058 try:
8059 msg = {
8060 "nsr_id": nsr_id,
8061 "nslcmop_id": nslcmop_id,
8062 "operationState": nslcmop_operation_state,
8063 }
8064 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
8065 except Exception as e:
8066 self.logger.error(
8067 logging_text + "kafka_write notification Exception {}".format(e)
8068 )
8069 self.logger.debug(logging_text + "Exit")
8070 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8071
8072 async def heal_RO(
8073 self,
8074 logging_text,
8075 nsr_id,
8076 db_nslcmop,
8077 stage,
8078 ):
8079 """
8080 Heal at RO
8081 :param logging_text: preffix text to use at logging
8082 :param nsr_id: nsr identity
8083 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8084 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8085 :return: None or exception
8086 """
8087
8088 def get_vim_account(vim_account_id):
8089 nonlocal db_vims
8090 if vim_account_id in db_vims:
8091 return db_vims[vim_account_id]
8092 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8093 db_vims[vim_account_id] = db_vim
8094 return db_vim
8095
8096 try:
8097 start_heal = time()
8098 ns_params = db_nslcmop.get("operationParams")
8099 if ns_params and ns_params.get("timeout_ns_heal"):
8100 timeout_ns_heal = ns_params["timeout_ns_heal"]
8101 else:
8102 timeout_ns_heal = self.timeout.ns_heal
8103
8104 db_vims = {}
8105
8106 nslcmop_id = db_nslcmop["_id"]
8107 target = {
8108 "action_id": nslcmop_id,
8109 }
8110 self.logger.warning(
8111 "db_nslcmop={} and timeout_ns_heal={}".format(
8112 db_nslcmop, timeout_ns_heal
8113 )
8114 )
8115 target.update(db_nslcmop.get("operationParams", {}))
8116
8117 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8118 desc = await self.RO.recreate(nsr_id, target)
8119 self.logger.debug("RO return > {}".format(desc))
8120 action_id = desc["action_id"]
8121 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8122 await self._wait_ng_ro(
8123 nsr_id,
8124 action_id,
8125 nslcmop_id,
8126 start_heal,
8127 timeout_ns_heal,
8128 stage,
8129 operation="healing",
8130 )
8131
8132 # Updating NSR
8133 db_nsr_update = {
8134 "_admin.deployed.RO.operational-status": "running",
8135 "detailed-status": " ".join(stage),
8136 }
8137 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8138 self._write_op_status(nslcmop_id, stage)
8139 self.logger.debug(
8140 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8141 )
8142
8143 except Exception as e:
8144 stage[2] = "ERROR healing at VIM"
8145 # self.set_vnfr_at_error(db_vnfrs, str(e))
8146 self.logger.error(
8147 "Error healing at VIM {}".format(e),
8148 exc_info=not isinstance(
8149 e,
8150 (
8151 ROclient.ROClientException,
8152 LcmException,
8153 DbException,
8154 NgRoException,
8155 ),
8156 ),
8157 )
8158 raise
8159
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch a heal_N2VC task for every execution environment of this element.

        For each execution-environment item in descriptor_config it locates (or
        creates) the matching entry in db_nsr._admin.deployed.VCA, then schedules
        an asyncio heal_N2VC task and records it in task_instantiation_info.
        Synchronous: the actual healing runs in the scheduled tasks.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Determine the list of execution environments from the descriptor.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Classify the VCA type from the ee item: juju (proxy/native/k8s) or helm.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Find the existing deployed-VCA record that matches this element;
            # the for/else leaves vca_index at the matching entry on break, or
            # creates a new record (and bumps vca_index) when no match exists.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8321
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal one VCA: re-register its execution environment and re-run day-1.

        For native charms it waits for the healed VM, registers a new execution
        environment and installs the configuration software; for proxy/helm
        types it re-injects the SSH key after RO finishes healing. Day-1
        primitives are executed only when deploy_params carries run-day1.
        Writes configurationStatus in the nsr along the way; on any error it
        marks the VCA BROKEN and raises LcmException.
        """
        nsr_id = db_nsr["_id"]
        # dotted prefix for all nsr updates concerning this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the last action attempted, for error reporting below
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Narrow element_type/namespace from NS down to VNF/VDU/KDU as applicable
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the special "config" primitive provides initial charm config values
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                for job in prometheus_jobs:
                    self.db.set_one(
                        "prometheus_jobs",
                        {"job_name": job["job_name"]},
                        job,
                        upsert=True,
                        fail_on_empty=False,
                    )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8730
8731 async def _wait_heal_ro(
8732 self,
8733 nsr_id,
8734 timeout=600,
8735 ):
8736 start_time = time()
8737 while time() <= start_time + timeout:
8738 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8739 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8740 "operational-status"
8741 ]
8742 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8743 if operational_status_ro != "healing":
8744 break
8745 await asyncio.sleep(15, loop=self.loop)
8746 else: # timeout_ns_deploy
8747 raise NgRoException("Timeout waiting ns to deploy")
8748
8749 async def vertical_scale(self, nsr_id, nslcmop_id):
8750 """
8751 Vertical Scale the VDUs in a NS
8752
8753 :param: nsr_id: NS Instance ID
8754 :param: nslcmop_id: nslcmop ID of migrate
8755
8756 """
8757 # Try to lock HA task here
8758 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8759 if not task_is_locked_by_me:
8760 return
8761 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8762 self.logger.debug(logging_text + "Enter")
8763 # get all needed from database
8764 db_nslcmop = None
8765 db_nslcmop_update = {}
8766 nslcmop_operation_state = None
8767 db_nsr_update = {}
8768 target = {}
8769 exc = None
8770 # in case of error, indicates what part of scale was failed to put nsr at error status
8771 start_deploy = time()
8772
8773 try:
8774 # wait for any previous tasks in process
8775 step = "Waiting for previous operations to terminate"
8776 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8777
8778 self._write_ns_status(
8779 nsr_id=nsr_id,
8780 ns_state=None,
8781 current_operation="VerticalScale",
8782 current_operation_id=nslcmop_id,
8783 )
8784 step = "Getting nslcmop from database"
8785 self.logger.debug(
8786 step + " after having waited for previous tasks to be completed"
8787 )
8788 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8789 operationParams = db_nslcmop.get("operationParams")
8790 target = {}
8791 target.update(operationParams)
8792 desc = await self.RO.vertical_scale(nsr_id, target)
8793 self.logger.debug("RO return > {}".format(desc))
8794 action_id = desc["action_id"]
8795 await self._wait_ng_ro(
8796 nsr_id,
8797 action_id,
8798 nslcmop_id,
8799 start_deploy,
8800 self.timeout.verticalscale,
8801 operation="verticalscale",
8802 )
8803 except (ROclient.ROClientException, DbException, LcmException) as e:
8804 self.logger.error("Exit Exception {}".format(e))
8805 exc = e
8806 except asyncio.CancelledError:
8807 self.logger.error("Cancelled Exception while '{}'".format(step))
8808 exc = "Operation was cancelled"
8809 except Exception as e:
8810 exc = traceback.format_exc()
8811 self.logger.critical(
8812 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8813 )
8814 finally:
8815 self._write_ns_status(
8816 nsr_id=nsr_id,
8817 ns_state=None,
8818 current_operation="IDLE",
8819 current_operation_id=None,
8820 )
8821 if exc:
8822 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8823 nslcmop_operation_state = "FAILED"
8824 else:
8825 nslcmop_operation_state = "COMPLETED"
8826 db_nslcmop_update["detailed-status"] = "Done"
8827 db_nsr_update["detailed-status"] = "Done"
8828
8829 self._write_op_status(
8830 op_id=nslcmop_id,
8831 stage="",
8832 error_message="",
8833 operation_state=nslcmop_operation_state,
8834 other_update=db_nslcmop_update,
8835 )
8836 if nslcmop_operation_state:
8837 try:
8838 msg = {
8839 "nsr_id": nsr_id,
8840 "nslcmop_id": nslcmop_id,
8841 "operationState": nslcmop_operation_state,
8842 }
8843 await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8844 except Exception as e:
8845 self.logger.error(
8846 logging_text + "kafka_write notification Exception {}".format(e)
8847 )
8848 self.logger.debug(logging_text + "Exit")
8849 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")