Feature 10981: added Mongo accesses needed for NGSA
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm_conn import K8sHelmConnector
102 from n2vc.k8s_helm3_conn import K8sHelm3Connector
103 from n2vc.k8s_juju_conn import K8sJujuConnector
104
105 from osm_common.dbbase import DbException
106 from osm_common.fsbase import FsException
107
108 from osm_lcm.data_utils.database.database import Database
109 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
110 from osm_lcm.data_utils.wim import (
111 get_sdn_ports,
112 get_target_wim_attrs,
113 select_feasible_wim_account,
114 )
115
116 from n2vc.n2vc_juju_conn import N2VCJujuConnector
117 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
118
119 from osm_lcm.lcm_helm_conn import LCMHelmConn
120 from osm_lcm.osm_config import OsmConfigBuilder
121 from osm_lcm.prometheus import parse_job
122
123 from copy import copy, deepcopy
124 from time import time
125 from uuid import uuid4
126
127 from random import randint
128
129 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
130
131
class NsLcm(LcmBase):
    """Network Service lifecycle manager.

    Drives NS operations through the RO client and the VCA/K8s connectors
    that are created in __init__ (N2VC juju, helm execution environments,
    K8s helm2/helm3/juju connectors).
    """

    # Sentinel status codes returned by sub-operation lookups
    # (used by code outside this chunk — semantics per the constant names):
    SUBOPERATION_STATUS_NOT_FOUND = -1  # no matching sub-operation exists
    SUBOPERATION_STATUS_NEW = -2  # sub-operation is new / not yet recorded
    SUBOPERATION_STATUS_SKIP = -3  # sub-operation exists and may be skipped
    # Human-readable label for the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message-bus client, forwarded to LcmBase
        :param lcm_tasks: task registry object, stored for later use
        :param config: LcmCfg with the LCM configuration (timeouts, RO, VCA sections)
        :param loop: asyncio event loop shared with all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # DB and filesystem are process-wide singletons
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju-based VCA)
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connector using helm v2 binary
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # K8s connector using helm v3 binary
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
        )

        # K8s connector using juju bundles
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # maps KDU deployment-type strings to the connector that handles them;
        # note plain "chart" is routed to helm v3, "helm-chart" to helm v2
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # maps VCA (charm) type strings to the connector that handles them
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # maps operation type to the RO coroutine used to poll its status;
        # only "healing" uses the recreate-specific status endpoint
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
225
226 @staticmethod
227 def increment_ip_mac(ip_mac, vm_index=1):
228 if not isinstance(ip_mac, str):
229 return ip_mac
230 try:
231 # try with ipv4 look for last dot
232 i = ip_mac.rfind(".")
233 if i > 0:
234 i += 1
235 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
236 # try with ipv6 or mac look for last colon. Operate in hex
237 i = ip_mac.rfind(":")
238 if i > 0:
239 i += 1
240 # format in hex, len can be 2 for mac or 4 for ipv6
241 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
242 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
243 )
244 except Exception:
245 pass
246 return None
247
248 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """N2VC callback: refresh vcaStatus / configurationStatus / nsState of a nsr.

        :param table: source table of the change (not used by this body)
        :param filter: dict used to locate the nsr; "_id" is taken as nsr_id
        :param path: dotted path of the change; its last numeric segment is
            interpreted as the VCA index
        :param updated_data: changed data (not used by this body)
        :param vca_id: optional VCA id forwarded to n2vc.get_status
        Errors (other than cancellation/timeout) are logged and swallowed.
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        # .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted segment of path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so these assignments raise KeyError, which is swallowed
                # by the except below — the configurationStatus change never
                # reaches the DB. Confirm intended behavior before relying on it.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # transition READY <-> DEGRADED based on the collected health
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
364
365 async def _on_update_k8s_db(
366 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
367 ):
368 """
369 Updating vca status in NSR record
370 :param cluster_uuid: UUID of a k8s cluster
371 :param kdu_instance: The unique name of the KDU instance
372 :param filter: To get nsr_id
373 :cluster_type: The cluster type (juju, k8s)
374 :return: none
375 """
376
377 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
378 # .format(cluster_uuid, kdu_instance, filter))
379
380 nsr_id = filter.get("_id")
381 try:
382 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
383 cluster_uuid=cluster_uuid,
384 kdu_instance=kdu_instance,
385 yaml_format=False,
386 complete_status=True,
387 vca_id=vca_id,
388 )
389
390 # vcaStatus
391 db_dict = dict()
392 db_dict["vcaStatus"] = {nsr_id: vca_status}
393
394 self.logger.debug(
395 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
396 )
397
398 # write to database
399 self.update_db_2("nsrs", nsr_id, db_dict)
400 except (asyncio.CancelledError, asyncio.TimeoutError):
401 raise
402 except Exception as e:
403 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
404
405 @staticmethod
406 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
407 try:
408 env = Environment(
409 undefined=StrictUndefined,
410 autoescape=select_autoescape(default_for_string=True, default=True),
411 )
412 template = env.from_string(cloud_init_text)
413 return template.render(additional_params or {})
414 except UndefinedError as e:
415 raise LcmException(
416 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
417 "file, must be provided in the instantiation parameters inside the "
418 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
419 )
420 except (TemplateError, TemplateNotFound) as e:
421 raise LcmException(
422 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
423 vnfd_id, vdu_id, e
424 )
425 )
426
427 def _get_vdu_cloud_init_content(self, vdu, vnfd):
428 cloud_init_content = cloud_init_file = None
429 try:
430 if vdu.get("cloud-init-file"):
431 base_folder = vnfd["_admin"]["storage"]
432 if base_folder["pkg-dir"]:
433 cloud_init_file = "{}/{}/cloud_init/{}".format(
434 base_folder["folder"],
435 base_folder["pkg-dir"],
436 vdu["cloud-init-file"],
437 )
438 else:
439 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
440 base_folder["folder"],
441 vdu["cloud-init-file"],
442 )
443 with self.fs.file_open(cloud_init_file, "r") as ci_file:
444 cloud_init_content = ci_file.read()
445 elif vdu.get("cloud-init"):
446 cloud_init_content = vdu["cloud-init"]
447
448 return cloud_init_content
449 except FsException as e:
450 raise LcmException(
451 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
452 vnfd["id"], vdu["id"], cloud_init_file, e
453 )
454 )
455
456 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
457 vdur = next(
458 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
459 )
460 additional_params = vdur.get("additionalParams")
461 return parse_yaml_strings(additional_params)
462
463 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
464 """
465 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
466 :param vnfd: input vnfd
467 :param new_id: overrides vnf id if provided
468 :param additionalParams: Instantiation params for VNFs provided
469 :param nsrId: Id of the NSR
470 :return: copy of vnfd
471 """
472 vnfd_RO = deepcopy(vnfd)
473 # remove unused by RO configuration, monitoring, scaling and internal keys
474 vnfd_RO.pop("_id", None)
475 vnfd_RO.pop("_admin", None)
476 vnfd_RO.pop("monitoring-param", None)
477 vnfd_RO.pop("scaling-group-descriptor", None)
478 vnfd_RO.pop("kdu", None)
479 vnfd_RO.pop("k8s-cluster", None)
480 if new_id:
481 vnfd_RO["id"] = new_id
482
483 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
484 for vdu in get_iterable(vnfd_RO, "vdu"):
485 vdu.pop("cloud-init-file", None)
486 vdu.pop("cloud-init", None)
487 return vnfd_RO
488
489 @staticmethod
490 def ip_profile_2_RO(ip_profile):
491 RO_ip_profile = deepcopy(ip_profile)
492 if "dns-server" in RO_ip_profile:
493 if isinstance(RO_ip_profile["dns-server"], list):
494 RO_ip_profile["dns-address"] = []
495 for ds in RO_ip_profile.pop("dns-server"):
496 RO_ip_profile["dns-address"].append(ds["address"])
497 else:
498 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
499 if RO_ip_profile.get("ip-version") == "ipv4":
500 RO_ip_profile["ip-version"] = "IPv4"
501 if RO_ip_profile.get("ip-version") == "ipv6":
502 RO_ip_profile["ip-version"] = "IPv6"
503 if "dhcp-params" in RO_ip_profile:
504 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
505 return RO_ip_profile
506
507 def _get_ro_vim_id_for_vim_account(self, vim_account):
508 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
509 if db_vim["_admin"]["operationalState"] != "ENABLED":
510 raise LcmException(
511 "VIM={} is not available. operationalState={}".format(
512 vim_account, db_vim["_admin"]["operationalState"]
513 )
514 )
515 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
516 return RO_vim_id
517
518 def get_ro_wim_id_for_wim_account(self, wim_account):
519 if isinstance(wim_account, str):
520 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
521 if db_wim["_admin"]["operationalState"] != "ENABLED":
522 raise LcmException(
523 "WIM={} is not available. operationalState={}".format(
524 wim_account, db_wim["_admin"]["operationalState"]
525 )
526 )
527 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
528 return RO_wim_id
529 else:
530 return wim_account
531
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale-out / scale-in to the vnfr vdur list in the database.

        :param db_vnfr: vnfr content; its "vdur" is refreshed from the DB on return
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, only mark vdur status as DELETING instead
            of pulling them from the document
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the source vdur and reset per-instance fields
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed-ip/fixed-mac interfaces get an incremented
                        # address; otherwise the address is dropped so the VIM
                        # assigns a fresh one
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only the first vdu can be management of the vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count matching vdur as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # push the new vdur (and template, when scaling to 0) in one write
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
642
643 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
644 """
645 Updates database nsr with the RO info for the created vld
646 :param ns_update_nsr: dictionary to be filled with the updated info
647 :param db_nsr: content of db_nsr. This is also modified
648 :param nsr_desc_RO: nsr descriptor from RO
649 :return: Nothing, LcmException is raised on errors
650 """
651
652 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
653 for net_RO in get_iterable(nsr_desc_RO, "nets"):
654 if vld["id"] != net_RO.get("ns_net_osm_id"):
655 continue
656 vld["vim-id"] = net_RO.get("vim_net_id")
657 vld["name"] = net_RO.get("vim_name")
658 vld["status"] = net_RO.get("status")
659 vld["status-detailed"] = net_RO.get("error_msg")
660 ns_update_nsr["vld.{}".format(vld_index)] = vld
661 break
662 else:
663 raise LcmException(
664 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
665 )
666
667 def set_vnfr_at_error(self, db_vnfrs, error_text):
668 try:
669 for db_vnfr in db_vnfrs.values():
670 vnfr_update = {"status": "ERROR"}
671 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
672 if "status" not in vdur:
673 vdur["status"] = "ERROR"
674 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
675 if error_text:
676 vdur["status-detailed"] = str(error_text)
677 vnfr_update[
678 "vdur.{}.status-detailed".format(vdu_index)
679 ] = "ERROR"
680 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
681 except DbException as e:
682 self.logger.error("Cannot update vnf. {}".format(e))
683
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf with matching member_vnf_index; the for/else
            # raises when no match exists
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                # take the first address when RO returns "ip1;ip2;..."
                if vnf_RO.get("ip_address"):
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    # PDUs are not deployed by the VIM, skip them
                    if vdur.get("pdu-type"):
                        continue
                    # find the RO vm matching this vdur by osm id AND
                    # count-index (counting only same-id vms seen so far)
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # copy the vnf-internal vld info from RO nets
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
780
781 def _get_ns_config_info(self, nsr_id):
782 """
783 Generates a mapping between vnf,vdu elements and the N2VC id
784 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
785 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
786 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
787 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
788 """
789 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
790 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
791 mapping = {}
792 ns_config_info = {"osm-config-mapping": mapping}
793 for vca in vca_deployed_list:
794 if not vca["member-vnf-index"]:
795 continue
796 if not vca["vdu_id"]:
797 mapping[vca["member-vnf-index"]] = vca["application"]
798 else:
799 mapping[
800 "{}.{}.{}".format(
801 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
802 )
803 ] = vca["application"]
804 return ns_config_info
805
806 async def _instantiate_ng_ro(
807 self,
808 logging_text,
809 nsr_id,
810 nsd,
811 db_nsr,
812 db_nslcmop,
813 db_vnfrs,
814 db_vnfds,
815 n2vc_key_list,
816 stage,
817 start_deploy,
818 timeout_ns_deploy,
819 ):
820 db_vims = {}
821
822 def get_vim_account(vim_account_id):
823 nonlocal db_vims
824 if vim_account_id in db_vims:
825 return db_vims[vim_account_id]
826 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
827 db_vims[vim_account_id] = db_vim
828 return db_vim
829
830 # modify target_vld info with instantiation parameters
831 def parse_vld_instantiation_params(
832 target_vim, target_vld, vld_params, target_sdn
833 ):
834 if vld_params.get("ip-profile"):
835 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
836 vld_params["ip-profile"]
837 )
838 if vld_params.get("provider-network"):
839 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
840 "provider-network"
841 ]
842 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
843 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
844 "provider-network"
845 ]["sdn-ports"]
846
847 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
848 # if wim_account_id is specified in vld_params, validate if it is feasible.
849 wim_account_id, db_wim = select_feasible_wim_account(
850 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
851 )
852
853 if wim_account_id:
854 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
855 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
856 # update vld_params with correct WIM account Id
857 vld_params["wimAccountId"] = wim_account_id
858
859 target_wim = "wim:{}".format(wim_account_id)
860 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
861 sdn_ports = get_sdn_ports(vld_params, db_wim)
862 if len(sdn_ports) > 0:
863 target_vld["vim_info"][target_wim] = target_wim_attrs
864 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
865
866 self.logger.debug(
867 "Target VLD with WIM data: {:s}".format(str(target_vld))
868 )
869
870 for param in ("vim-network-name", "vim-network-id"):
871 if vld_params.get(param):
872 if isinstance(vld_params[param], dict):
873 for vim, vim_net in vld_params[param].items():
874 other_target_vim = "vim:" + vim
875 populate_dict(
876 target_vld["vim_info"],
877 (other_target_vim, param.replace("-", "_")),
878 vim_net,
879 )
880 else: # isinstance str
881 target_vld["vim_info"][target_vim][
882 param.replace("-", "_")
883 ] = vld_params[param]
884 if vld_params.get("common_id"):
885 target_vld["common_id"] = vld_params.get("common_id")
886
887 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
888 def update_ns_vld_target(target, ns_params):
889 for vnf_params in ns_params.get("vnf", ()):
890 if vnf_params.get("vimAccountId"):
891 target_vnf = next(
892 (
893 vnfr
894 for vnfr in db_vnfrs.values()
895 if vnf_params["member-vnf-index"]
896 == vnfr["member-vnf-index-ref"]
897 ),
898 None,
899 )
900 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
901 if not vdur:
902 return
903 for a_index, a_vld in enumerate(target["ns"]["vld"]):
904 target_vld = find_in_list(
905 get_iterable(vdur, "interfaces"),
906 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
907 )
908
909 vld_params = find_in_list(
910 get_iterable(ns_params, "vld"),
911 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
912 )
913 if target_vld:
914 if vnf_params.get("vimAccountId") not in a_vld.get(
915 "vim_info", {}
916 ):
917 target_vim_network_list = [
918 v for _, v in a_vld.get("vim_info").items()
919 ]
920 target_vim_network_name = next(
921 (
922 item.get("vim_network_name", "")
923 for item in target_vim_network_list
924 ),
925 "",
926 )
927
928 target["ns"]["vld"][a_index].get("vim_info").update(
929 {
930 "vim:{}".format(vnf_params["vimAccountId"]): {
931 "vim_network_name": target_vim_network_name,
932 }
933 }
934 )
935
936 if vld_params:
937 for param in ("vim-network-name", "vim-network-id"):
938 if vld_params.get(param) and isinstance(
939 vld_params[param], dict
940 ):
941 for vim, vim_net in vld_params[
942 param
943 ].items():
944 other_target_vim = "vim:" + vim
945 populate_dict(
946 target["ns"]["vld"][a_index].get(
947 "vim_info"
948 ),
949 (
950 other_target_vim,
951 param.replace("-", "_"),
952 ),
953 vim_net,
954 )
955
956 nslcmop_id = db_nslcmop["_id"]
957 target = {
958 "name": db_nsr["name"],
959 "ns": {"vld": []},
960 "vnf": [],
961 "image": deepcopy(db_nsr["image"]),
962 "flavor": deepcopy(db_nsr["flavor"]),
963 "action_id": nslcmop_id,
964 "cloud_init_content": {},
965 }
966 for image in target["image"]:
967 image["vim_info"] = {}
968 for flavor in target["flavor"]:
969 flavor["vim_info"] = {}
970 if db_nsr.get("affinity-or-anti-affinity-group"):
971 target["affinity-or-anti-affinity-group"] = deepcopy(
972 db_nsr["affinity-or-anti-affinity-group"]
973 )
974 for affinity_or_anti_affinity_group in target[
975 "affinity-or-anti-affinity-group"
976 ]:
977 affinity_or_anti_affinity_group["vim_info"] = {}
978
979 if db_nslcmop.get("lcmOperationType") != "instantiate":
980 # get parameters of instantiation:
981 db_nslcmop_instantiate = self.db.get_list(
982 "nslcmops",
983 {
984 "nsInstanceId": db_nslcmop["nsInstanceId"],
985 "lcmOperationType": "instantiate",
986 },
987 )[-1]
988 ns_params = db_nslcmop_instantiate.get("operationParams")
989 else:
990 ns_params = db_nslcmop.get("operationParams")
991 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
992 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
993
994 cp2target = {}
995 for vld_index, vld in enumerate(db_nsr.get("vld")):
996 target_vim = "vim:{}".format(ns_params["vimAccountId"])
997 target_vld = {
998 "id": vld["id"],
999 "name": vld["name"],
1000 "mgmt-network": vld.get("mgmt-network", False),
1001 "type": vld.get("type"),
1002 "vim_info": {
1003 target_vim: {
1004 "vim_network_name": vld.get("vim-network-name"),
1005 "vim_account_id": ns_params["vimAccountId"],
1006 }
1007 },
1008 }
1009 # check if this network needs SDN assist
1010 if vld.get("pci-interfaces"):
1011 db_vim = get_vim_account(ns_params["vimAccountId"])
1012 if vim_config := db_vim.get("config"):
1013 if sdnc_id := vim_config.get("sdn-controller"):
1014 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1015 target_sdn = "sdn:{}".format(sdnc_id)
1016 target_vld["vim_info"][target_sdn] = {
1017 "sdn": True,
1018 "target_vim": target_vim,
1019 "vlds": [sdn_vld],
1020 "type": vld.get("type"),
1021 }
1022
1023 nsd_vnf_profiles = get_vnf_profiles(nsd)
1024 for nsd_vnf_profile in nsd_vnf_profiles:
1025 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1026 if cp["virtual-link-profile-id"] == vld["id"]:
1027 cp2target[
1028 "member_vnf:{}.{}".format(
1029 cp["constituent-cpd-id"][0][
1030 "constituent-base-element-id"
1031 ],
1032 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1033 )
1034 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1035
1036 # check at nsd descriptor, if there is an ip-profile
1037 vld_params = {}
1038 nsd_vlp = find_in_list(
1039 get_virtual_link_profiles(nsd),
1040 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1041 == vld["id"],
1042 )
1043 if (
1044 nsd_vlp
1045 and nsd_vlp.get("virtual-link-protocol-data")
1046 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1047 ):
1048 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
1049 "l3-protocol-data"
1050 ]
1051
1052 # update vld_params with instantiation params
1053 vld_instantiation_params = find_in_list(
1054 get_iterable(ns_params, "vld"),
1055 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1056 )
1057 if vld_instantiation_params:
1058 vld_params.update(vld_instantiation_params)
1059 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1060 target["ns"]["vld"].append(target_vld)
1061 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1062 update_ns_vld_target(target, ns_params)
1063
1064 for vnfr in db_vnfrs.values():
1065 vnfd = find_in_list(
1066 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1067 )
1068 vnf_params = find_in_list(
1069 get_iterable(ns_params, "vnf"),
1070 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1071 )
1072 target_vnf = deepcopy(vnfr)
1073 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1074 for vld in target_vnf.get("vld", ()):
1075 # check if connected to a ns.vld, to fill target'
1076 vnf_cp = find_in_list(
1077 vnfd.get("int-virtual-link-desc", ()),
1078 lambda cpd: cpd.get("id") == vld["id"],
1079 )
1080 if vnf_cp:
1081 ns_cp = "member_vnf:{}.{}".format(
1082 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1083 )
1084 if cp2target.get(ns_cp):
1085 vld["target"] = cp2target[ns_cp]
1086
1087 vld["vim_info"] = {
1088 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1089 }
1090 # check if this network needs SDN assist
1091 target_sdn = None
1092 if vld.get("pci-interfaces"):
1093 db_vim = get_vim_account(vnfr["vim-account-id"])
1094 sdnc_id = db_vim["config"].get("sdn-controller")
1095 if sdnc_id:
1096 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1097 target_sdn = "sdn:{}".format(sdnc_id)
1098 vld["vim_info"][target_sdn] = {
1099 "sdn": True,
1100 "target_vim": target_vim,
1101 "vlds": [sdn_vld],
1102 "type": vld.get("type"),
1103 }
1104
1105 # check at vnfd descriptor, if there is an ip-profile
1106 vld_params = {}
1107 vnfd_vlp = find_in_list(
1108 get_virtual_link_profiles(vnfd),
1109 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1110 )
1111 if (
1112 vnfd_vlp
1113 and vnfd_vlp.get("virtual-link-protocol-data")
1114 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1115 ):
1116 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
1117 "l3-protocol-data"
1118 ]
1119 # update vld_params with instantiation params
1120 if vnf_params:
1121 vld_instantiation_params = find_in_list(
1122 get_iterable(vnf_params, "internal-vld"),
1123 lambda i_vld: i_vld["name"] == vld["id"],
1124 )
1125 if vld_instantiation_params:
1126 vld_params.update(vld_instantiation_params)
1127 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1128
1129 vdur_list = []
1130 for vdur in target_vnf.get("vdur", ()):
1131 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1132 continue # This vdu must not be created
1133 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1134
1135 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1136
1137 if ssh_keys_all:
1138 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1139 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1140 if (
1141 vdu_configuration
1142 and vdu_configuration.get("config-access")
1143 and vdu_configuration.get("config-access").get("ssh-access")
1144 ):
1145 vdur["ssh-keys"] = ssh_keys_all
1146 vdur["ssh-access-required"] = vdu_configuration[
1147 "config-access"
1148 ]["ssh-access"]["required"]
1149 elif (
1150 vnf_configuration
1151 and vnf_configuration.get("config-access")
1152 and vnf_configuration.get("config-access").get("ssh-access")
1153 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1154 ):
1155 vdur["ssh-keys"] = ssh_keys_all
1156 vdur["ssh-access-required"] = vnf_configuration[
1157 "config-access"
1158 ]["ssh-access"]["required"]
1159 elif ssh_keys_instantiation and find_in_list(
1160 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1161 ):
1162 vdur["ssh-keys"] = ssh_keys_instantiation
1163
1164 self.logger.debug("NS > vdur > {}".format(vdur))
1165
1166 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1167 # cloud-init
1168 if vdud.get("cloud-init-file"):
1169 vdur["cloud-init"] = "{}:file:{}".format(
1170 vnfd["_id"], vdud.get("cloud-init-file")
1171 )
1172 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1173 if vdur["cloud-init"] not in target["cloud_init_content"]:
1174 base_folder = vnfd["_admin"]["storage"]
1175 if base_folder["pkg-dir"]:
1176 cloud_init_file = "{}/{}/cloud_init/{}".format(
1177 base_folder["folder"],
1178 base_folder["pkg-dir"],
1179 vdud.get("cloud-init-file"),
1180 )
1181 else:
1182 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1183 base_folder["folder"],
1184 vdud.get("cloud-init-file"),
1185 )
1186 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1187 target["cloud_init_content"][
1188 vdur["cloud-init"]
1189 ] = ci_file.read()
1190 elif vdud.get("cloud-init"):
1191 vdur["cloud-init"] = "{}:vdu:{}".format(
1192 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1193 )
1194 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1195 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1196 "cloud-init"
1197 ]
1198 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1199 deploy_params_vdu = self._format_additional_params(
1200 vdur.get("additionalParams") or {}
1201 )
1202 deploy_params_vdu["OSM"] = get_osm_params(
1203 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1204 )
1205 vdur["additionalParams"] = deploy_params_vdu
1206
1207 # flavor
1208 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1209 if target_vim not in ns_flavor["vim_info"]:
1210 ns_flavor["vim_info"][target_vim] = {}
1211
1212 # deal with images
1213 # in case alternative images are provided we must check if they should be applied
1214 # for the vim_type, modify the vim_type taking into account
1215 ns_image_id = int(vdur["ns-image-id"])
1216 if vdur.get("alt-image-ids"):
1217 db_vim = get_vim_account(vnfr["vim-account-id"])
1218 vim_type = db_vim["vim_type"]
1219 for alt_image_id in vdur.get("alt-image-ids"):
1220 ns_alt_image = target["image"][int(alt_image_id)]
1221 if vim_type == ns_alt_image.get("vim-type"):
1222 # must use alternative image
1223 self.logger.debug(
1224 "use alternative image id: {}".format(alt_image_id)
1225 )
1226 ns_image_id = alt_image_id
1227 vdur["ns-image-id"] = ns_image_id
1228 break
1229 ns_image = target["image"][int(ns_image_id)]
1230 if target_vim not in ns_image["vim_info"]:
1231 ns_image["vim_info"][target_vim] = {}
1232
1233 # Affinity groups
1234 if vdur.get("affinity-or-anti-affinity-group-id"):
1235 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1236 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1237 if target_vim not in ns_ags["vim_info"]:
1238 ns_ags["vim_info"][target_vim] = {}
1239
1240 vdur["vim_info"] = {target_vim: {}}
1241 # instantiation parameters
1242 if vnf_params:
1243 vdu_instantiation_params = find_in_list(
1244 get_iterable(vnf_params, "vdu"),
1245 lambda i_vdu: i_vdu["id"] == vdud["id"],
1246 )
1247 if vdu_instantiation_params:
1248 # Parse the vdu_volumes from the instantiation params
1249 vdu_volumes = get_volumes_from_instantiation_params(
1250 vdu_instantiation_params, vdud
1251 )
1252 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1253 vdur["additionalParams"]["OSM"][
1254 "vim_flavor_id"
1255 ] = vdu_instantiation_params.get("vim-flavor-id")
1256 vdur_list.append(vdur)
1257 target_vnf["vdur"] = vdur_list
1258 target["vnf"].append(target_vnf)
1259
1260 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1261 desc = await self.RO.deploy(nsr_id, target)
1262 self.logger.debug("RO return > {}".format(desc))
1263 action_id = desc["action_id"]
1264 await self._wait_ng_ro(
1265 nsr_id,
1266 action_id,
1267 nslcmop_id,
1268 start_deploy,
1269 timeout_ns_deploy,
1270 stage,
1271 operation="instantiation",
1272 )
1273
1274 # Updating NSR
1275 db_nsr_update = {
1276 "_admin.deployed.RO.operational-status": "running",
1277 "detailed-status": " ".join(stage),
1278 }
1279 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1280 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1281 self._write_op_status(nslcmop_id, stage)
1282 self.logger.debug(
1283 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1284 )
1285 return
1286
1287 async def _wait_ng_ro(
1288 self,
1289 nsr_id,
1290 action_id,
1291 nslcmop_id=None,
1292 start_time=None,
1293 timeout=600,
1294 stage=None,
1295 operation=None,
1296 ):
1297 detailed_status_old = None
1298 db_nsr_update = {}
1299 start_time = start_time or time()
1300 while time() <= start_time + timeout:
1301 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1302 self.logger.debug("Wait NG RO > {}".format(desc_status))
1303 if desc_status["status"] == "FAILED":
1304 raise NgRoException(desc_status["details"])
1305 elif desc_status["status"] == "BUILD":
1306 if stage:
1307 stage[2] = "VIM: ({})".format(desc_status["details"])
1308 elif desc_status["status"] == "DONE":
1309 if stage:
1310 stage[2] = "Deployed at VIM"
1311 break
1312 else:
1313 assert False, "ROclient.check_ns_status returns unknown {}".format(
1314 desc_status["status"]
1315 )
1316 if stage and nslcmop_id and stage[2] != detailed_status_old:
1317 detailed_status_old = stage[2]
1318 db_nsr_update["detailed-status"] = " ".join(stage)
1319 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1320 self._write_op_status(nslcmop_id, stage)
1321 await asyncio.sleep(15, loop=self.loop)
1322 else: # timeout_ns_deploy
1323 raise NgRoException("Timeout waiting ns to deploy")
1324
1325 async def _terminate_ng_ro(
1326 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1327 ):
1328 db_nsr_update = {}
1329 failed_detail = []
1330 action_id = None
1331 start_deploy = time()
1332 try:
1333 target = {
1334 "ns": {"vld": []},
1335 "vnf": [],
1336 "image": [],
1337 "flavor": [],
1338 "action_id": nslcmop_id,
1339 }
1340 desc = await self.RO.deploy(nsr_id, target)
1341 action_id = desc["action_id"]
1342 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1343 self.logger.debug(
1344 logging_text
1345 + "ns terminate action at RO. action_id={}".format(action_id)
1346 )
1347
1348 # wait until done
1349 delete_timeout = 20 * 60 # 20 minutes
1350 await self._wait_ng_ro(
1351 nsr_id,
1352 action_id,
1353 nslcmop_id,
1354 start_deploy,
1355 delete_timeout,
1356 stage,
1357 operation="termination",
1358 )
1359 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1360 # delete all nsr
1361 await self.RO.delete(nsr_id)
1362 except NgRoException as e:
1363 if e.http_code == 404: # not found
1364 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1365 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1366 self.logger.debug(
1367 logging_text + "RO_action_id={} already deleted".format(action_id)
1368 )
1369 elif e.http_code == 409: # conflict
1370 failed_detail.append("delete conflict: {}".format(e))
1371 self.logger.debug(
1372 logging_text
1373 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1374 )
1375 else:
1376 failed_detail.append("delete error: {}".format(e))
1377 self.logger.error(
1378 logging_text
1379 + "RO_action_id={} delete error: {}".format(action_id, e)
1380 )
1381 except Exception as e:
1382 failed_detail.append("delete error: {}".format(e))
1383 self.logger.error(
1384 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1385 )
1386
1387 if failed_detail:
1388 stage[2] = "Error deleting from VIM"
1389 else:
1390 stage[2] = "Deleted from VIM"
1391 db_nsr_update["detailed-status"] = " ".join(stage)
1392 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1393 self._write_op_status(nslcmop_id, stage)
1394
1395 if failed_detail:
1396 raise LcmException("; ".join(failed_detail))
1397 return
1398
1399 async def instantiate_RO(
1400 self,
1401 logging_text,
1402 nsr_id,
1403 nsd,
1404 db_nsr,
1405 db_nslcmop,
1406 db_vnfrs,
1407 db_vnfds,
1408 n2vc_key_list,
1409 stage,
1410 ):
1411 """
1412 Instantiate at RO
1413 :param logging_text: preffix text to use at logging
1414 :param nsr_id: nsr identity
1415 :param nsd: database content of ns descriptor
1416 :param db_nsr: database content of ns record
1417 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1418 :param db_vnfrs:
1419 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1420 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1421 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1422 :return: None or exception
1423 """
1424 try:
1425 start_deploy = time()
1426 ns_params = db_nslcmop.get("operationParams")
1427 if ns_params and ns_params.get("timeout_ns_deploy"):
1428 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1429 else:
1430 timeout_ns_deploy = self.timeout.ns_deploy
1431
1432 # Check for and optionally request placement optimization. Database will be updated if placement activated
1433 stage[2] = "Waiting for Placement."
1434 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1435 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1436 for vnfr in db_vnfrs.values():
1437 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1438 break
1439 else:
1440 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1441
1442 return await self._instantiate_ng_ro(
1443 logging_text,
1444 nsr_id,
1445 nsd,
1446 db_nsr,
1447 db_nslcmop,
1448 db_vnfrs,
1449 db_vnfds,
1450 n2vc_key_list,
1451 stage,
1452 start_deploy,
1453 timeout_ns_deploy,
1454 )
1455 except Exception as e:
1456 stage[2] = "ERROR deploying at VIM"
1457 self.set_vnfr_at_error(db_vnfrs, str(e))
1458 self.logger.error(
1459 "Error deploying at VIM {}".format(e),
1460 exc_info=not isinstance(
1461 e,
1462 (
1463 ROclient.ROClientException,
1464 LcmException,
1465 DbException,
1466 NgRoException,
1467 ),
1468 ),
1469 )
1470 raise
1471
1472 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1473 """
1474 Wait for kdu to be up, get ip address
1475 :param logging_text: prefix use for logging
1476 :param nsr_id:
1477 :param vnfr_id:
1478 :param kdu_name:
1479 :return: IP address, K8s services
1480 """
1481
1482 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1483 nb_tries = 0
1484
1485 while nb_tries < 360:
1486 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1487 kdur = next(
1488 (
1489 x
1490 for x in get_iterable(db_vnfr, "kdur")
1491 if x.get("kdu-name") == kdu_name
1492 ),
1493 None,
1494 )
1495 if not kdur:
1496 raise LcmException(
1497 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1498 )
1499 if kdur.get("status"):
1500 if kdur["status"] in ("READY", "ENABLED"):
1501 return kdur.get("ip-address"), kdur.get("services")
1502 else:
1503 raise LcmException(
1504 "target KDU={} is in error state".format(kdu_name)
1505 )
1506
1507 await asyncio.sleep(10, loop=self.loop)
1508 nb_tries += 1
1509 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1510
1511 async def wait_vm_up_insert_key_ro(
1512 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1513 ):
1514 """
1515 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1516 :param logging_text: prefix use for logging
1517 :param nsr_id:
1518 :param vnfr_id:
1519 :param vdu_id:
1520 :param vdu_index:
1521 :param pub_key: public ssh key to inject, None to skip
1522 :param user: user to apply the public ssh key
1523 :return: IP address
1524 """
1525
1526 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1527 ip_address = None
1528 target_vdu_id = None
1529 ro_retries = 0
1530
1531 while True:
1532 ro_retries += 1
1533 if ro_retries >= 360: # 1 hour
1534 raise LcmException(
1535 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1536 )
1537
1538 await asyncio.sleep(10, loop=self.loop)
1539
1540 # get ip address
1541 if not target_vdu_id:
1542 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1543
1544 if not vdu_id: # for the VNF case
1545 if db_vnfr.get("status") == "ERROR":
1546 raise LcmException(
1547 "Cannot inject ssh-key because target VNF is in error state"
1548 )
1549 ip_address = db_vnfr.get("ip-address")
1550 if not ip_address:
1551 continue
1552 vdur = next(
1553 (
1554 x
1555 for x in get_iterable(db_vnfr, "vdur")
1556 if x.get("ip-address") == ip_address
1557 ),
1558 None,
1559 )
1560 else: # VDU case
1561 vdur = next(
1562 (
1563 x
1564 for x in get_iterable(db_vnfr, "vdur")
1565 if x.get("vdu-id-ref") == vdu_id
1566 and x.get("count-index") == vdu_index
1567 ),
1568 None,
1569 )
1570
1571 if (
1572 not vdur and len(db_vnfr.get("vdur", ())) == 1
1573 ): # If only one, this should be the target vdu
1574 vdur = db_vnfr["vdur"][0]
1575 if not vdur:
1576 raise LcmException(
1577 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1578 vnfr_id, vdu_id, vdu_index
1579 )
1580 )
1581 # New generation RO stores information at "vim_info"
1582 ng_ro_status = None
1583 target_vim = None
1584 if vdur.get("vim_info"):
1585 target_vim = next(
1586 t for t in vdur["vim_info"]
1587 ) # there should be only one key
1588 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1589 if (
1590 vdur.get("pdu-type")
1591 or vdur.get("status") == "ACTIVE"
1592 or ng_ro_status == "ACTIVE"
1593 ):
1594 ip_address = vdur.get("ip-address")
1595 if not ip_address:
1596 continue
1597 target_vdu_id = vdur["vdu-id-ref"]
1598 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1599 raise LcmException(
1600 "Cannot inject ssh-key because target VM is in error state"
1601 )
1602
1603 if not target_vdu_id:
1604 continue
1605
1606 # inject public key into machine
1607 if pub_key and user:
1608 self.logger.debug(logging_text + "Inserting RO key")
1609 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1610 if vdur.get("pdu-type"):
1611 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1612 return ip_address
1613 try:
1614 target = {
1615 "action": {
1616 "action": "inject_ssh_key",
1617 "key": pub_key,
1618 "user": user,
1619 },
1620 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1621 }
1622 desc = await self.RO.deploy(nsr_id, target)
1623 action_id = desc["action_id"]
1624 await self._wait_ng_ro(
1625 nsr_id, action_id, timeout=600, operation="instantiation"
1626 )
1627 break
1628 except NgRoException as e:
1629 raise LcmException(
1630 "Reaching max tries injecting key. Error: {}".format(e)
1631 )
1632 else:
1633 break
1634
1635 return ip_address
1636
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        A VDU/KDU-level VCA has no dependencies and returns immediately. For
        VNF/NS-level VCAs, polls configurationStatus of the nsr every 10 s and
        returns once every relevant dependent VCA reports READY.

        :param nsr_id: nsr identity, used to re-read the ns record each poll
        :param vca_deployed_list: list of deployed VCAs ("_admin.deployed.VCA")
        :param vca_index: index of this VCA within vca_deployed_list
        :raises LcmException: if any dependent VCA is BROKEN, or on timeout
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): the counter decrements by 1 per 10-second sleep, so the
        # effective wait is ~3000 s, not 300 s — confirm whether "timeout -= 10"
        # was intended.
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a dependency if this VCA has no member-vnf-index (NS level,
                # depends on all), or the other VCA belongs to the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still in progress: stop scanning and poll again
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1674
1675 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1676 vca_id = None
1677 if db_vnfr:
1678 vca_id = deep_get(db_vnfr, ("vca-id",))
1679 elif db_nsr:
1680 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1681 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1682 return vca_id
1683
1684 async def instantiate_N2VC(
1685 self,
1686 logging_text,
1687 vca_index,
1688 nsi_id,
1689 db_nsr,
1690 db_vnfr,
1691 vdu_id,
1692 kdu_name,
1693 vdu_index,
1694 kdu_index,
1695 config_descriptor,
1696 deploy_params,
1697 base_folder,
1698 nslcmop_id,
1699 stage,
1700 vca_type,
1701 vca_name,
1702 ee_config_descriptor,
1703 ):
1704 nsr_id = db_nsr["_id"]
1705 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1706 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1707 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1708 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1709 db_dict = {
1710 "collection": "nsrs",
1711 "filter": {"_id": nsr_id},
1712 "path": db_update_entry,
1713 }
1714 step = ""
1715 try:
1716 element_type = "NS"
1717 element_under_configuration = nsr_id
1718
1719 vnfr_id = None
1720 if db_vnfr:
1721 vnfr_id = db_vnfr["_id"]
1722 osm_config["osm"]["vnf_id"] = vnfr_id
1723
1724 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1725
1726 if vca_type == "native_charm":
1727 index_number = 0
1728 else:
1729 index_number = vdu_index or 0
1730
1731 if vnfr_id:
1732 element_type = "VNF"
1733 element_under_configuration = vnfr_id
1734 namespace += ".{}-{}".format(vnfr_id, index_number)
1735 if vdu_id:
1736 namespace += ".{}-{}".format(vdu_id, index_number)
1737 element_type = "VDU"
1738 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1739 osm_config["osm"]["vdu_id"] = vdu_id
1740 elif kdu_name:
1741 namespace += ".{}".format(kdu_name)
1742 element_type = "KDU"
1743 element_under_configuration = kdu_name
1744 osm_config["osm"]["kdu_name"] = kdu_name
1745
1746 # Get artifact path
1747 if base_folder["pkg-dir"]:
1748 artifact_path = "{}/{}/{}/{}".format(
1749 base_folder["folder"],
1750 base_folder["pkg-dir"],
1751 "charms"
1752 if vca_type
1753 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1754 else "helm-charts",
1755 vca_name,
1756 )
1757 else:
1758 artifact_path = "{}/Scripts/{}/{}/".format(
1759 base_folder["folder"],
1760 "charms"
1761 if vca_type
1762 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1763 else "helm-charts",
1764 vca_name,
1765 )
1766
1767 self.logger.debug("Artifact path > {}".format(artifact_path))
1768
1769 # get initial_config_primitive_list that applies to this element
1770 initial_config_primitive_list = config_descriptor.get(
1771 "initial-config-primitive"
1772 )
1773
1774 self.logger.debug(
1775 "Initial config primitive list > {}".format(
1776 initial_config_primitive_list
1777 )
1778 )
1779
1780 # add config if not present for NS charm
1781 ee_descriptor_id = ee_config_descriptor.get("id")
1782 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1783 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1784 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1785 )
1786
1787 self.logger.debug(
1788 "Initial config primitive list #2 > {}".format(
1789 initial_config_primitive_list
1790 )
1791 )
1792 # n2vc_redesign STEP 3.1
1793 # find old ee_id if exists
1794 ee_id = vca_deployed.get("ee_id")
1795
1796 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1797 # create or register execution environment in VCA
1798 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1799 self._write_configuration_status(
1800 nsr_id=nsr_id,
1801 vca_index=vca_index,
1802 status="CREATING",
1803 element_under_configuration=element_under_configuration,
1804 element_type=element_type,
1805 )
1806
1807 step = "create execution environment"
1808 self.logger.debug(logging_text + step)
1809
1810 ee_id = None
1811 credentials = None
1812 if vca_type == "k8s_proxy_charm":
1813 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1814 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1815 namespace=namespace,
1816 artifact_path=artifact_path,
1817 db_dict=db_dict,
1818 vca_id=vca_id,
1819 )
1820 elif vca_type == "helm" or vca_type == "helm-v3":
1821 ee_id, credentials = await self.vca_map[
1822 vca_type
1823 ].create_execution_environment(
1824 namespace=namespace,
1825 reuse_ee_id=ee_id,
1826 db_dict=db_dict,
1827 config=osm_config,
1828 artifact_path=artifact_path,
1829 chart_model=vca_name,
1830 vca_type=vca_type,
1831 )
1832 else:
1833 ee_id, credentials = await self.vca_map[
1834 vca_type
1835 ].create_execution_environment(
1836 namespace=namespace,
1837 reuse_ee_id=ee_id,
1838 db_dict=db_dict,
1839 vca_id=vca_id,
1840 )
1841
1842 elif vca_type == "native_charm":
1843 step = "Waiting to VM being up and getting IP address"
1844 self.logger.debug(logging_text + step)
1845 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1846 logging_text,
1847 nsr_id,
1848 vnfr_id,
1849 vdu_id,
1850 vdu_index,
1851 user=None,
1852 pub_key=None,
1853 )
1854 credentials = {"hostname": rw_mgmt_ip}
1855 # get username
1856 username = deep_get(
1857 config_descriptor, ("config-access", "ssh-access", "default-user")
1858 )
1859 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1860 # merged. Meanwhile let's get username from initial-config-primitive
1861 if not username and initial_config_primitive_list:
1862 for config_primitive in initial_config_primitive_list:
1863 for param in config_primitive.get("parameter", ()):
1864 if param["name"] == "ssh-username":
1865 username = param["value"]
1866 break
1867 if not username:
1868 raise LcmException(
1869 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1870 "'config-access.ssh-access.default-user'"
1871 )
1872 credentials["username"] = username
1873 # n2vc_redesign STEP 3.2
1874
1875 self._write_configuration_status(
1876 nsr_id=nsr_id,
1877 vca_index=vca_index,
1878 status="REGISTERING",
1879 element_under_configuration=element_under_configuration,
1880 element_type=element_type,
1881 )
1882
1883 step = "register execution environment {}".format(credentials)
1884 self.logger.debug(logging_text + step)
1885 ee_id = await self.vca_map[vca_type].register_execution_environment(
1886 credentials=credentials,
1887 namespace=namespace,
1888 db_dict=db_dict,
1889 vca_id=vca_id,
1890 )
1891
1892 # for compatibility with MON/POL modules, the need model and application name at database
1893 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1894 ee_id_parts = ee_id.split(".")
1895 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1896 if len(ee_id_parts) >= 2:
1897 model_name = ee_id_parts[0]
1898 application_name = ee_id_parts[1]
1899 db_nsr_update[db_update_entry + "model"] = model_name
1900 db_nsr_update[db_update_entry + "application"] = application_name
1901
1902 # n2vc_redesign STEP 3.3
1903 step = "Install configuration Software"
1904
1905 self._write_configuration_status(
1906 nsr_id=nsr_id,
1907 vca_index=vca_index,
1908 status="INSTALLING SW",
1909 element_under_configuration=element_under_configuration,
1910 element_type=element_type,
1911 other_update=db_nsr_update,
1912 )
1913
1914 # TODO check if already done
1915 self.logger.debug(logging_text + step)
1916 config = None
1917 if vca_type == "native_charm":
1918 config_primitive = next(
1919 (p for p in initial_config_primitive_list if p["name"] == "config"),
1920 None,
1921 )
1922 if config_primitive:
1923 config = self._map_primitive_params(
1924 config_primitive, {}, deploy_params
1925 )
1926 num_units = 1
1927 if vca_type == "lxc_proxy_charm":
1928 if element_type == "NS":
1929 num_units = db_nsr.get("config-units") or 1
1930 elif element_type == "VNF":
1931 num_units = db_vnfr.get("config-units") or 1
1932 elif element_type == "VDU":
1933 for v in db_vnfr["vdur"]:
1934 if vdu_id == v["vdu-id-ref"]:
1935 num_units = v.get("config-units") or 1
1936 break
1937 if vca_type != "k8s_proxy_charm":
1938 await self.vca_map[vca_type].install_configuration_sw(
1939 ee_id=ee_id,
1940 artifact_path=artifact_path,
1941 db_dict=db_dict,
1942 config=config,
1943 num_units=num_units,
1944 vca_id=vca_id,
1945 vca_type=vca_type,
1946 )
1947
1948 # write in db flag of configuration_sw already installed
1949 self.update_db_2(
1950 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1951 )
1952
1953 # add relations for this VCA (wait for other peers related with this VCA)
1954 is_relation_added = await self._add_vca_relations(
1955 logging_text=logging_text,
1956 nsr_id=nsr_id,
1957 vca_type=vca_type,
1958 vca_index=vca_index,
1959 )
1960
1961 if not is_relation_added:
1962 raise LcmException("Relations could not be added to VCA.")
1963
1964 # if SSH access is required, then get execution environment SSH public
1965 # if native charm we have waited already to VM be UP
1966 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1967 pub_key = None
1968 user = None
1969 # self.logger.debug("get ssh key block")
1970 if deep_get(
1971 config_descriptor, ("config-access", "ssh-access", "required")
1972 ):
1973 # self.logger.debug("ssh key needed")
1974 # Needed to inject a ssh key
1975 user = deep_get(
1976 config_descriptor,
1977 ("config-access", "ssh-access", "default-user"),
1978 )
1979 step = "Install configuration Software, getting public ssh key"
1980 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1981 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1982 )
1983
1984 step = "Insert public key into VM user={} ssh_key={}".format(
1985 user, pub_key
1986 )
1987 else:
1988 # self.logger.debug("no need to get ssh key")
1989 step = "Waiting to VM being up and getting IP address"
1990 self.logger.debug(logging_text + step)
1991
1992 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1993 rw_mgmt_ip = None
1994
1995 # n2vc_redesign STEP 5.1
1996 # wait for RO (ip-address) Insert pub_key into VM
1997 if vnfr_id:
1998 if kdu_name:
1999 rw_mgmt_ip, services = await self.wait_kdu_up(
2000 logging_text, nsr_id, vnfr_id, kdu_name
2001 )
2002 vnfd = self.db.get_one(
2003 "vnfds_revisions",
2004 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2005 )
2006 kdu = get_kdu(vnfd, kdu_name)
2007 kdu_services = [
2008 service["name"] for service in get_kdu_services(kdu)
2009 ]
2010 exposed_services = []
2011 for service in services:
2012 if any(s in service["name"] for s in kdu_services):
2013 exposed_services.append(service)
2014 await self.vca_map[vca_type].exec_primitive(
2015 ee_id=ee_id,
2016 primitive_name="config",
2017 params_dict={
2018 "osm-config": json.dumps(
2019 OsmConfigBuilder(
2020 k8s={"services": exposed_services}
2021 ).build()
2022 )
2023 },
2024 vca_id=vca_id,
2025 )
2026
2027 # This verification is needed in order to avoid trying to add a public key
2028 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2029 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2030 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2031 # or it is a KNF)
2032 elif db_vnfr.get("vdur"):
2033 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2034 logging_text,
2035 nsr_id,
2036 vnfr_id,
2037 vdu_id,
2038 vdu_index,
2039 user=user,
2040 pub_key=pub_key,
2041 )
2042
2043 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2044
2045 # store rw_mgmt_ip in deploy params for later replacement
2046 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2047
2048 # n2vc_redesign STEP 6 Execute initial config primitive
2049 step = "execute initial config primitive"
2050
2051 # wait for dependent primitives execution (NS -> VNF -> VDU)
2052 if initial_config_primitive_list:
2053 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2054
2055 # stage, in function of element type: vdu, kdu, vnf or ns
2056 my_vca = vca_deployed_list[vca_index]
2057 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2058 # VDU or KDU
2059 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2060 elif my_vca.get("member-vnf-index"):
2061 # VNF
2062 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2063 else:
2064 # NS
2065 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2066
2067 self._write_configuration_status(
2068 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2069 )
2070
2071 self._write_op_status(op_id=nslcmop_id, stage=stage)
2072
2073 check_if_terminated_needed = True
2074 for initial_config_primitive in initial_config_primitive_list:
2075 # adding information on the vca_deployed if it is a NS execution environment
2076 if not vca_deployed["member-vnf-index"]:
2077 deploy_params["ns_config_info"] = json.dumps(
2078 self._get_ns_config_info(nsr_id)
2079 )
2080 # TODO check if already done
2081 primitive_params_ = self._map_primitive_params(
2082 initial_config_primitive, {}, deploy_params
2083 )
2084
2085 step = "execute primitive '{}' params '{}'".format(
2086 initial_config_primitive["name"], primitive_params_
2087 )
2088 self.logger.debug(logging_text + step)
2089 await self.vca_map[vca_type].exec_primitive(
2090 ee_id=ee_id,
2091 primitive_name=initial_config_primitive["name"],
2092 params_dict=primitive_params_,
2093 db_dict=db_dict,
2094 vca_id=vca_id,
2095 vca_type=vca_type,
2096 )
2097 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2098 if check_if_terminated_needed:
2099 if config_descriptor.get("terminate-config-primitive"):
2100 self.update_db_2(
2101 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2102 )
2103 check_if_terminated_needed = False
2104
2105 # TODO register in database that primitive is done
2106
2107 # STEP 7 Configure metrics
2108 if vca_type == "helm" or vca_type == "helm-v3":
2109 # TODO: review for those cases where the helm chart is a reference and
2110 # is not part of the NF package
2111 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2112 ee_id=ee_id,
2113 artifact_path=artifact_path,
2114 ee_config_descriptor=ee_config_descriptor,
2115 vnfr_id=vnfr_id,
2116 nsr_id=nsr_id,
2117 target_ip=rw_mgmt_ip,
2118 element_type=element_type,
2119 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2120 vdu_id=vdu_id,
2121 vdu_index=vdu_index,
2122 kdu_name=kdu_name,
2123 kdu_index=kdu_index,
2124 )
2125 if prometheus_jobs:
2126 self.update_db_2(
2127 "nsrs",
2128 nsr_id,
2129 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2130 )
2131
2132 for job in prometheus_jobs:
2133 self.db.set_one(
2134 "prometheus_jobs",
2135 {"job_name": job["job_name"]},
2136 job,
2137 upsert=True,
2138 fail_on_empty=False,
2139 )
2140
2141 step = "instantiated at VCA"
2142 self.logger.debug(logging_text + step)
2143
2144 self._write_configuration_status(
2145 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2146 )
2147
2148 except Exception as e: # TODO not use Exception but N2VC exception
2149 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2150 if not isinstance(
2151 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2152 ):
2153 self.logger.error(
2154 "Exception while {} : {}".format(step, e), exc_info=True
2155 )
2156 self._write_configuration_status(
2157 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2158 )
2159 raise LcmException("{}. {}".format(step, e)) from e
2160
2161 def _write_ns_status(
2162 self,
2163 nsr_id: str,
2164 ns_state: str,
2165 current_operation: str,
2166 current_operation_id: str,
2167 error_description: str = None,
2168 error_detail: str = None,
2169 other_update: dict = None,
2170 ):
2171 """
2172 Update db_nsr fields.
2173 :param nsr_id:
2174 :param ns_state:
2175 :param current_operation:
2176 :param current_operation_id:
2177 :param error_description:
2178 :param error_detail:
2179 :param other_update: Other required changes at database if provided, will be cleared
2180 :return:
2181 """
2182 try:
2183 db_dict = other_update or {}
2184 db_dict[
2185 "_admin.nslcmop"
2186 ] = current_operation_id # for backward compatibility
2187 db_dict["_admin.current-operation"] = current_operation_id
2188 db_dict["_admin.operation-type"] = (
2189 current_operation if current_operation != "IDLE" else None
2190 )
2191 db_dict["currentOperation"] = current_operation
2192 db_dict["currentOperationID"] = current_operation_id
2193 db_dict["errorDescription"] = error_description
2194 db_dict["errorDetail"] = error_detail
2195
2196 if ns_state:
2197 db_dict["nsState"] = ns_state
2198 self.update_db_2("nsrs", nsr_id, db_dict)
2199 except DbException as e:
2200 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2201
2202 def _write_op_status(
2203 self,
2204 op_id: str,
2205 stage: list = None,
2206 error_message: str = None,
2207 queuePosition: int = 0,
2208 operation_state: str = None,
2209 other_update: dict = None,
2210 ):
2211 try:
2212 db_dict = other_update or {}
2213 db_dict["queuePosition"] = queuePosition
2214 if isinstance(stage, list):
2215 db_dict["stage"] = stage[0]
2216 db_dict["detailed-status"] = " ".join(stage)
2217 elif stage is not None:
2218 db_dict["stage"] = str(stage)
2219
2220 if error_message is not None:
2221 db_dict["errorMessage"] = error_message
2222 if operation_state is not None:
2223 db_dict["operationState"] = operation_state
2224 db_dict["statusEnteredTime"] = time()
2225 self.update_db_2("nslcmops", op_id, db_dict)
2226 except DbException as e:
2227 self.logger.warn(
2228 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2229 )
2230
2231 def _write_all_config_status(self, db_nsr: dict, status: str):
2232 try:
2233 nsr_id = db_nsr["_id"]
2234 # configurationStatus
2235 config_status = db_nsr.get("configurationStatus")
2236 if config_status:
2237 db_nsr_update = {
2238 "configurationStatus.{}.status".format(index): status
2239 for index, v in enumerate(config_status)
2240 if v
2241 }
2242 # update status
2243 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2244
2245 except DbException as e:
2246 self.logger.warn(
2247 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2248 )
2249
2250 def _write_configuration_status(
2251 self,
2252 nsr_id: str,
2253 vca_index: int,
2254 status: str = None,
2255 element_under_configuration: str = None,
2256 element_type: str = None,
2257 other_update: dict = None,
2258 ):
2259 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2260 # .format(vca_index, status))
2261
2262 try:
2263 db_path = "configurationStatus.{}.".format(vca_index)
2264 db_dict = other_update or {}
2265 if status:
2266 db_dict[db_path + "status"] = status
2267 if element_under_configuration:
2268 db_dict[
2269 db_path + "elementUnderConfiguration"
2270 ] = element_under_configuration
2271 if element_type:
2272 db_dict[db_path + "elementType"] = element_type
2273 self.update_db_2("nsrs", nsr_id, db_dict)
2274 except DbException as e:
2275 self.logger.warn(
2276 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2277 status, nsr_id, vca_index, e
2278 )
2279 )
2280
2281 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2282 """
2283 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2284 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2285 Database is used because the result can be obtained from a different LCM worker in case of HA.
2286 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2287 :param db_nslcmop: database content of nslcmop
2288 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2289 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2290 computed 'vim-account-id'
2291 """
2292 modified = False
2293 nslcmop_id = db_nslcmop["_id"]
2294 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2295 if placement_engine == "PLA":
2296 self.logger.debug(
2297 logging_text + "Invoke and wait for placement optimization"
2298 )
2299 await self.msg.aiowrite(
2300 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2301 )
2302 db_poll_interval = 5
2303 wait = db_poll_interval * 10
2304 pla_result = None
2305 while not pla_result and wait >= 0:
2306 await asyncio.sleep(db_poll_interval)
2307 wait -= db_poll_interval
2308 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2309 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2310
2311 if not pla_result:
2312 raise LcmException(
2313 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2314 )
2315
2316 for pla_vnf in pla_result["vnf"]:
2317 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2318 if not pla_vnf.get("vimAccountId") or not vnfr:
2319 continue
2320 modified = True
2321 self.db.set_one(
2322 "vnfrs",
2323 {"_id": vnfr["_id"]},
2324 {"vim-account-id": pla_vnf["vimAccountId"]},
2325 )
2326 # Modifies db_vnfrs
2327 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2328 return modified
2329
2330 def _gather_vnfr_healing_alerts(self, vnfr, vnfd):
2331 alerts = []
2332 nsr_id = vnfr["nsr-id-ref"]
2333 df = vnfd.get("df", [{}])[0]
2334 # Checking for auto-healing configuration
2335 if "healing-aspect" in df:
2336 healing_aspects = df["healing-aspect"]
2337 for healing in healing_aspects:
2338 for healing_policy in healing.get("healing-policy", ()):
2339 vdu_id = healing_policy["vdu-id"]
2340 vdur = next(
2341 (vdur for vdur in vnfr["vdur"] if vdu_id == vdur["vdu-id-ref"]),
2342 {},
2343 )
2344 if not vdur:
2345 continue
2346 metric_name = "vm_status"
2347 vdu_name = vdur.get("name")
2348 vnf_member_index = vnfr["member-vnf-index-ref"]
2349 uuid = str(uuid4())
2350 name = f"healing_{uuid}"
2351 action = healing_policy
2352 # action_on_recovery = healing.get("action-on-recovery")
2353 # cooldown_time = healing.get("cooldown-time")
2354 # day1 = healing.get("day1")
2355 alert = {
2356 "uuid": uuid,
2357 "name": name,
2358 "metric": metric_name,
2359 "tags": {
2360 "ns_id": nsr_id,
2361 "vnf_member_index": vnf_member_index,
2362 "vdu_name": vdu_name,
2363 },
2364 "alarm_status": "ok",
2365 "action_type": "healing",
2366 "action": action,
2367 }
2368 alerts.append(alert)
2369 return alerts
2370
2371 def _gather_vnfr_scaling_alerts(self, vnfr, vnfd):
2372 alerts = []
2373 nsr_id = vnfr["nsr-id-ref"]
2374 df = vnfd.get("df", [{}])[0]
2375 # Checking for auto-scaling configuration
2376 if "scaling-aspect" in df:
2377 rel_operation_types = {
2378 "GE": ">=",
2379 "LE": "<=",
2380 "GT": ">",
2381 "LT": "<",
2382 "EQ": "==",
2383 "NE": "!=",
2384 }
2385 scaling_aspects = df["scaling-aspect"]
2386 all_vnfd_monitoring_params = {}
2387 for ivld in vnfd.get("int-virtual-link-desc", ()):
2388 for mp in ivld.get("monitoring-parameters", ()):
2389 all_vnfd_monitoring_params[mp.get("id")] = mp
2390 for vdu in vnfd.get("vdu", ()):
2391 for mp in vdu.get("monitoring-parameter", ()):
2392 all_vnfd_monitoring_params[mp.get("id")] = mp
2393 for df in vnfd.get("df", ()):
2394 for mp in df.get("monitoring-parameter", ()):
2395 all_vnfd_monitoring_params[mp.get("id")] = mp
2396 for scaling_aspect in scaling_aspects:
2397 scaling_group_name = scaling_aspect.get("name", "")
2398 # Get monitored VDUs
2399 all_monitored_vdus = set()
2400 for delta in scaling_aspect.get("aspect-delta-details", {}).get(
2401 "deltas", ()
2402 ):
2403 for vdu_delta in delta.get("vdu-delta", ()):
2404 all_monitored_vdus.add(vdu_delta.get("id"))
2405 monitored_vdurs = list(
2406 filter(
2407 lambda vdur: vdur["vdu-id-ref"] in all_monitored_vdus,
2408 vnfr["vdur"],
2409 )
2410 )
2411 if not monitored_vdurs:
2412 self.logger.error(
2413 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2414 )
2415 continue
2416 for scaling_policy in scaling_aspect.get("scaling-policy", ()):
2417 if scaling_policy["scaling-type"] != "automatic":
2418 continue
2419 threshold_time = scaling_policy.get("threshold-time", "1")
2420 cooldown_time = scaling_policy.get("cooldown-time", "0")
2421 for scaling_criteria in scaling_policy["scaling-criteria"]:
2422 monitoring_param_ref = scaling_criteria.get(
2423 "vnf-monitoring-param-ref"
2424 )
2425 vnf_monitoring_param = all_vnfd_monitoring_params[
2426 monitoring_param_ref
2427 ]
2428 for vdur in monitored_vdurs:
2429 vdu_id = vdur["vdu-id-ref"]
2430 metric_name = vnf_monitoring_param.get("performance-metric")
2431 vnf_member_index = vnfr["member-vnf-index-ref"]
2432 scalein_threshold = scaling_criteria.get(
2433 "scale-in-threshold"
2434 )
2435 scaleout_threshold = scaling_criteria.get(
2436 "scale-out-threshold"
2437 )
2438 # Looking for min/max-number-of-instances
2439 instances_min_number = 1
2440 instances_max_number = 1
2441 vdu_profile = df["vdu-profile"]
2442 if vdu_profile:
2443 profile = next(
2444 item for item in vdu_profile if item["id"] == vdu_id
2445 )
2446 instances_min_number = profile.get(
2447 "min-number-of-instances", 1
2448 )
2449 instances_max_number = profile.get(
2450 "max-number-of-instances", 1
2451 )
2452
2453 if scalein_threshold:
2454 uuid = str(uuid4())
2455 name = f"scalein_{uuid}"
2456 operation = scaling_criteria[
2457 "scale-in-relational-operation"
2458 ]
2459 rel_operator = rel_operation_types.get(operation, "<=")
2460 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2461 expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2462 labels = {
2463 "ns_id": nsr_id,
2464 "vnf_member_index": vnf_member_index,
2465 "vdu_id": vdu_id,
2466 }
2467 prom_cfg = {
2468 "alert": name,
2469 "expr": expression,
2470 "for": str(threshold_time) + "m",
2471 "labels": labels,
2472 }
2473 action = scaling_policy
2474 action = {
2475 "scaling-group": scaling_group_name,
2476 "cooldown-time": cooldown_time,
2477 }
2478 alert = {
2479 "uuid": uuid,
2480 "name": name,
2481 "metric": metric_name,
2482 "tags": {
2483 "ns_id": nsr_id,
2484 "vnf_member_index": vnf_member_index,
2485 "vdu_id": vdu_id,
2486 },
2487 "alarm_status": "ok",
2488 "action_type": "scale_in",
2489 "action": action,
2490 "prometheus_config": prom_cfg,
2491 }
2492 alerts.append(alert)
2493
2494 if scaleout_threshold:
2495 uuid = str(uuid4())
2496 name = f"scaleout_{uuid}"
2497 operation = scaling_criteria[
2498 "scale-out-relational-operation"
2499 ]
2500 rel_operator = rel_operation_types.get(operation, "<=")
2501 metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2502 expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2503 labels = {
2504 "ns_id": nsr_id,
2505 "vnf_member_index": vnf_member_index,
2506 "vdu_id": vdu_id,
2507 }
2508 prom_cfg = {
2509 "alert": name,
2510 "expr": expression,
2511 "for": str(threshold_time) + "m",
2512 "labels": labels,
2513 }
2514 action = scaling_policy
2515 action = {
2516 "scaling-group": scaling_group_name,
2517 "cooldown-time": cooldown_time,
2518 }
2519 alert = {
2520 "uuid": uuid,
2521 "name": name,
2522 "metric": metric_name,
2523 "tags": {
2524 "ns_id": nsr_id,
2525 "vnf_member_index": vnf_member_index,
2526 "vdu_id": vdu_id,
2527 },
2528 "alarm_status": "ok",
2529 "action_type": "scale_out",
2530 "action": action,
2531 "prometheus_config": prom_cfg,
2532 }
2533 alerts.append(alert)
2534 return alerts
2535
2536 def update_nsrs_with_pla_result(self, params):
2537 try:
2538 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2539 self.update_db_2(
2540 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2541 )
2542 except Exception as e:
2543 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2544
2545 async def instantiate(self, nsr_id, nslcmop_id):
2546 """
2547
2548 :param nsr_id: ns instance to deploy
2549 :param nslcmop_id: operation to run
2550 :return:
2551 """
2552
2553 # Try to lock HA task here
2554 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2555 if not task_is_locked_by_me:
2556 self.logger.debug(
2557 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2558 )
2559 return
2560
2561 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2562 self.logger.debug(logging_text + "Enter")
2563
2564 # get all needed from database
2565
2566 # database nsrs record
2567 db_nsr = None
2568
2569 # database nslcmops record
2570 db_nslcmop = None
2571
2572 # update operation on nsrs
2573 db_nsr_update = {}
2574 # update operation on nslcmops
2575 db_nslcmop_update = {}
2576
2577 timeout_ns_deploy = self.timeout.ns_deploy
2578
2579 nslcmop_operation_state = None
2580 db_vnfrs = {} # vnf's info indexed by member-index
2581 # n2vc_info = {}
2582 tasks_dict_info = {} # from task to info text
2583 exc = None
2584 error_list = []
2585 stage = [
2586 "Stage 1/5: preparation of the environment.",
2587 "Waiting for previous operations to terminate.",
2588 "",
2589 ]
2590 # ^ stage, step, VIM progress
2591 try:
2592 # wait for any previous tasks in process
2593 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2594
2595 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2596 stage[1] = "Reading from database."
2597 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2598 db_nsr_update["detailed-status"] = "creating"
2599 db_nsr_update["operational-status"] = "init"
2600 self._write_ns_status(
2601 nsr_id=nsr_id,
2602 ns_state="BUILDING",
2603 current_operation="INSTANTIATING",
2604 current_operation_id=nslcmop_id,
2605 other_update=db_nsr_update,
2606 )
2607 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2608
2609 # read from db: operation
2610 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2611 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2612 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2613 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2614 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2615 )
2616 ns_params = db_nslcmop.get("operationParams")
2617 if ns_params and ns_params.get("timeout_ns_deploy"):
2618 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2619
2620 # read from db: ns
2621 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2622 self.logger.debug(logging_text + stage[1])
2623 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2624 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2625 self.logger.debug(logging_text + stage[1])
2626 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2627 self.fs.sync(db_nsr["nsd-id"])
2628 db_nsr["nsd"] = nsd
2629 # nsr_name = db_nsr["name"] # TODO short-name??
2630
2631 # read from db: vnf's of this ns
2632 stage[1] = "Getting vnfrs from db."
2633 self.logger.debug(logging_text + stage[1])
2634 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2635
2636 # read from db: vnfd's for every vnf
2637 db_vnfds = [] # every vnfd data
2638
2639 # for each vnf in ns, read vnfd
2640 for vnfr in db_vnfrs_list:
2641 if vnfr.get("kdur"):
2642 kdur_list = []
2643 for kdur in vnfr["kdur"]:
2644 if kdur.get("additionalParams"):
2645 kdur["additionalParams"] = json.loads(
2646 kdur["additionalParams"]
2647 )
2648 kdur_list.append(kdur)
2649 vnfr["kdur"] = kdur_list
2650
2651 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2652 vnfd_id = vnfr["vnfd-id"]
2653 vnfd_ref = vnfr["vnfd-ref"]
2654 self.fs.sync(vnfd_id)
2655
2656 # if we haven't this vnfd, read it from db
2657 if vnfd_id not in db_vnfds:
2658 # read from db
2659 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2660 vnfd_id, vnfd_ref
2661 )
2662 self.logger.debug(logging_text + stage[1])
2663 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2664
2665 # store vnfd
2666 db_vnfds.append(vnfd)
2667
2668 # Get or generates the _admin.deployed.VCA list
2669 vca_deployed_list = None
2670 if db_nsr["_admin"].get("deployed"):
2671 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2672 if vca_deployed_list is None:
2673 vca_deployed_list = []
2674 configuration_status_list = []
2675 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2676 db_nsr_update["configurationStatus"] = configuration_status_list
2677 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2678 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2679 elif isinstance(vca_deployed_list, dict):
2680 # maintain backward compatibility. Change a dict to list at database
2681 vca_deployed_list = list(vca_deployed_list.values())
2682 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2683 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2684
2685 if not isinstance(
2686 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2687 ):
2688 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2689 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2690
2691 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2692 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2693 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2694 self.db.set_list(
2695 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2696 )
2697
2698 # n2vc_redesign STEP 2 Deploy Network Scenario
2699 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2700 self._write_op_status(op_id=nslcmop_id, stage=stage)
2701
2702 stage[1] = "Deploying KDUs."
2703 # self.logger.debug(logging_text + "Before deploy_kdus")
2704 # Call to deploy_kdus in case exists the "vdu:kdu" param
2705 await self.deploy_kdus(
2706 logging_text=logging_text,
2707 nsr_id=nsr_id,
2708 nslcmop_id=nslcmop_id,
2709 db_vnfrs=db_vnfrs,
2710 db_vnfds=db_vnfds,
2711 task_instantiation_info=tasks_dict_info,
2712 )
2713
2714 stage[1] = "Getting VCA public key."
2715 # n2vc_redesign STEP 1 Get VCA public ssh-key
2716 # feature 1429. Add n2vc public key to needed VMs
2717 n2vc_key = self.n2vc.get_public_key()
2718 n2vc_key_list = [n2vc_key]
2719 if self.vca_config.public_key:
2720 n2vc_key_list.append(self.vca_config.public_key)
2721
2722 stage[1] = "Deploying NS at VIM."
2723 task_ro = asyncio.ensure_future(
2724 self.instantiate_RO(
2725 logging_text=logging_text,
2726 nsr_id=nsr_id,
2727 nsd=nsd,
2728 db_nsr=db_nsr,
2729 db_nslcmop=db_nslcmop,
2730 db_vnfrs=db_vnfrs,
2731 db_vnfds=db_vnfds,
2732 n2vc_key_list=n2vc_key_list,
2733 stage=stage,
2734 )
2735 )
2736 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2737 tasks_dict_info[task_ro] = "Deploying at VIM"
2738
2739 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2740 stage[1] = "Deploying Execution Environments."
2741 self.logger.debug(logging_text + stage[1])
2742
2743 # create namespace and certificate if any helm based EE is present in the NS
2744 if check_helm_ee_in_ns(db_vnfds):
2745 # TODO: create EE namespace
2746 # create TLS certificates
2747 await self.vca_map["helm-v3"].create_tls_certificate(
2748 secret_name="ee-tls-{}".format(nsr_id),
2749 dns_prefix="*",
2750 nsr_id=nsr_id,
2751 usage="server auth",
2752 )
2753
2754 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2755 for vnf_profile in get_vnf_profiles(nsd):
2756 vnfd_id = vnf_profile["vnfd-id"]
2757 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2758 member_vnf_index = str(vnf_profile["id"])
2759 db_vnfr = db_vnfrs[member_vnf_index]
2760 base_folder = vnfd["_admin"]["storage"]
2761 vdu_id = None
2762 vdu_index = 0
2763 vdu_name = None
2764 kdu_name = None
2765 kdu_index = None
2766
2767 # Get additional parameters
2768 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2769 if db_vnfr.get("additionalParamsForVnf"):
2770 deploy_params.update(
2771 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2772 )
2773
2774 descriptor_config = get_configuration(vnfd, vnfd["id"])
2775 if descriptor_config:
2776 self._deploy_n2vc(
2777 logging_text=logging_text
2778 + "member_vnf_index={} ".format(member_vnf_index),
2779 db_nsr=db_nsr,
2780 db_vnfr=db_vnfr,
2781 nslcmop_id=nslcmop_id,
2782 nsr_id=nsr_id,
2783 nsi_id=nsi_id,
2784 vnfd_id=vnfd_id,
2785 vdu_id=vdu_id,
2786 kdu_name=kdu_name,
2787 member_vnf_index=member_vnf_index,
2788 vdu_index=vdu_index,
2789 kdu_index=kdu_index,
2790 vdu_name=vdu_name,
2791 deploy_params=deploy_params,
2792 descriptor_config=descriptor_config,
2793 base_folder=base_folder,
2794 task_instantiation_info=tasks_dict_info,
2795 stage=stage,
2796 )
2797
2798 # Deploy charms for each VDU that supports one.
2799 for vdud in get_vdu_list(vnfd):
2800 vdu_id = vdud["id"]
2801 descriptor_config = get_configuration(vnfd, vdu_id)
2802 vdur = find_in_list(
2803 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2804 )
2805
2806 if vdur.get("additionalParams"):
2807 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2808 else:
2809 deploy_params_vdu = deploy_params
2810 deploy_params_vdu["OSM"] = get_osm_params(
2811 db_vnfr, vdu_id, vdu_count_index=0
2812 )
2813 vdud_count = get_number_of_instances(vnfd, vdu_id)
2814
2815 self.logger.debug("VDUD > {}".format(vdud))
2816 self.logger.debug(
2817 "Descriptor config > {}".format(descriptor_config)
2818 )
2819 if descriptor_config:
2820 vdu_name = None
2821 kdu_name = None
2822 kdu_index = None
2823 for vdu_index in range(vdud_count):
2824 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2825 self._deploy_n2vc(
2826 logging_text=logging_text
2827 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2828 member_vnf_index, vdu_id, vdu_index
2829 ),
2830 db_nsr=db_nsr,
2831 db_vnfr=db_vnfr,
2832 nslcmop_id=nslcmop_id,
2833 nsr_id=nsr_id,
2834 nsi_id=nsi_id,
2835 vnfd_id=vnfd_id,
2836 vdu_id=vdu_id,
2837 kdu_name=kdu_name,
2838 kdu_index=kdu_index,
2839 member_vnf_index=member_vnf_index,
2840 vdu_index=vdu_index,
2841 vdu_name=vdu_name,
2842 deploy_params=deploy_params_vdu,
2843 descriptor_config=descriptor_config,
2844 base_folder=base_folder,
2845 task_instantiation_info=tasks_dict_info,
2846 stage=stage,
2847 )
2848 for kdud in get_kdu_list(vnfd):
2849 kdu_name = kdud["name"]
2850 descriptor_config = get_configuration(vnfd, kdu_name)
2851 if descriptor_config:
2852 vdu_id = None
2853 vdu_index = 0
2854 vdu_name = None
2855 kdu_index, kdur = next(
2856 x
2857 for x in enumerate(db_vnfr["kdur"])
2858 if x[1]["kdu-name"] == kdu_name
2859 )
2860 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2861 if kdur.get("additionalParams"):
2862 deploy_params_kdu.update(
2863 parse_yaml_strings(kdur["additionalParams"].copy())
2864 )
2865
2866 self._deploy_n2vc(
2867 logging_text=logging_text,
2868 db_nsr=db_nsr,
2869 db_vnfr=db_vnfr,
2870 nslcmop_id=nslcmop_id,
2871 nsr_id=nsr_id,
2872 nsi_id=nsi_id,
2873 vnfd_id=vnfd_id,
2874 vdu_id=vdu_id,
2875 kdu_name=kdu_name,
2876 member_vnf_index=member_vnf_index,
2877 vdu_index=vdu_index,
2878 kdu_index=kdu_index,
2879 vdu_name=vdu_name,
2880 deploy_params=deploy_params_kdu,
2881 descriptor_config=descriptor_config,
2882 base_folder=base_folder,
2883 task_instantiation_info=tasks_dict_info,
2884 stage=stage,
2885 )
2886
2887 # Check if this NS has a charm configuration
2888 descriptor_config = nsd.get("ns-configuration")
2889 if descriptor_config and descriptor_config.get("juju"):
2890 vnfd_id = None
2891 db_vnfr = None
2892 member_vnf_index = None
2893 vdu_id = None
2894 kdu_name = None
2895 kdu_index = None
2896 vdu_index = 0
2897 vdu_name = None
2898
2899 # Get additional parameters
2900 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2901 if db_nsr.get("additionalParamsForNs"):
2902 deploy_params.update(
2903 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2904 )
2905 base_folder = nsd["_admin"]["storage"]
2906 self._deploy_n2vc(
2907 logging_text=logging_text,
2908 db_nsr=db_nsr,
2909 db_vnfr=db_vnfr,
2910 nslcmop_id=nslcmop_id,
2911 nsr_id=nsr_id,
2912 nsi_id=nsi_id,
2913 vnfd_id=vnfd_id,
2914 vdu_id=vdu_id,
2915 kdu_name=kdu_name,
2916 member_vnf_index=member_vnf_index,
2917 vdu_index=vdu_index,
2918 kdu_index=kdu_index,
2919 vdu_name=vdu_name,
2920 deploy_params=deploy_params,
2921 descriptor_config=descriptor_config,
2922 base_folder=base_folder,
2923 task_instantiation_info=tasks_dict_info,
2924 stage=stage,
2925 )
2926
2927 # rest of staff will be done at finally
2928
2929 except (
2930 ROclient.ROClientException,
2931 DbException,
2932 LcmException,
2933 N2VCException,
2934 ) as e:
2935 self.logger.error(
2936 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2937 )
2938 exc = e
2939 except asyncio.CancelledError:
2940 self.logger.error(
2941 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2942 )
2943 exc = "Operation was cancelled"
2944 except Exception as e:
2945 exc = traceback.format_exc()
2946 self.logger.critical(
2947 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2948 exc_info=True,
2949 )
2950 finally:
2951 if exc:
2952 error_list.append(str(exc))
2953 try:
2954 # wait for pending tasks
2955 if tasks_dict_info:
2956 stage[1] = "Waiting for instantiate pending tasks."
2957 self.logger.debug(logging_text + stage[1])
2958 error_list += await self._wait_for_tasks(
2959 logging_text,
2960 tasks_dict_info,
2961 timeout_ns_deploy,
2962 stage,
2963 nslcmop_id,
2964 nsr_id=nsr_id,
2965 )
2966 stage[1] = stage[2] = ""
2967 except asyncio.CancelledError:
2968 error_list.append("Cancelled")
2969 # TODO cancel all tasks
2970 except Exception as exc:
2971 error_list.append(str(exc))
2972
2973 # update operation-status
2974 db_nsr_update["operational-status"] = "running"
2975 # let's begin with VCA 'configured' status (later we can change it)
2976 db_nsr_update["config-status"] = "configured"
2977 for task, task_name in tasks_dict_info.items():
2978 if not task.done() or task.cancelled() or task.exception():
2979 if task_name.startswith(self.task_name_deploy_vca):
2980 # A N2VC task is pending
2981 db_nsr_update["config-status"] = "failed"
2982 else:
2983 # RO or KDU task is pending
2984 db_nsr_update["operational-status"] = "failed"
2985
2986 # update status at database
2987 if error_list:
2988 error_detail = ". ".join(error_list)
2989 self.logger.error(logging_text + error_detail)
2990 error_description_nslcmop = "{} Detail: {}".format(
2991 stage[0], error_detail
2992 )
2993 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2994 nslcmop_id, stage[0]
2995 )
2996
2997 db_nsr_update["detailed-status"] = (
2998 error_description_nsr + " Detail: " + error_detail
2999 )
3000 db_nslcmop_update["detailed-status"] = error_detail
3001 nslcmop_operation_state = "FAILED"
3002 ns_state = "BROKEN"
3003 else:
3004 error_detail = None
3005 error_description_nsr = error_description_nslcmop = None
3006 ns_state = "READY"
3007 db_nsr_update["detailed-status"] = "Done"
3008 db_nslcmop_update["detailed-status"] = "Done"
3009 nslcmop_operation_state = "COMPLETED"
3010 # Gather auto-healing and auto-scaling alerts for each vnfr
3011 healing_alerts = []
3012 scaling_alerts = []
3013 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
3014 vnfd = next(
3015 (sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None
3016 )
3017 healing_alerts = self._gather_vnfr_healing_alerts(vnfr, vnfd)
3018 for alert in healing_alerts:
3019 self.logger.info(f"Storing healing alert in MongoDB: {alert}")
3020 self.db.create("alerts", alert)
3021
3022 scaling_alerts = self._gather_vnfr_scaling_alerts(vnfr, vnfd)
3023 for alert in scaling_alerts:
3024 self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
3025 self.db.create("alerts", alert)
3026
3027 if db_nsr:
3028 self._write_ns_status(
3029 nsr_id=nsr_id,
3030 ns_state=ns_state,
3031 current_operation="IDLE",
3032 current_operation_id=None,
3033 error_description=error_description_nsr,
3034 error_detail=error_detail,
3035 other_update=db_nsr_update,
3036 )
3037 self._write_op_status(
3038 op_id=nslcmop_id,
3039 stage="",
3040 error_message=error_description_nslcmop,
3041 operation_state=nslcmop_operation_state,
3042 other_update=db_nslcmop_update,
3043 )
3044
3045 if nslcmop_operation_state:
3046 try:
3047 await self.msg.aiowrite(
3048 "ns",
3049 "instantiated",
3050 {
3051 "nsr_id": nsr_id,
3052 "nslcmop_id": nslcmop_id,
3053 "operationState": nslcmop_operation_state,
3054 },
3055 loop=self.loop,
3056 )
3057 except Exception as e:
3058 self.logger.error(
3059 logging_text + "kafka_write notification Exception {}".format(e)
3060 )
3061
3062 self.logger.debug(logging_text + "Exit")
3063 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
3064
3065 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
3066 if vnfd_id not in cached_vnfds:
3067 cached_vnfds[vnfd_id] = self.db.get_one(
3068 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
3069 )
3070 return cached_vnfds[vnfd_id]
3071
3072 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
3073 if vnf_profile_id not in cached_vnfrs:
3074 cached_vnfrs[vnf_profile_id] = self.db.get_one(
3075 "vnfrs",
3076 {
3077 "member-vnf-index-ref": vnf_profile_id,
3078 "nsr-id-ref": nsr_id,
3079 },
3080 )
3081 return cached_vnfrs[vnf_profile_id]
3082
3083 def _is_deployed_vca_in_relation(
3084 self, vca: DeployedVCA, relation: Relation
3085 ) -> bool:
3086 found = False
3087 for endpoint in (relation.provider, relation.requirer):
3088 if endpoint["kdu-resource-profile-id"]:
3089 continue
3090 found = (
3091 vca.vnf_profile_id == endpoint.vnf_profile_id
3092 and vca.vdu_profile_id == endpoint.vdu_profile_id
3093 and vca.execution_environment_ref == endpoint.execution_environment_ref
3094 )
3095 if found:
3096 break
3097 return found
3098
3099 def _update_ee_relation_data_with_implicit_data(
3100 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
3101 ):
3102 ee_relation_data = safe_get_ee_relation(
3103 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
3104 )
3105 ee_relation_level = EELevel.get_level(ee_relation_data)
3106 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
3107 "execution-environment-ref"
3108 ]:
3109 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
3110 vnfd_id = vnf_profile["vnfd-id"]
3111 project = nsd["_admin"]["projects_read"][0]
3112 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3113 entity_id = (
3114 vnfd_id
3115 if ee_relation_level == EELevel.VNF
3116 else ee_relation_data["vdu-profile-id"]
3117 )
3118 ee = get_juju_ee_ref(db_vnfd, entity_id)
3119 if not ee:
3120 raise Exception(
3121 f"not execution environments found for ee_relation {ee_relation_data}"
3122 )
3123 ee_relation_data["execution-environment-ref"] = ee["id"]
3124 return ee_relation_data
3125
3126 def _get_ns_relations(
3127 self,
3128 nsr_id: str,
3129 nsd: Dict[str, Any],
3130 vca: DeployedVCA,
3131 cached_vnfds: Dict[str, Any],
3132 ) -> List[Relation]:
3133 relations = []
3134 db_ns_relations = get_ns_configuration_relation_list(nsd)
3135 for r in db_ns_relations:
3136 provider_dict = None
3137 requirer_dict = None
3138 if all(key in r for key in ("provider", "requirer")):
3139 provider_dict = r["provider"]
3140 requirer_dict = r["requirer"]
3141 elif "entities" in r:
3142 provider_id = r["entities"][0]["id"]
3143 provider_dict = {
3144 "nsr-id": nsr_id,
3145 "endpoint": r["entities"][0]["endpoint"],
3146 }
3147 if provider_id != nsd["id"]:
3148 provider_dict["vnf-profile-id"] = provider_id
3149 requirer_id = r["entities"][1]["id"]
3150 requirer_dict = {
3151 "nsr-id": nsr_id,
3152 "endpoint": r["entities"][1]["endpoint"],
3153 }
3154 if requirer_id != nsd["id"]:
3155 requirer_dict["vnf-profile-id"] = requirer_id
3156 else:
3157 raise Exception(
3158 "provider/requirer or entities must be included in the relation."
3159 )
3160 relation_provider = self._update_ee_relation_data_with_implicit_data(
3161 nsr_id, nsd, provider_dict, cached_vnfds
3162 )
3163 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3164 nsr_id, nsd, requirer_dict, cached_vnfds
3165 )
3166 provider = EERelation(relation_provider)
3167 requirer = EERelation(relation_requirer)
3168 relation = Relation(r["name"], provider, requirer)
3169 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3170 if vca_in_relation:
3171 relations.append(relation)
3172 return relations
3173
3174 def _get_vnf_relations(
3175 self,
3176 nsr_id: str,
3177 nsd: Dict[str, Any],
3178 vca: DeployedVCA,
3179 cached_vnfds: Dict[str, Any],
3180 ) -> List[Relation]:
3181 relations = []
3182 if vca.target_element == "ns":
3183 self.logger.debug("VCA is a NS charm, not a VNF.")
3184 return relations
3185 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3186 vnf_profile_id = vnf_profile["id"]
3187 vnfd_id = vnf_profile["vnfd-id"]
3188 project = nsd["_admin"]["projects_read"][0]
3189 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3190 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3191 for r in db_vnf_relations:
3192 provider_dict = None
3193 requirer_dict = None
3194 if all(key in r for key in ("provider", "requirer")):
3195 provider_dict = r["provider"]
3196 requirer_dict = r["requirer"]
3197 elif "entities" in r:
3198 provider_id = r["entities"][0]["id"]
3199 provider_dict = {
3200 "nsr-id": nsr_id,
3201 "vnf-profile-id": vnf_profile_id,
3202 "endpoint": r["entities"][0]["endpoint"],
3203 }
3204 if provider_id != vnfd_id:
3205 provider_dict["vdu-profile-id"] = provider_id
3206 requirer_id = r["entities"][1]["id"]
3207 requirer_dict = {
3208 "nsr-id": nsr_id,
3209 "vnf-profile-id": vnf_profile_id,
3210 "endpoint": r["entities"][1]["endpoint"],
3211 }
3212 if requirer_id != vnfd_id:
3213 requirer_dict["vdu-profile-id"] = requirer_id
3214 else:
3215 raise Exception(
3216 "provider/requirer or entities must be included in the relation."
3217 )
3218 relation_provider = self._update_ee_relation_data_with_implicit_data(
3219 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3220 )
3221 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3222 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3223 )
3224 provider = EERelation(relation_provider)
3225 requirer = EERelation(relation_requirer)
3226 relation = Relation(r["name"], provider, requirer)
3227 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3228 if vca_in_relation:
3229 relations.append(relation)
3230 return relations
3231
3232 def _get_kdu_resource_data(
3233 self,
3234 ee_relation: EERelation,
3235 db_nsr: Dict[str, Any],
3236 cached_vnfds: Dict[str, Any],
3237 ) -> DeployedK8sResource:
3238 nsd = get_nsd(db_nsr)
3239 vnf_profiles = get_vnf_profiles(nsd)
3240 vnfd_id = find_in_list(
3241 vnf_profiles,
3242 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3243 )["vnfd-id"]
3244 project = nsd["_admin"]["projects_read"][0]
3245 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3246 kdu_resource_profile = get_kdu_resource_profile(
3247 db_vnfd, ee_relation.kdu_resource_profile_id
3248 )
3249 kdu_name = kdu_resource_profile["kdu-name"]
3250 deployed_kdu, _ = get_deployed_kdu(
3251 db_nsr.get("_admin", ()).get("deployed", ()),
3252 kdu_name,
3253 ee_relation.vnf_profile_id,
3254 )
3255 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3256 return deployed_kdu
3257
3258 def _get_deployed_component(
3259 self,
3260 ee_relation: EERelation,
3261 db_nsr: Dict[str, Any],
3262 cached_vnfds: Dict[str, Any],
3263 ) -> DeployedComponent:
3264 nsr_id = db_nsr["_id"]
3265 deployed_component = None
3266 ee_level = EELevel.get_level(ee_relation)
3267 if ee_level == EELevel.NS:
3268 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3269 if vca:
3270 deployed_component = DeployedVCA(nsr_id, vca)
3271 elif ee_level == EELevel.VNF:
3272 vca = get_deployed_vca(
3273 db_nsr,
3274 {
3275 "vdu_id": None,
3276 "member-vnf-index": ee_relation.vnf_profile_id,
3277 "ee_descriptor_id": ee_relation.execution_environment_ref,
3278 },
3279 )
3280 if vca:
3281 deployed_component = DeployedVCA(nsr_id, vca)
3282 elif ee_level == EELevel.VDU:
3283 vca = get_deployed_vca(
3284 db_nsr,
3285 {
3286 "vdu_id": ee_relation.vdu_profile_id,
3287 "member-vnf-index": ee_relation.vnf_profile_id,
3288 "ee_descriptor_id": ee_relation.execution_environment_ref,
3289 },
3290 )
3291 if vca:
3292 deployed_component = DeployedVCA(nsr_id, vca)
3293 elif ee_level == EELevel.KDU:
3294 kdu_resource_data = self._get_kdu_resource_data(
3295 ee_relation, db_nsr, cached_vnfds
3296 )
3297 if kdu_resource_data:
3298 deployed_component = DeployedK8sResource(kdu_resource_data)
3299 return deployed_component
3300
3301 async def _add_relation(
3302 self,
3303 relation: Relation,
3304 vca_type: str,
3305 db_nsr: Dict[str, Any],
3306 cached_vnfds: Dict[str, Any],
3307 cached_vnfrs: Dict[str, Any],
3308 ) -> bool:
3309 deployed_provider = self._get_deployed_component(
3310 relation.provider, db_nsr, cached_vnfds
3311 )
3312 deployed_requirer = self._get_deployed_component(
3313 relation.requirer, db_nsr, cached_vnfds
3314 )
3315 if (
3316 deployed_provider
3317 and deployed_requirer
3318 and deployed_provider.config_sw_installed
3319 and deployed_requirer.config_sw_installed
3320 ):
3321 provider_db_vnfr = (
3322 self._get_vnfr(
3323 relation.provider.nsr_id,
3324 relation.provider.vnf_profile_id,
3325 cached_vnfrs,
3326 )
3327 if relation.provider.vnf_profile_id
3328 else None
3329 )
3330 requirer_db_vnfr = (
3331 self._get_vnfr(
3332 relation.requirer.nsr_id,
3333 relation.requirer.vnf_profile_id,
3334 cached_vnfrs,
3335 )
3336 if relation.requirer.vnf_profile_id
3337 else None
3338 )
3339 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3340 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3341 provider_relation_endpoint = RelationEndpoint(
3342 deployed_provider.ee_id,
3343 provider_vca_id,
3344 relation.provider.endpoint,
3345 )
3346 requirer_relation_endpoint = RelationEndpoint(
3347 deployed_requirer.ee_id,
3348 requirer_vca_id,
3349 relation.requirer.endpoint,
3350 )
3351 try:
3352 await self.vca_map[vca_type].add_relation(
3353 provider=provider_relation_endpoint,
3354 requirer=requirer_relation_endpoint,
3355 )
3356 except N2VCException as exception:
3357 self.logger.error(exception)
3358 raise LcmException(exception)
3359 return True
3360 return False
3361
3362 async def _add_vca_relations(
3363 self,
3364 logging_text,
3365 nsr_id,
3366 vca_type: str,
3367 vca_index: int,
3368 timeout: int = 3600,
3369 ) -> bool:
3370 # steps:
3371 # 1. find all relations for this VCA
3372 # 2. wait for other peers related
3373 # 3. add relations
3374
3375 try:
3376 # STEP 1: find all relations for this VCA
3377
3378 # read nsr record
3379 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3380 nsd = get_nsd(db_nsr)
3381
3382 # this VCA data
3383 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3384 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3385
3386 cached_vnfds = {}
3387 cached_vnfrs = {}
3388 relations = []
3389 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3390 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3391
3392 # if no relations, terminate
3393 if not relations:
3394 self.logger.debug(logging_text + " No relations")
3395 return True
3396
3397 self.logger.debug(logging_text + " adding relations {}".format(relations))
3398
3399 # add all relations
3400 start = time()
3401 while True:
3402 # check timeout
3403 now = time()
3404 if now - start >= timeout:
3405 self.logger.error(logging_text + " : timeout adding relations")
3406 return False
3407
3408 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3409 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3410
3411 # for each relation, find the VCA's related
3412 for relation in relations.copy():
3413 added = await self._add_relation(
3414 relation,
3415 vca_type,
3416 db_nsr,
3417 cached_vnfds,
3418 cached_vnfrs,
3419 )
3420 if added:
3421 relations.remove(relation)
3422
3423 if not relations:
3424 self.logger.debug("Relations added")
3425 break
3426 await asyncio.sleep(5.0)
3427
3428 return True
3429
3430 except Exception as e:
3431 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3432 return False
3433
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and record the result in the DB.

        Generates (or reuses) the kdu-instance name and stores it at
        nsr_db_path in the nsrs record, installs the KDU through the mapped
        K8s connector, collects the deployed services to derive management
        IPs for the vnfr, and finally runs the KDU initial config primitives
        when they are not handled by a juju execution environment.

        :param nsr_id: NS record id
        :param nsr_db_path: dotted path inside the nsr where KDU info is kept
        :param vnfr_data: vnfr record of the owning VNF
        :param kdu_index: index of this kdur inside the vnfr
        :param kdud: KDU descriptor from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster/model/namespace details for the KDU
        :param k8params: instantiation parameters for the KDU
        :param timeout: seconds allowed for install and for each primitive
        :param vca_id: VCA id to use for the K8s operations, if any
        :return: the kdu_instance name
        :raises Exception: re-raises any failure after marking the kdur ERROR
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Reuse an explicit deployment name when given; otherwise let the
            # connector generate the instance name
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # Only services flagged as mgmt-service in the descriptor are
                # candidates for the management IP
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives only when there is no juju EE in
            # charge of the KDU configuration
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives are executed in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3630
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Deploy every KDU declared in the vnfrs of this NS.

        For each kdur of each vnfr: determines the KDU type (helm v2/v3 or
        juju-bundle), resolves the K8s cluster id (initializing helm-v3 on
        demand for legacy clusters), synchronizes helm repos once per
        cluster, records the K8s deployment entry in the nsr and launches an
        _install_kdu asyncio task registered in task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: current operation id (used for task registration)
        :param db_vnfrs: map of member-vnf-index -> vnfr record
        :param db_vnfds: list of VNF descriptors used by this NS
        :param task_instantiation_info: dict task -> description, filled here
        :raises LcmException: on descriptor or cluster errors
        """
        # Launch kdus if present in the descriptor

        # cache: cluster_type -> {cluster_id -> connector-level cluster id}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Return (and cache) the connector-level id of a K8s cluster.

            Waits for any in-flight k8scluster task first; for helm-chart-v3
            it initializes the cluster on the fly when it was never
            initialized for helm v3 (backward compatibility).
            """
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # flush any pending nsr updates even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3902
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create/reuse VCA records and launch an instantiate_N2VC task per EE.

        Walks the execution-environment-list of descriptor_config (or the
        bare "juju" section for NS charms); for each juju/helm execution
        environment it finds the matching entry in _admin.deployed.VCA or
        appends a new one, then spawns an instantiate_N2VC asyncio task and
        registers it in task_instantiation_info.

        :param descriptor_config: configuration section of the NS/VNF/VDU/KDU
            descriptor containing the execution environments
        :param task_instantiation_info: dict task -> description, filled here
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Determine the VCA type from the EE descriptor: juju charms may
            # be proxy (lxc/k8s) or native; helm charts may be v2 or v3
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing deployed-VCA entry matching this EE;
            # the for/else appends a new entry when none matches
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
4066
4067 @staticmethod
4068 def _create_nslcmop(nsr_id, operation, params):
4069 """
4070 Creates a ns-lcm-opp content to be stored at database.
4071 :param nsr_id: internal id of the instance
4072 :param operation: instantiate, terminate, scale, action, ...
4073 :param params: user parameters for the operation
4074 :return: dictionary following SOL005 format
4075 """
4076 # Raise exception if invalid arguments
4077 if not (nsr_id and operation and params):
4078 raise LcmException(
4079 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4080 )
4081 now = time()
4082 _id = str(uuid4())
4083 nslcmop = {
4084 "id": _id,
4085 "_id": _id,
4086 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4087 "operationState": "PROCESSING",
4088 "statusEnteredTime": now,
4089 "nsInstanceId": nsr_id,
4090 "lcmOperationType": operation,
4091 "startTime": now,
4092 "isAutomaticInvocation": False,
4093 "operationParams": params,
4094 "isCancelPending": False,
4095 "links": {
4096 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
4097 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
4098 },
4099 }
4100 return nslcmop
4101
4102 def _format_additional_params(self, params):
4103 params = params or {}
4104 for key, value in params.items():
4105 if str(value).startswith("!!yaml "):
4106 params[key] = yaml.safe_load(value[7:])
4107 return params
4108
4109 def _get_terminate_primitive_params(self, seq, vnf_index):
4110 primitive = seq.get("name")
4111 primitive_params = {}
4112 params = {
4113 "member_vnf_index": vnf_index,
4114 "primitive": primitive,
4115 "primitive_params": primitive_params,
4116 }
4117 desc_params = {}
4118 return self._map_primitive_params(seq, params, desc_params)
4119
4120 # sub-operations
4121
4122 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
4123 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
4124 if op.get("operationState") == "COMPLETED":
4125 # b. Skip sub-operation
4126 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4127 return self.SUBOPERATION_STATUS_SKIP
4128 else:
4129 # c. retry executing sub-operation
4130 # The sub-operation exists, and operationState != 'COMPLETED'
4131 # Update operationState = 'PROCESSING' to indicate a retry.
4132 operationState = "PROCESSING"
4133 detailed_status = "In progress"
4134 self._update_suboperation_status(
4135 db_nslcmop, op_index, operationState, detailed_status
4136 )
4137 # Return the sub-operation index
4138 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4139 # with arguments extracted from the sub-operation
4140 return op_index
4141
4142 # Find a sub-operation where all keys in a matching dictionary must match
4143 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4144 def _find_suboperation(self, db_nslcmop, match):
4145 if db_nslcmop and match:
4146 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4147 for i, op in enumerate(op_list):
4148 if all(op.get(k) == match[k] for k in match):
4149 return i
4150 return self.SUBOPERATION_STATUS_NOT_FOUND
4151
4152 # Update status for a sub-operation given its index
4153 def _update_suboperation_status(
4154 self, db_nslcmop, op_index, operationState, detailed_status
4155 ):
4156 # Update DB for HA tasks
4157 q_filter = {"_id": db_nslcmop["_id"]}
4158 update_dict = {
4159 "_admin.operations.{}.operationState".format(op_index): operationState,
4160 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4161 }
4162 self.db.set_one(
4163 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4164 )
4165
4166 # Add sub-operation, return the index of the added sub-operation
4167 # Optionally, set operationState, detailed-status, and operationType
4168 # Status and type are currently set for 'scale' sub-operations:
4169 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4170 # 'detailed-status' : status message
4171 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4172 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4173 def _add_suboperation(
4174 self,
4175 db_nslcmop,
4176 vnf_index,
4177 vdu_id,
4178 vdu_count_index,
4179 vdu_name,
4180 primitive,
4181 mapped_primitive_params,
4182 operationState=None,
4183 detailed_status=None,
4184 operationType=None,
4185 RO_nsr_id=None,
4186 RO_scaling_info=None,
4187 ):
4188 if not db_nslcmop:
4189 return self.SUBOPERATION_STATUS_NOT_FOUND
4190 # Get the "_admin.operations" list, if it exists
4191 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4192 op_list = db_nslcmop_admin.get("operations")
4193 # Create or append to the "_admin.operations" list
4194 new_op = {
4195 "member_vnf_index": vnf_index,
4196 "vdu_id": vdu_id,
4197 "vdu_count_index": vdu_count_index,
4198 "primitive": primitive,
4199 "primitive_params": mapped_primitive_params,
4200 }
4201 if operationState:
4202 new_op["operationState"] = operationState
4203 if detailed_status:
4204 new_op["detailed-status"] = detailed_status
4205 if operationType:
4206 new_op["lcmOperationType"] = operationType
4207 if RO_nsr_id:
4208 new_op["RO_nsr_id"] = RO_nsr_id
4209 if RO_scaling_info:
4210 new_op["RO_scaling_info"] = RO_scaling_info
4211 if not op_list:
4212 # No existing operations, create key 'operations' with current operation as first list element
4213 db_nslcmop_admin.update({"operations": [new_op]})
4214 op_list = db_nslcmop_admin.get("operations")
4215 else:
4216 # Existing operations, append operation to list
4217 op_list.append(new_op)
4218
4219 db_nslcmop_update = {"_admin.operations": op_list}
4220 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4221 op_index = len(op_list) - 1
4222 return op_index
4223
4224 # Helper methods for scale() sub-operations
4225
4226 # pre-scale/post-scale:
4227 # Check for 3 different cases:
4228 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4229 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4230 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4231 def _check_or_add_scale_suboperation(
4232 self,
4233 db_nslcmop,
4234 vnf_index,
4235 vnf_config_primitive,
4236 primitive_params,
4237 operationType,
4238 RO_nsr_id=None,
4239 RO_scaling_info=None,
4240 ):
4241 # Find this sub-operation
4242 if RO_nsr_id and RO_scaling_info:
4243 operationType = "SCALE-RO"
4244 match = {
4245 "member_vnf_index": vnf_index,
4246 "RO_nsr_id": RO_nsr_id,
4247 "RO_scaling_info": RO_scaling_info,
4248 }
4249 else:
4250 match = {
4251 "member_vnf_index": vnf_index,
4252 "primitive": vnf_config_primitive,
4253 "primitive_params": primitive_params,
4254 "lcmOperationType": operationType,
4255 }
4256 op_index = self._find_suboperation(db_nslcmop, match)
4257 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4258 # a. New sub-operation
4259 # The sub-operation does not exist, add it.
4260 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4261 # The following parameters are set to None for all kind of scaling:
4262 vdu_id = None
4263 vdu_count_index = None
4264 vdu_name = None
4265 if RO_nsr_id and RO_scaling_info:
4266 vnf_config_primitive = None
4267 primitive_params = None
4268 else:
4269 RO_nsr_id = None
4270 RO_scaling_info = None
4271 # Initial status for sub-operation
4272 operationState = "PROCESSING"
4273 detailed_status = "In progress"
4274 # Add sub-operation for pre/post-scaling (zero or more operations)
4275 self._add_suboperation(
4276 db_nslcmop,
4277 vnf_index,
4278 vdu_id,
4279 vdu_count_index,
4280 vdu_name,
4281 vnf_config_primitive,
4282 primitive_params,
4283 operationState,
4284 detailed_status,
4285 operationType,
4286 RO_nsr_id,
4287 RO_scaling_info,
4288 )
4289 return self.SUBOPERATION_STATUS_NEW
4290 else:
4291 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4292 # or op_index (operationState != 'COMPLETED')
4293 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4294
4295 # Function to return execution_environment id
4296
4297 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4298 # TODO vdu_index_count
4299 for vca in vca_deployed_list:
4300 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4301 return vca["ee_id"]
4302
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (only when destroy_ee=True)
        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: database content of the current ns-lcm operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of them at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier, passed through to N2VC/primitive calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value for backward compatibility - proxy charm
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            # remove only this execution environment; otherwise the caller
            # deletes all of them at once (see _delete_all_N2VC)
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4408
4409 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4410 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4411 namespace = "." + db_nsr["_id"]
4412 try:
4413 await self.n2vc.delete_namespace(
4414 namespace=namespace,
4415 total_timeout=self.timeout.charm_delete,
4416 vca_id=vca_id,
4417 )
4418 except N2VCNotFound: # already deleted. Skip
4419 pass
4420 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4421
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance: execute terminate primitives, delete the VCA
        execution environments, the deployed KDUs and the RO/VIM deployment,
        then report the final operation state.
        :param nsr_id: internal id of the NS instance
        :param nslcmop_id: id of the ns-lcm operation driving this termination
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # snapshot of the deployed info (VCA, K8s, RO) that drives the deletion steps below
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            # record results, wait for any remaining tasks and always report
            # the final status, even when the try-block failed
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify via the message bus that the NS was terminated
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
                # remove the alert entries stored for this NS (tags.ns_id)
                self.logger.debug(f"Deleting alerts: ns_id={nsr_id}")
                self.db.del_list("alerts", {"tags.ns_id": nsr_id})

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4753
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for the asyncio tasks of *created_tasks_info*, reporting progress.

        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict mapping each task to a human-readable description
        :param timeout: global timeout (seconds) shared by the whole set of tasks
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is updated with "done/total"
        :param nslcmop_id: operation id used to report status via _write_op_status
        :param nsr_id: if provided, errors are also written to the nsr record
        :return: list of error detail strings (empty if all tasks finished cleanly)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time before the global timeout expires
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a short log line; anything else
                    # is logged with its full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4830
4831 @staticmethod
4832 def _map_primitive_params(primitive_desc, params, instantiation_params):
4833 """
4834 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4835 The default-value is used. If it is between < > it look for a value at instantiation_params
4836 :param primitive_desc: portion of VNFD/NSD that describes primitive
4837 :param params: Params provided by user
4838 :param instantiation_params: Instantiation params provided by user
4839 :return: a dictionary with the calculated params
4840 """
4841 calculated_params = {}
4842 for parameter in primitive_desc.get("parameter", ()):
4843 param_name = parameter["name"]
4844 if param_name in params:
4845 calculated_params[param_name] = params[param_name]
4846 elif "default-value" in parameter or "value" in parameter:
4847 if "value" in parameter:
4848 calculated_params[param_name] = parameter["value"]
4849 else:
4850 calculated_params[param_name] = parameter["default-value"]
4851 if (
4852 isinstance(calculated_params[param_name], str)
4853 and calculated_params[param_name].startswith("<")
4854 and calculated_params[param_name].endswith(">")
4855 ):
4856 if calculated_params[param_name][1:-1] in instantiation_params:
4857 calculated_params[param_name] = instantiation_params[
4858 calculated_params[param_name][1:-1]
4859 ]
4860 else:
4861 raise LcmException(
4862 "Parameter {} needed to execute primitive {} not provided".format(
4863 calculated_params[param_name], primitive_desc["name"]
4864 )
4865 )
4866 else:
4867 raise LcmException(
4868 "Parameter {} needed to execute primitive {} not provided".format(
4869 param_name, primitive_desc["name"]
4870 )
4871 )
4872
4873 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4874 calculated_params[param_name] = yaml.safe_dump(
4875 calculated_params[param_name], default_flow_style=True, width=256
4876 )
4877 elif isinstance(calculated_params[param_name], str) and calculated_params[
4878 param_name
4879 ].startswith("!!yaml "):
4880 calculated_params[param_name] = calculated_params[param_name][7:]
4881 if parameter.get("data-type") == "INTEGER":
4882 try:
4883 calculated_params[param_name] = int(calculated_params[param_name])
4884 except ValueError: # error converting string to int
4885 raise LcmException(
4886 "Parameter {} of primitive {} must be integer".format(
4887 param_name, primitive_desc["name"]
4888 )
4889 )
4890 elif parameter.get("data-type") == "BOOLEAN":
4891 calculated_params[param_name] = not (
4892 (str(calculated_params[param_name])).lower() == "false"
4893 )
4894
4895 # add always ns_config_info if primitive name is config
4896 if primitive_desc["name"] == "config":
4897 if "ns_config_info" in instantiation_params:
4898 calculated_params["ns_config_info"] = instantiation_params[
4899 "ns_config_info"
4900 ]
4901 return calculated_params
4902
4903 def _look_for_deployed_vca(
4904 self,
4905 deployed_vca,
4906 member_vnf_index,
4907 vdu_id,
4908 vdu_count_index,
4909 kdu_name=None,
4910 ee_descriptor_id=None,
4911 ):
4912 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4913 for vca in deployed_vca:
4914 if not vca:
4915 continue
4916 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4917 continue
4918 if (
4919 vdu_count_index is not None
4920 and vdu_count_index != vca["vdu_count_index"]
4921 ):
4922 continue
4923 if kdu_name and kdu_name != vca["kdu_name"]:
4924 continue
4925 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4926 continue
4927 break
4928 else:
4929 # vca_deployed not found
4930 raise LcmException(
4931 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4932 " is not deployed".format(
4933 member_vnf_index,
4934 vdu_id,
4935 vdu_count_index,
4936 kdu_name,
4937 ee_descriptor_id,
4938 )
4939 )
4940 # get ee_id
4941 ee_id = vca.get("ee_id")
4942 vca_type = vca.get(
4943 "type", "lxc_proxy_charm"
4944 ) # default value for backward compatibility - proxy charm
4945 if not ee_id:
4946 raise LcmException(
4947 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4948 "execution environment".format(
4949 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4950 )
4951 )
4952 return ee_id, vca_type
4953
4954 async def _ns_execute_primitive(
4955 self,
4956 ee_id,
4957 primitive,
4958 primitive_params,
4959 retries=0,
4960 retries_interval=30,
4961 timeout=None,
4962 vca_type=None,
4963 db_dict=None,
4964 vca_id: str = None,
4965 ) -> (str, str):
4966 try:
4967 if primitive == "config":
4968 primitive_params = {"params": primitive_params}
4969
4970 vca_type = vca_type or "lxc_proxy_charm"
4971
4972 while retries >= 0:
4973 try:
4974 output = await asyncio.wait_for(
4975 self.vca_map[vca_type].exec_primitive(
4976 ee_id=ee_id,
4977 primitive_name=primitive,
4978 params_dict=primitive_params,
4979 progress_timeout=self.timeout.progress_primitive,
4980 total_timeout=self.timeout.primitive,
4981 db_dict=db_dict,
4982 vca_id=vca_id,
4983 vca_type=vca_type,
4984 ),
4985 timeout=timeout or self.timeout.primitive,
4986 )
4987 # execution was OK
4988 break
4989 except asyncio.CancelledError:
4990 raise
4991 except Exception as e:
4992 retries -= 1
4993 if retries >= 0:
4994 self.logger.debug(
4995 "Error executing action {} on {} -> {}".format(
4996 primitive, ee_id, e
4997 )
4998 )
4999 # wait and retry
5000 await asyncio.sleep(retries_interval, loop=self.loop)
5001 else:
5002 if isinstance(e, asyncio.TimeoutError):
5003 e = N2VCException(
5004 message="Timed out waiting for action to complete"
5005 )
5006 return "FAILED", getattr(e, "message", repr(e))
5007
5008 return "COMPLETED", output
5009
5010 except (LcmException, asyncio.CancelledError):
5011 raise
5012 except Exception as e:
5013 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5014
5015 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5016 """
5017 Updating the vca_status with latest juju information in nsrs record
5018 :param: nsr_id: Id of the nsr
5019 :param: nslcmop_id: Id of the nslcmop
5020 :return: None
5021 """
5022
5023 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5024 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5025 vca_id = self.get_vca_id({}, db_nsr)
5026 if db_nsr["_admin"]["deployed"]["K8s"]:
5027 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5028 cluster_uuid, kdu_instance, cluster_type = (
5029 k8s["k8scluster-uuid"],
5030 k8s["kdu-instance"],
5031 k8s["k8scluster-type"],
5032 )
5033 await self._on_update_k8s_db(
5034 cluster_uuid=cluster_uuid,
5035 kdu_instance=kdu_instance,
5036 filter={"_id": nsr_id},
5037 vca_id=vca_id,
5038 cluster_type=cluster_type,
5039 )
5040 else:
5041 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5042 table, filter = "nsrs", {"_id": nsr_id}
5043 path = "_admin.deployed.VCA.{}.".format(vca_index)
5044 await self._on_update_n2vc_db(table, filter, path, {})
5045
5046 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5047 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5048
    async def action(self, nsr_id, nslcmop_id):
        """Execute an NS/VNF/VDU/KDU level primitive (action) described by an nslcmop.

        Reads the nslcmop and nsr records, resolves the primitive against the
        ns/vnf/vdu/kdu configuration descriptor, executes it either through the
        k8s cluster client (for KDU native actions) or through the deployed VCA,
        and finally writes the result back to the nslcmops/nsrs records and
        publishes an "actioned" message on kafka.

        :param nsr_id: id of the NS instance the action applies to
        :param nslcmop_id: id of the nslcmop record describing the action
        :return: (nslcmop_operation_state, detailed_status) from the finally block
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        step = ""
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params may arrive JSON-encoded; decode them into a dict
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored JSON-encoded; decode in place
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound when vnf_index is set; for a
            # pure NS-level action this line would raise NameError — TODO confirm
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            # kdu-native primitives (upgrade/rollback/status) are allowed even
            # when not declared in the descriptor
            if not config_primitive_desc:
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect the additionalParams at the proper (vdu/kdu/vnf/ns) level
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound inside the previous block;
            # a kdu_name without configuration could reach this test unbound —
            # TODO confirm
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:  # helm chart is not embedded
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # non-KDU primitive: execute through the deployed VCA
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the final operation state whatever happened above
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the result on kafka so subscribers (e.g. NBI) can react
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5421
5422 async def terminate_vdus(
5423 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5424 ):
5425 """This method terminates VDUs
5426
5427 Args:
5428 db_vnfr: VNF instance record
5429 member_vnf_index: VNF index to identify the VDUs to be removed
5430 db_nsr: NS instance record
5431 update_db_nslcmops: Nslcmop update record
5432 """
5433 vca_scaling_info = []
5434 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5435 scaling_info["scaling_direction"] = "IN"
5436 scaling_info["vdu-delete"] = {}
5437 scaling_info["kdu-delete"] = {}
5438 db_vdur = db_vnfr.get("vdur")
5439 vdur_list = copy(db_vdur)
5440 count_index = 0
5441 for index, vdu in enumerate(vdur_list):
5442 vca_scaling_info.append(
5443 {
5444 "osm_vdu_id": vdu["vdu-id-ref"],
5445 "member-vnf-index": member_vnf_index,
5446 "type": "delete",
5447 "vdu_index": count_index,
5448 }
5449 )
5450 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5451 scaling_info["vdu"].append(
5452 {
5453 "name": vdu.get("name") or vdu.get("vdu-name"),
5454 "vdu_id": vdu["vdu-id-ref"],
5455 "interface": [],
5456 }
5457 )
5458 for interface in vdu["interfaces"]:
5459 scaling_info["vdu"][index]["interface"].append(
5460 {
5461 "name": interface["name"],
5462 "ip_address": interface["ip-address"],
5463 "mac_address": interface.get("mac-address"),
5464 }
5465 )
5466 self.logger.info("NS update scaling info{}".format(scaling_info))
5467 stage[2] = "Terminating VDUs"
5468 if scaling_info.get("vdu-delete"):
5469 # scale_process = "RO"
5470 if self.ro_config.ng:
5471 await self._scale_ng_ro(
5472 logging_text,
5473 db_nsr,
5474 update_db_nslcmops,
5475 db_vnfr,
5476 scaling_info,
5477 stage,
5478 )
5479
5480 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5481 """This method is to Remove VNF instances from NS.
5482
5483 Args:
5484 nsr_id: NS instance id
5485 nslcmop_id: nslcmop id of update
5486 vnf_instance_id: id of the VNF instance to be removed
5487
5488 Returns:
5489 result: (str, str) COMPLETED/FAILED, details
5490 """
5491 try:
5492 db_nsr_update = {}
5493 logging_text = "Task ns={} update ".format(nsr_id)
5494 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5495 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5496 if check_vnfr_count > 1:
5497 stage = ["", "", ""]
5498 step = "Getting nslcmop from database"
5499 self.logger.debug(
5500 step + " after having waited for previous tasks to be completed"
5501 )
5502 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5503 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5504 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5505 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5506 """ db_vnfr = self.db.get_one(
5507 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5508
5509 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5510 await self.terminate_vdus(
5511 db_vnfr,
5512 member_vnf_index,
5513 db_nsr,
5514 update_db_nslcmops,
5515 stage,
5516 logging_text,
5517 )
5518
5519 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5520 constituent_vnfr.remove(db_vnfr.get("_id"))
5521 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5522 "constituent-vnfr-ref"
5523 )
5524 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5525 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5526 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5527 return "COMPLETED", "Done"
5528 else:
5529 step = "Terminate VNF Failed with"
5530 raise LcmException(
5531 "{} Cannot terminate the last VNF in this NS.".format(
5532 vnf_instance_id
5533 )
5534 )
5535 except (LcmException, asyncio.CancelledError):
5536 raise
5537 except Exception as e:
5538 self.logger.debug("Error removing VNF {}".format(e))
5539 return "FAILED", "Error removing VNF {}".format(e)
5540
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, updates the vnfr (revision,
        connection points and vdur taken from the operation parameters) and
        then instantiates the new VDUs through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur content is provided by the caller in operationParams
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5666
5667 async def _ns_charm_upgrade(
5668 self,
5669 ee_id,
5670 charm_id,
5671 charm_type,
5672 path,
5673 timeout: float = None,
5674 ) -> (str, str):
5675 """This method upgrade charms in VNF instances
5676
5677 Args:
5678 ee_id: Execution environment id
5679 path: Local path to the charm
5680 charm_id: charm-id
5681 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5682 timeout: (Float) Timeout for the ns update operation
5683
5684 Returns:
5685 result: (str, str) COMPLETED/FAILED, details
5686 """
5687 try:
5688 charm_type = charm_type or "lxc_proxy_charm"
5689 output = await self.vca_map[charm_type].upgrade_charm(
5690 ee_id=ee_id,
5691 path=path,
5692 charm_id=charm_id,
5693 charm_type=charm_type,
5694 timeout=timeout or self.timeout.ns_update,
5695 )
5696
5697 if output:
5698 return "COMPLETED", output
5699
5700 except (LcmException, asyncio.CancelledError):
5701 raise
5702
5703 except Exception as e:
5704 self.logger.debug("Error upgrading charm {}".format(path))
5705
5706 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5707
5708 async def update(self, nsr_id, nslcmop_id):
5709 """Update NS according to different update types
5710
5711 This method performs upgrade of VNF instances then updates the revision
5712 number in VNF record
5713
5714 Args:
5715 nsr_id: Network service will be updated
5716 nslcmop_id: ns lcm operation id
5717
5718 Returns:
5719 It may raise DbException, LcmException, N2VCException, K8sException
5720
5721 """
5722 # Try to lock HA task here
5723 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5724 if not task_is_locked_by_me:
5725 return
5726
5727 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5728 self.logger.debug(logging_text + "Enter")
5729
5730 # Set the required variables to be filled up later
5731 db_nsr = None
5732 db_nslcmop_update = {}
5733 vnfr_update = {}
5734 nslcmop_operation_state = None
5735 db_nsr_update = {}
5736 error_description_nslcmop = ""
5737 exc = None
5738 change_type = "updated"
5739 detailed_status = ""
5740 member_vnf_index = None
5741
5742 try:
5743 # wait for any previous tasks in process
5744 step = "Waiting for previous operations to terminate"
5745 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5746 self._write_ns_status(
5747 nsr_id=nsr_id,
5748 ns_state=None,
5749 current_operation="UPDATING",
5750 current_operation_id=nslcmop_id,
5751 )
5752
5753 step = "Getting nslcmop from database"
5754 db_nslcmop = self.db.get_one(
5755 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5756 )
5757 update_type = db_nslcmop["operationParams"]["updateType"]
5758
5759 step = "Getting nsr from database"
5760 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5761 old_operational_status = db_nsr["operational-status"]
5762 db_nsr_update["operational-status"] = "updating"
5763 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5764 nsr_deployed = db_nsr["_admin"].get("deployed")
5765
5766 if update_type == "CHANGE_VNFPKG":
5767 # Get the input parameters given through update request
5768 vnf_instance_id = db_nslcmop["operationParams"][
5769 "changeVnfPackageData"
5770 ].get("vnfInstanceId")
5771
5772 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5773 "vnfdId"
5774 )
5775 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5776
5777 step = "Getting vnfr from database"
5778 db_vnfr = self.db.get_one(
5779 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5780 )
5781
5782 step = "Getting vnfds from database"
5783 # Latest VNFD
5784 latest_vnfd = self.db.get_one(
5785 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5786 )
5787 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5788
5789 # Current VNFD
5790 current_vnf_revision = db_vnfr.get("revision", 1)
5791 current_vnfd = self.db.get_one(
5792 "vnfds_revisions",
5793 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5794 fail_on_empty=False,
5795 )
5796 # Charm artifact paths will be filled up later
5797 (
5798 current_charm_artifact_path,
5799 target_charm_artifact_path,
5800 charm_artifact_paths,
5801 helm_artifacts,
5802 ) = ([], [], [], [])
5803
5804 step = "Checking if revision has changed in VNFD"
5805 if current_vnf_revision != latest_vnfd_revision:
5806 change_type = "policy_updated"
5807
5808 # There is new revision of VNFD, update operation is required
5809 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5810 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5811
5812 step = "Removing the VNFD packages if they exist in the local path"
5813 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5814 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5815
5816 step = "Get the VNFD packages from FSMongo"
5817 self.fs.sync(from_path=latest_vnfd_path)
5818 self.fs.sync(from_path=current_vnfd_path)
5819
5820 step = (
5821 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5822 )
5823 current_base_folder = current_vnfd["_admin"]["storage"]
5824 latest_base_folder = latest_vnfd["_admin"]["storage"]
5825
5826 for vca_index, vca_deployed in enumerate(
5827 get_iterable(nsr_deployed, "VCA")
5828 ):
5829 vnf_index = db_vnfr.get("member-vnf-index-ref")
5830
5831 # Getting charm-id and charm-type
5832 if vca_deployed.get("member-vnf-index") == vnf_index:
5833 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5834 vca_type = vca_deployed.get("type")
5835 vdu_count_index = vca_deployed.get("vdu_count_index")
5836
5837 # Getting ee-id
5838 ee_id = vca_deployed.get("ee_id")
5839
5840 step = "Getting descriptor config"
5841 if current_vnfd.get("kdu"):
5842 search_key = "kdu_name"
5843 else:
5844 search_key = "vnfd_id"
5845
5846 entity_id = vca_deployed.get(search_key)
5847
5848 descriptor_config = get_configuration(
5849 current_vnfd, entity_id
5850 )
5851
5852 if "execution-environment-list" in descriptor_config:
5853 ee_list = descriptor_config.get(
5854 "execution-environment-list", []
5855 )
5856 else:
5857 ee_list = []
5858
5859 # There could be several charm used in the same VNF
5860 for ee_item in ee_list:
5861 if ee_item.get("juju"):
5862 step = "Getting charm name"
5863 charm_name = ee_item["juju"].get("charm")
5864
5865 step = "Setting Charm artifact paths"
5866 current_charm_artifact_path.append(
5867 get_charm_artifact_path(
5868 current_base_folder,
5869 charm_name,
5870 vca_type,
5871 current_vnf_revision,
5872 )
5873 )
5874 target_charm_artifact_path.append(
5875 get_charm_artifact_path(
5876 latest_base_folder,
5877 charm_name,
5878 vca_type,
5879 latest_vnfd_revision,
5880 )
5881 )
5882 elif ee_item.get("helm-chart"):
5883 # add chart to list and all parameters
5884 step = "Getting helm chart name"
5885 chart_name = ee_item.get("helm-chart")
5886 if (
5887 ee_item.get("helm-version")
5888 and ee_item.get("helm-version") == "v2"
5889 ):
5890 vca_type = "helm"
5891 else:
5892 vca_type = "helm-v3"
5893 step = "Setting Helm chart artifact paths"
5894
5895 helm_artifacts.append(
5896 {
5897 "current_artifact_path": get_charm_artifact_path(
5898 current_base_folder,
5899 chart_name,
5900 vca_type,
5901 current_vnf_revision,
5902 ),
5903 "target_artifact_path": get_charm_artifact_path(
5904 latest_base_folder,
5905 chart_name,
5906 vca_type,
5907 latest_vnfd_revision,
5908 ),
5909 "ee_id": ee_id,
5910 "vca_index": vca_index,
5911 "vdu_index": vdu_count_index,
5912 }
5913 )
5914
5915 charm_artifact_paths = zip(
5916 current_charm_artifact_path, target_charm_artifact_path
5917 )
5918
5919 step = "Checking if software version has changed in VNFD"
5920 if find_software_version(current_vnfd) != find_software_version(
5921 latest_vnfd
5922 ):
5923 step = "Checking if existing VNF has charm"
5924 for current_charm_path, target_charm_path in list(
5925 charm_artifact_paths
5926 ):
5927 if current_charm_path:
5928 raise LcmException(
5929 "Software version change is not supported as VNF instance {} has charm.".format(
5930 vnf_instance_id
5931 )
5932 )
5933
5934 # There is no change in the charm package, then redeploy the VNF
5935 # based on new descriptor
5936 step = "Redeploying VNF"
5937 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5938 (result, detailed_status) = await self._ns_redeploy_vnf(
5939 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5940 )
5941 if result == "FAILED":
5942 nslcmop_operation_state = result
5943 error_description_nslcmop = detailed_status
5944 db_nslcmop_update["detailed-status"] = detailed_status
5945 self.logger.debug(
5946 logging_text
5947 + " step {} Done with result {} {}".format(
5948 step, nslcmop_operation_state, detailed_status
5949 )
5950 )
5951
5952 else:
5953 step = "Checking if any charm package has changed or not"
5954 for current_charm_path, target_charm_path in list(
5955 charm_artifact_paths
5956 ):
5957 if (
5958 current_charm_path
5959 and target_charm_path
5960 and self.check_charm_hash_changed(
5961 current_charm_path, target_charm_path
5962 )
5963 ):
5964 step = "Checking whether VNF uses juju bundle"
5965 if check_juju_bundle_existence(current_vnfd):
5966 raise LcmException(
5967 "Charm upgrade is not supported for the instance which"
5968 " uses juju-bundle: {}".format(
5969 check_juju_bundle_existence(current_vnfd)
5970 )
5971 )
5972
5973 step = "Upgrading Charm"
5974 (
5975 result,
5976 detailed_status,
5977 ) = await self._ns_charm_upgrade(
5978 ee_id=ee_id,
5979 charm_id=vca_id,
5980 charm_type=vca_type,
5981 path=self.fs.path + target_charm_path,
5982 timeout=timeout_seconds,
5983 )
5984
5985 if result == "FAILED":
5986 nslcmop_operation_state = result
5987 error_description_nslcmop = detailed_status
5988
5989 db_nslcmop_update["detailed-status"] = detailed_status
5990 self.logger.debug(
5991 logging_text
5992 + " step {} Done with result {} {}".format(
5993 step, nslcmop_operation_state, detailed_status
5994 )
5995 )
5996
5997 step = "Updating policies"
5998 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5999 result = "COMPLETED"
6000 detailed_status = "Done"
6001 db_nslcmop_update["detailed-status"] = "Done"
6002
6003 # helm base EE
6004 for item in helm_artifacts:
6005 if not (
6006 item["current_artifact_path"]
6007 and item["target_artifact_path"]
6008 and self.check_charm_hash_changed(
6009 item["current_artifact_path"],
6010 item["target_artifact_path"],
6011 )
6012 ):
6013 continue
6014 db_update_entry = "_admin.deployed.VCA.{}.".format(
6015 item["vca_index"]
6016 )
6017 vnfr_id = db_vnfr["_id"]
6018 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6019 db_dict = {
6020 "collection": "nsrs",
6021 "filter": {"_id": nsr_id},
6022 "path": db_update_entry,
6023 }
6024 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6025 await self.vca_map[vca_type].upgrade_execution_environment(
6026 namespace=namespace,
6027 helm_id=helm_id,
6028 db_dict=db_dict,
6029 config=osm_config,
6030 artifact_path=item["target_artifact_path"],
6031 vca_type=vca_type,
6032 )
6033 vnf_id = db_vnfr.get("vnfd-ref")
6034 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6035 self.logger.debug("get ssh key block")
6036 rw_mgmt_ip = None
6037 if deep_get(
6038 config_descriptor,
6039 ("config-access", "ssh-access", "required"),
6040 ):
6041 # Needed to inject a ssh key
6042 user = deep_get(
6043 config_descriptor,
6044 ("config-access", "ssh-access", "default-user"),
6045 )
6046 step = (
6047 "Install configuration Software, getting public ssh key"
6048 )
6049 pub_key = await self.vca_map[
6050 vca_type
6051 ].get_ee_ssh_public__key(
6052 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6053 )
6054
6055 step = (
6056 "Insert public key into VM user={} ssh_key={}".format(
6057 user, pub_key
6058 )
6059 )
6060 self.logger.debug(logging_text + step)
6061
6062 # wait for RO (ip-address) Insert pub_key into VM
6063 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6064 logging_text,
6065 nsr_id,
6066 vnfr_id,
6067 None,
6068 item["vdu_index"],
6069 user=user,
6070 pub_key=pub_key,
6071 )
6072
6073 initial_config_primitive_list = config_descriptor.get(
6074 "initial-config-primitive"
6075 )
6076 config_primitive = next(
6077 (
6078 p
6079 for p in initial_config_primitive_list
6080 if p["name"] == "config"
6081 ),
6082 None,
6083 )
6084 if not config_primitive:
6085 continue
6086
6087 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6088 if rw_mgmt_ip:
6089 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6090 if db_vnfr.get("additionalParamsForVnf"):
6091 deploy_params.update(
6092 parse_yaml_strings(
6093 db_vnfr["additionalParamsForVnf"].copy()
6094 )
6095 )
6096 primitive_params_ = self._map_primitive_params(
6097 config_primitive, {}, deploy_params
6098 )
6099
6100 step = "execute primitive '{}' params '{}'".format(
6101 config_primitive["name"], primitive_params_
6102 )
6103 self.logger.debug(logging_text + step)
6104 await self.vca_map[vca_type].exec_primitive(
6105 ee_id=ee_id,
6106 primitive_name=config_primitive["name"],
6107 params_dict=primitive_params_,
6108 db_dict=db_dict,
6109 vca_id=vca_id,
6110 vca_type=vca_type,
6111 )
6112
6113 step = "Updating policies"
6114 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6115 detailed_status = "Done"
6116 db_nslcmop_update["detailed-status"] = "Done"
6117
6118 # If nslcmop_operation_state is None, so any operation is not failed.
6119 if not nslcmop_operation_state:
6120 nslcmop_operation_state = "COMPLETED"
6121
6122 # If update CHANGE_VNFPKG nslcmop_operation is successful
6123 # vnf revision need to be updated
6124 vnfr_update["revision"] = latest_vnfd_revision
6125 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6126
6127 self.logger.debug(
6128 logging_text
6129 + " task Done with result {} {}".format(
6130 nslcmop_operation_state, detailed_status
6131 )
6132 )
6133 elif update_type == "REMOVE_VNF":
6134 # This part is included in https://osm.etsi.org/gerrit/11876
6135 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6136 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6137 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6138 step = "Removing VNF"
6139 (result, detailed_status) = await self.remove_vnf(
6140 nsr_id, nslcmop_id, vnf_instance_id
6141 )
6142 if result == "FAILED":
6143 nslcmop_operation_state = result
6144 error_description_nslcmop = detailed_status
6145 db_nslcmop_update["detailed-status"] = detailed_status
6146 change_type = "vnf_terminated"
6147 if not nslcmop_operation_state:
6148 nslcmop_operation_state = "COMPLETED"
6149 self.logger.debug(
6150 logging_text
6151 + " task Done with result {} {}".format(
6152 nslcmop_operation_state, detailed_status
6153 )
6154 )
6155
6156 elif update_type == "OPERATE_VNF":
6157 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6158 "vnfInstanceId"
6159 ]
6160 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6161 "changeStateTo"
6162 ]
6163 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6164 "additionalParam"
6165 ]
6166 (result, detailed_status) = await self.rebuild_start_stop(
6167 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6168 )
6169 if result == "FAILED":
6170 nslcmop_operation_state = result
6171 error_description_nslcmop = detailed_status
6172 db_nslcmop_update["detailed-status"] = detailed_status
6173 if not nslcmop_operation_state:
6174 nslcmop_operation_state = "COMPLETED"
6175 self.logger.debug(
6176 logging_text
6177 + " task Done with result {} {}".format(
6178 nslcmop_operation_state, detailed_status
6179 )
6180 )
6181
6182 # If nslcmop_operation_state is None, so any operation is not failed.
6183 # All operations are executed in overall.
6184 if not nslcmop_operation_state:
6185 nslcmop_operation_state = "COMPLETED"
6186 db_nsr_update["operational-status"] = old_operational_status
6187
6188 except (DbException, LcmException, N2VCException, K8sException) as e:
6189 self.logger.error(logging_text + "Exit Exception {}".format(e))
6190 exc = e
6191 except asyncio.CancelledError:
6192 self.logger.error(
6193 logging_text + "Cancelled Exception while '{}'".format(step)
6194 )
6195 exc = "Operation was cancelled"
6196 except asyncio.TimeoutError:
6197 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6198 exc = "Timeout"
6199 except Exception as e:
6200 exc = traceback.format_exc()
6201 self.logger.critical(
6202 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6203 exc_info=True,
6204 )
6205 finally:
6206 if exc:
6207 db_nslcmop_update[
6208 "detailed-status"
6209 ] = (
6210 detailed_status
6211 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6212 nslcmop_operation_state = "FAILED"
6213 db_nsr_update["operational-status"] = old_operational_status
6214 if db_nsr:
6215 self._write_ns_status(
6216 nsr_id=nsr_id,
6217 ns_state=db_nsr["nsState"],
6218 current_operation="IDLE",
6219 current_operation_id=None,
6220 other_update=db_nsr_update,
6221 )
6222
6223 self._write_op_status(
6224 op_id=nslcmop_id,
6225 stage="",
6226 error_message=error_description_nslcmop,
6227 operation_state=nslcmop_operation_state,
6228 other_update=db_nslcmop_update,
6229 )
6230
6231 if nslcmop_operation_state:
6232 try:
6233 msg = {
6234 "nsr_id": nsr_id,
6235 "nslcmop_id": nslcmop_id,
6236 "operationState": nslcmop_operation_state,
6237 }
6238 if (
6239 change_type in ("vnf_terminated", "policy_updated")
6240 and member_vnf_index
6241 ):
6242 msg.update({"vnf_member_index": member_vnf_index})
6243 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6244 except Exception as e:
6245 self.logger.error(
6246 logging_text + "kafka_write notification Exception {}".format(e)
6247 )
6248 self.logger.debug(logging_text + "Exit")
6249 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6250 return nslcmop_operation_state, detailed_status
6251
6252 async def scale(self, nsr_id, nslcmop_id):
6253 # Try to lock HA task here
6254 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6255 if not task_is_locked_by_me:
6256 return
6257
6258 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6259 stage = ["", "", ""]
6260 tasks_dict_info = {}
6261 # ^ stage, step, VIM progress
6262 self.logger.debug(logging_text + "Enter")
6263 # get all needed from database
6264 db_nsr = None
6265 db_nslcmop_update = {}
6266 db_nsr_update = {}
6267 exc = None
6268 # in case of error, indicates what part of scale was failed to put nsr at error status
6269 scale_process = None
6270 old_operational_status = ""
6271 old_config_status = ""
6272 nsi_id = None
6273 try:
6274 # wait for any previous tasks in process
6275 step = "Waiting for previous operations to terminate"
6276 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6277 self._write_ns_status(
6278 nsr_id=nsr_id,
6279 ns_state=None,
6280 current_operation="SCALING",
6281 current_operation_id=nslcmop_id,
6282 )
6283
6284 step = "Getting nslcmop from database"
6285 self.logger.debug(
6286 step + " after having waited for previous tasks to be completed"
6287 )
6288 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6289
6290 step = "Getting nsr from database"
6291 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6292 old_operational_status = db_nsr["operational-status"]
6293 old_config_status = db_nsr["config-status"]
6294
6295 step = "Parsing scaling parameters"
6296 db_nsr_update["operational-status"] = "scaling"
6297 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6298 nsr_deployed = db_nsr["_admin"].get("deployed")
6299
6300 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6301 "scaleByStepData"
6302 ]["member-vnf-index"]
6303 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6304 "scaleByStepData"
6305 ]["scaling-group-descriptor"]
6306 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6307 # for backward compatibility
6308 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6309 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6310 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6311 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6312
6313 step = "Getting vnfr from database"
6314 db_vnfr = self.db.get_one(
6315 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6316 )
6317
6318 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6319
6320 step = "Getting vnfd from database"
6321 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6322
6323 base_folder = db_vnfd["_admin"]["storage"]
6324
6325 step = "Getting scaling-group-descriptor"
6326 scaling_descriptor = find_in_list(
6327 get_scaling_aspect(db_vnfd),
6328 lambda scale_desc: scale_desc["name"] == scaling_group,
6329 )
6330 if not scaling_descriptor:
6331 raise LcmException(
6332 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6333 "at vnfd:scaling-group-descriptor".format(scaling_group)
6334 )
6335
6336 step = "Sending scale order to VIM"
6337 # TODO check if ns is in a proper status
6338 nb_scale_op = 0
6339 if not db_nsr["_admin"].get("scaling-group"):
6340 self.update_db_2(
6341 "nsrs",
6342 nsr_id,
6343 {
6344 "_admin.scaling-group": [
6345 {"name": scaling_group, "nb-scale-op": 0}
6346 ]
6347 },
6348 )
6349 admin_scale_index = 0
6350 else:
6351 for admin_scale_index, admin_scale_info in enumerate(
6352 db_nsr["_admin"]["scaling-group"]
6353 ):
6354 if admin_scale_info["name"] == scaling_group:
6355 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6356 break
6357 else: # not found, set index one plus last element and add new entry with the name
6358 admin_scale_index += 1
6359 db_nsr_update[
6360 "_admin.scaling-group.{}.name".format(admin_scale_index)
6361 ] = scaling_group
6362
6363 vca_scaling_info = []
6364 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6365 if scaling_type == "SCALE_OUT":
6366 if "aspect-delta-details" not in scaling_descriptor:
6367 raise LcmException(
6368 "Aspect delta details not fount in scaling descriptor {}".format(
6369 scaling_descriptor["name"]
6370 )
6371 )
6372 # count if max-instance-count is reached
6373 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6374
6375 scaling_info["scaling_direction"] = "OUT"
6376 scaling_info["vdu-create"] = {}
6377 scaling_info["kdu-create"] = {}
6378 for delta in deltas:
6379 for vdu_delta in delta.get("vdu-delta", {}):
6380 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6381 # vdu_index also provides the number of instance of the targeted vdu
6382 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6383 cloud_init_text = self._get_vdu_cloud_init_content(
6384 vdud, db_vnfd
6385 )
6386 if cloud_init_text:
6387 additional_params = (
6388 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6389 or {}
6390 )
6391 cloud_init_list = []
6392
6393 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6394 max_instance_count = 10
6395 if vdu_profile and "max-number-of-instances" in vdu_profile:
6396 max_instance_count = vdu_profile.get(
6397 "max-number-of-instances", 10
6398 )
6399
6400 default_instance_num = get_number_of_instances(
6401 db_vnfd, vdud["id"]
6402 )
6403 instances_number = vdu_delta.get("number-of-instances", 1)
6404 nb_scale_op += instances_number
6405
6406 new_instance_count = nb_scale_op + default_instance_num
6407 # Control if new count is over max and vdu count is less than max.
6408 # Then assign new instance count
6409 if new_instance_count > max_instance_count > vdu_count:
6410 instances_number = new_instance_count - max_instance_count
6411 else:
6412 instances_number = instances_number
6413
6414 if new_instance_count > max_instance_count:
6415 raise LcmException(
6416 "reached the limit of {} (max-instance-count) "
6417 "scaling-out operations for the "
6418 "scaling-group-descriptor '{}'".format(
6419 nb_scale_op, scaling_group
6420 )
6421 )
6422 for x in range(vdu_delta.get("number-of-instances", 1)):
6423 if cloud_init_text:
6424 # TODO Information of its own ip is not available because db_vnfr is not updated.
6425 additional_params["OSM"] = get_osm_params(
6426 db_vnfr, vdu_delta["id"], vdu_index + x
6427 )
6428 cloud_init_list.append(
6429 self._parse_cloud_init(
6430 cloud_init_text,
6431 additional_params,
6432 db_vnfd["id"],
6433 vdud["id"],
6434 )
6435 )
6436 vca_scaling_info.append(
6437 {
6438 "osm_vdu_id": vdu_delta["id"],
6439 "member-vnf-index": vnf_index,
6440 "type": "create",
6441 "vdu_index": vdu_index + x,
6442 }
6443 )
6444 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6445 for kdu_delta in delta.get("kdu-resource-delta", {}):
6446 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6447 kdu_name = kdu_profile["kdu-name"]
6448 resource_name = kdu_profile.get("resource-name", "")
6449
6450 # Might have different kdus in the same delta
6451 # Should have list for each kdu
6452 if not scaling_info["kdu-create"].get(kdu_name, None):
6453 scaling_info["kdu-create"][kdu_name] = []
6454
6455 kdur = get_kdur(db_vnfr, kdu_name)
6456 if kdur.get("helm-chart"):
6457 k8s_cluster_type = "helm-chart-v3"
6458 self.logger.debug("kdur: {}".format(kdur))
6459 if (
6460 kdur.get("helm-version")
6461 and kdur.get("helm-version") == "v2"
6462 ):
6463 k8s_cluster_type = "helm-chart"
6464 elif kdur.get("juju-bundle"):
6465 k8s_cluster_type = "juju-bundle"
6466 else:
6467 raise LcmException(
6468 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6469 "juju-bundle. Maybe an old NBI version is running".format(
6470 db_vnfr["member-vnf-index-ref"], kdu_name
6471 )
6472 )
6473
6474 max_instance_count = 10
6475 if kdu_profile and "max-number-of-instances" in kdu_profile:
6476 max_instance_count = kdu_profile.get(
6477 "max-number-of-instances", 10
6478 )
6479
6480 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6481 deployed_kdu, _ = get_deployed_kdu(
6482 nsr_deployed, kdu_name, vnf_index
6483 )
6484 if deployed_kdu is None:
6485 raise LcmException(
6486 "KDU '{}' for vnf '{}' not deployed".format(
6487 kdu_name, vnf_index
6488 )
6489 )
6490 kdu_instance = deployed_kdu.get("kdu-instance")
6491 instance_num = await self.k8scluster_map[
6492 k8s_cluster_type
6493 ].get_scale_count(
6494 resource_name,
6495 kdu_instance,
6496 vca_id=vca_id,
6497 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6498 kdu_model=deployed_kdu.get("kdu-model"),
6499 )
6500 kdu_replica_count = instance_num + kdu_delta.get(
6501 "number-of-instances", 1
6502 )
6503
6504 # Control if new count is over max and instance_num is less than max.
6505 # Then assign max instance number to kdu replica count
6506 if kdu_replica_count > max_instance_count > instance_num:
6507 kdu_replica_count = max_instance_count
6508 if kdu_replica_count > max_instance_count:
6509 raise LcmException(
6510 "reached the limit of {} (max-instance-count) "
6511 "scaling-out operations for the "
6512 "scaling-group-descriptor '{}'".format(
6513 instance_num, scaling_group
6514 )
6515 )
6516
6517 for x in range(kdu_delta.get("number-of-instances", 1)):
6518 vca_scaling_info.append(
6519 {
6520 "osm_kdu_id": kdu_name,
6521 "member-vnf-index": vnf_index,
6522 "type": "create",
6523 "kdu_index": instance_num + x - 1,
6524 }
6525 )
6526 scaling_info["kdu-create"][kdu_name].append(
6527 {
6528 "member-vnf-index": vnf_index,
6529 "type": "create",
6530 "k8s-cluster-type": k8s_cluster_type,
6531 "resource-name": resource_name,
6532 "scale": kdu_replica_count,
6533 }
6534 )
6535 elif scaling_type == "SCALE_IN":
6536 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6537
6538 scaling_info["scaling_direction"] = "IN"
6539 scaling_info["vdu-delete"] = {}
6540 scaling_info["kdu-delete"] = {}
6541
6542 for delta in deltas:
6543 for vdu_delta in delta.get("vdu-delta", {}):
6544 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6545 min_instance_count = 0
6546 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6547 if vdu_profile and "min-number-of-instances" in vdu_profile:
6548 min_instance_count = vdu_profile["min-number-of-instances"]
6549
6550 default_instance_num = get_number_of_instances(
6551 db_vnfd, vdu_delta["id"]
6552 )
6553 instance_num = vdu_delta.get("number-of-instances", 1)
6554 nb_scale_op -= instance_num
6555
6556 new_instance_count = nb_scale_op + default_instance_num
6557
6558 if new_instance_count < min_instance_count < vdu_count:
6559 instances_number = min_instance_count - new_instance_count
6560 else:
6561 instances_number = instance_num
6562
6563 if new_instance_count < min_instance_count:
6564 raise LcmException(
6565 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6566 "scaling-group-descriptor '{}'".format(
6567 nb_scale_op, scaling_group
6568 )
6569 )
6570 for x in range(vdu_delta.get("number-of-instances", 1)):
6571 vca_scaling_info.append(
6572 {
6573 "osm_vdu_id": vdu_delta["id"],
6574 "member-vnf-index": vnf_index,
6575 "type": "delete",
6576 "vdu_index": vdu_index - 1 - x,
6577 }
6578 )
6579 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6580 for kdu_delta in delta.get("kdu-resource-delta", {}):
6581 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6582 kdu_name = kdu_profile["kdu-name"]
6583 resource_name = kdu_profile.get("resource-name", "")
6584
6585 if not scaling_info["kdu-delete"].get(kdu_name, None):
6586 scaling_info["kdu-delete"][kdu_name] = []
6587
6588 kdur = get_kdur(db_vnfr, kdu_name)
6589 if kdur.get("helm-chart"):
6590 k8s_cluster_type = "helm-chart-v3"
6591 self.logger.debug("kdur: {}".format(kdur))
6592 if (
6593 kdur.get("helm-version")
6594 and kdur.get("helm-version") == "v2"
6595 ):
6596 k8s_cluster_type = "helm-chart"
6597 elif kdur.get("juju-bundle"):
6598 k8s_cluster_type = "juju-bundle"
6599 else:
6600 raise LcmException(
6601 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6602 "juju-bundle. Maybe an old NBI version is running".format(
6603 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6604 )
6605 )
6606
6607 min_instance_count = 0
6608 if kdu_profile and "min-number-of-instances" in kdu_profile:
6609 min_instance_count = kdu_profile["min-number-of-instances"]
6610
6611 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6612 deployed_kdu, _ = get_deployed_kdu(
6613 nsr_deployed, kdu_name, vnf_index
6614 )
6615 if deployed_kdu is None:
6616 raise LcmException(
6617 "KDU '{}' for vnf '{}' not deployed".format(
6618 kdu_name, vnf_index
6619 )
6620 )
6621 kdu_instance = deployed_kdu.get("kdu-instance")
6622 instance_num = await self.k8scluster_map[
6623 k8s_cluster_type
6624 ].get_scale_count(
6625 resource_name,
6626 kdu_instance,
6627 vca_id=vca_id,
6628 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6629 kdu_model=deployed_kdu.get("kdu-model"),
6630 )
6631 kdu_replica_count = instance_num - kdu_delta.get(
6632 "number-of-instances", 1
6633 )
6634
6635 if kdu_replica_count < min_instance_count < instance_num:
6636 kdu_replica_count = min_instance_count
6637 if kdu_replica_count < min_instance_count:
6638 raise LcmException(
6639 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6640 "scaling-group-descriptor '{}'".format(
6641 instance_num, scaling_group
6642 )
6643 )
6644
6645 for x in range(kdu_delta.get("number-of-instances", 1)):
6646 vca_scaling_info.append(
6647 {
6648 "osm_kdu_id": kdu_name,
6649 "member-vnf-index": vnf_index,
6650 "type": "delete",
6651 "kdu_index": instance_num - x - 1,
6652 }
6653 )
6654 scaling_info["kdu-delete"][kdu_name].append(
6655 {
6656 "member-vnf-index": vnf_index,
6657 "type": "delete",
6658 "k8s-cluster-type": k8s_cluster_type,
6659 "resource-name": resource_name,
6660 "scale": kdu_replica_count,
6661 }
6662 )
6663
6664 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6665 vdu_delete = copy(scaling_info.get("vdu-delete"))
6666 if scaling_info["scaling_direction"] == "IN":
6667 for vdur in reversed(db_vnfr["vdur"]):
6668 if vdu_delete.get(vdur["vdu-id-ref"]):
6669 vdu_delete[vdur["vdu-id-ref"]] -= 1
6670 scaling_info["vdu"].append(
6671 {
6672 "name": vdur.get("name") or vdur.get("vdu-name"),
6673 "vdu_id": vdur["vdu-id-ref"],
6674 "interface": [],
6675 }
6676 )
6677 for interface in vdur["interfaces"]:
6678 scaling_info["vdu"][-1]["interface"].append(
6679 {
6680 "name": interface["name"],
6681 "ip_address": interface["ip-address"],
6682 "mac_address": interface.get("mac-address"),
6683 }
6684 )
6685 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6686
6687 # PRE-SCALE BEGIN
6688 step = "Executing pre-scale vnf-config-primitive"
6689 if scaling_descriptor.get("scaling-config-action"):
6690 for scaling_config_action in scaling_descriptor[
6691 "scaling-config-action"
6692 ]:
6693 if (
6694 scaling_config_action.get("trigger") == "pre-scale-in"
6695 and scaling_type == "SCALE_IN"
6696 ) or (
6697 scaling_config_action.get("trigger") == "pre-scale-out"
6698 and scaling_type == "SCALE_OUT"
6699 ):
6700 vnf_config_primitive = scaling_config_action[
6701 "vnf-config-primitive-name-ref"
6702 ]
6703 step = db_nslcmop_update[
6704 "detailed-status"
6705 ] = "executing pre-scale scaling-config-action '{}'".format(
6706 vnf_config_primitive
6707 )
6708
6709 # look for primitive
6710 for config_primitive in (
6711 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6712 ).get("config-primitive", ()):
6713 if config_primitive["name"] == vnf_config_primitive:
6714 break
6715 else:
6716 raise LcmException(
6717 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6718 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6719 "primitive".format(scaling_group, vnf_config_primitive)
6720 )
6721
6722 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6723 if db_vnfr.get("additionalParamsForVnf"):
6724 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6725
6726 scale_process = "VCA"
6727 db_nsr_update["config-status"] = "configuring pre-scaling"
6728 primitive_params = self._map_primitive_params(
6729 config_primitive, {}, vnfr_params
6730 )
6731
6732 # Pre-scale retry check: Check if this sub-operation has been executed before
6733 op_index = self._check_or_add_scale_suboperation(
6734 db_nslcmop,
6735 vnf_index,
6736 vnf_config_primitive,
6737 primitive_params,
6738 "PRE-SCALE",
6739 )
6740 if op_index == self.SUBOPERATION_STATUS_SKIP:
6741 # Skip sub-operation
6742 result = "COMPLETED"
6743 result_detail = "Done"
6744 self.logger.debug(
6745 logging_text
6746 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6747 vnf_config_primitive, result, result_detail
6748 )
6749 )
6750 else:
6751 if op_index == self.SUBOPERATION_STATUS_NEW:
6752 # New sub-operation: Get index of this sub-operation
6753 op_index = (
6754 len(db_nslcmop.get("_admin", {}).get("operations"))
6755 - 1
6756 )
6757 self.logger.debug(
6758 logging_text
6759 + "vnf_config_primitive={} New sub-operation".format(
6760 vnf_config_primitive
6761 )
6762 )
6763 else:
6764 # retry: Get registered params for this existing sub-operation
6765 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6766 op_index
6767 ]
6768 vnf_index = op.get("member_vnf_index")
6769 vnf_config_primitive = op.get("primitive")
6770 primitive_params = op.get("primitive_params")
6771 self.logger.debug(
6772 logging_text
6773 + "vnf_config_primitive={} Sub-operation retry".format(
6774 vnf_config_primitive
6775 )
6776 )
6777 # Execute the primitive, either with new (first-time) or registered (reintent) args
6778 ee_descriptor_id = config_primitive.get(
6779 "execution-environment-ref"
6780 )
6781 primitive_name = config_primitive.get(
6782 "execution-environment-primitive", vnf_config_primitive
6783 )
6784 ee_id, vca_type = self._look_for_deployed_vca(
6785 nsr_deployed["VCA"],
6786 member_vnf_index=vnf_index,
6787 vdu_id=None,
6788 vdu_count_index=None,
6789 ee_descriptor_id=ee_descriptor_id,
6790 )
6791 result, result_detail = await self._ns_execute_primitive(
6792 ee_id,
6793 primitive_name,
6794 primitive_params,
6795 vca_type=vca_type,
6796 vca_id=vca_id,
6797 )
6798 self.logger.debug(
6799 logging_text
6800 + "vnf_config_primitive={} Done with result {} {}".format(
6801 vnf_config_primitive, result, result_detail
6802 )
6803 )
6804 # Update operationState = COMPLETED | FAILED
6805 self._update_suboperation_status(
6806 db_nslcmop, op_index, result, result_detail
6807 )
6808
6809 if result == "FAILED":
6810 raise LcmException(result_detail)
6811 db_nsr_update["config-status"] = old_config_status
6812 scale_process = None
6813 # PRE-SCALE END
6814
6815 db_nsr_update[
6816 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6817 ] = nb_scale_op
6818 db_nsr_update[
6819 "_admin.scaling-group.{}.time".format(admin_scale_index)
6820 ] = time()
6821
6822 # SCALE-IN VCA - BEGIN
6823 if vca_scaling_info:
6824 step = db_nslcmop_update[
6825 "detailed-status"
6826 ] = "Deleting the execution environments"
6827 scale_process = "VCA"
6828 for vca_info in vca_scaling_info:
6829 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6830 member_vnf_index = str(vca_info["member-vnf-index"])
6831 self.logger.debug(
6832 logging_text + "vdu info: {}".format(vca_info)
6833 )
6834 if vca_info.get("osm_vdu_id"):
6835 vdu_id = vca_info["osm_vdu_id"]
6836 vdu_index = int(vca_info["vdu_index"])
6837 stage[
6838 1
6839 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6840 member_vnf_index, vdu_id, vdu_index
6841 )
6842 stage[2] = step = "Scaling in VCA"
6843 self._write_op_status(op_id=nslcmop_id, stage=stage)
6844 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6845 config_update = db_nsr["configurationStatus"]
6846 for vca_index, vca in enumerate(vca_update):
6847 if (
6848 (vca or vca.get("ee_id"))
6849 and vca["member-vnf-index"] == member_vnf_index
6850 and vca["vdu_count_index"] == vdu_index
6851 ):
6852 if vca.get("vdu_id"):
6853 config_descriptor = get_configuration(
6854 db_vnfd, vca.get("vdu_id")
6855 )
6856 elif vca.get("kdu_name"):
6857 config_descriptor = get_configuration(
6858 db_vnfd, vca.get("kdu_name")
6859 )
6860 else:
6861 config_descriptor = get_configuration(
6862 db_vnfd, db_vnfd["id"]
6863 )
6864 operation_params = (
6865 db_nslcmop.get("operationParams") or {}
6866 )
6867 exec_terminate_primitives = not operation_params.get(
6868 "skip_terminate_primitives"
6869 ) and vca.get("needed_terminate")
6870 task = asyncio.ensure_future(
6871 asyncio.wait_for(
6872 self.destroy_N2VC(
6873 logging_text,
6874 db_nslcmop,
6875 vca,
6876 config_descriptor,
6877 vca_index,
6878 destroy_ee=True,
6879 exec_primitives=exec_terminate_primitives,
6880 scaling_in=True,
6881 vca_id=vca_id,
6882 ),
6883 timeout=self.timeout.charm_delete,
6884 )
6885 )
6886 tasks_dict_info[task] = "Terminating VCA {}".format(
6887 vca.get("ee_id")
6888 )
6889 del vca_update[vca_index]
6890 del config_update[vca_index]
6891 # wait for pending tasks of terminate primitives
6892 if tasks_dict_info:
6893 self.logger.debug(
6894 logging_text
6895 + "Waiting for tasks {}".format(
6896 list(tasks_dict_info.keys())
6897 )
6898 )
6899 error_list = await self._wait_for_tasks(
6900 logging_text,
6901 tasks_dict_info,
6902 min(
6903 self.timeout.charm_delete, self.timeout.ns_terminate
6904 ),
6905 stage,
6906 nslcmop_id,
6907 )
6908 tasks_dict_info.clear()
6909 if error_list:
6910 raise LcmException("; ".join(error_list))
6911
6912 db_vca_and_config_update = {
6913 "_admin.deployed.VCA": vca_update,
6914 "configurationStatus": config_update,
6915 }
6916 self.update_db_2(
6917 "nsrs", db_nsr["_id"], db_vca_and_config_update
6918 )
6919 scale_process = None
6920 # SCALE-IN VCA - END
6921
6922 # SCALE RO - BEGIN
6923 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6924 scale_process = "RO"
6925 if self.ro_config.ng:
6926 await self._scale_ng_ro(
6927 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6928 )
6929 scaling_info.pop("vdu-create", None)
6930 scaling_info.pop("vdu-delete", None)
6931
6932 scale_process = None
6933 # SCALE RO - END
6934
6935 # SCALE KDU - BEGIN
6936 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6937 scale_process = "KDU"
6938 await self._scale_kdu(
6939 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6940 )
6941 scaling_info.pop("kdu-create", None)
6942 scaling_info.pop("kdu-delete", None)
6943
6944 scale_process = None
6945 # SCALE KDU - END
6946
6947 if db_nsr_update:
6948 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6949
6950 # SCALE-UP VCA - BEGIN
6951 if vca_scaling_info:
6952 step = db_nslcmop_update[
6953 "detailed-status"
6954 ] = "Creating new execution environments"
6955 scale_process = "VCA"
6956 for vca_info in vca_scaling_info:
6957 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6958 member_vnf_index = str(vca_info["member-vnf-index"])
6959 self.logger.debug(
6960 logging_text + "vdu info: {}".format(vca_info)
6961 )
6962 vnfd_id = db_vnfr["vnfd-ref"]
6963 if vca_info.get("osm_vdu_id"):
6964 vdu_index = int(vca_info["vdu_index"])
6965 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6966 if db_vnfr.get("additionalParamsForVnf"):
6967 deploy_params.update(
6968 parse_yaml_strings(
6969 db_vnfr["additionalParamsForVnf"].copy()
6970 )
6971 )
6972 descriptor_config = get_configuration(
6973 db_vnfd, db_vnfd["id"]
6974 )
6975 if descriptor_config:
6976 vdu_id = None
6977 vdu_name = None
6978 kdu_name = None
6979 kdu_index = None
6980 self._deploy_n2vc(
6981 logging_text=logging_text
6982 + "member_vnf_index={} ".format(member_vnf_index),
6983 db_nsr=db_nsr,
6984 db_vnfr=db_vnfr,
6985 nslcmop_id=nslcmop_id,
6986 nsr_id=nsr_id,
6987 nsi_id=nsi_id,
6988 vnfd_id=vnfd_id,
6989 vdu_id=vdu_id,
6990 kdu_name=kdu_name,
6991 kdu_index=kdu_index,
6992 member_vnf_index=member_vnf_index,
6993 vdu_index=vdu_index,
6994 vdu_name=vdu_name,
6995 deploy_params=deploy_params,
6996 descriptor_config=descriptor_config,
6997 base_folder=base_folder,
6998 task_instantiation_info=tasks_dict_info,
6999 stage=stage,
7000 )
7001 vdu_id = vca_info["osm_vdu_id"]
7002 vdur = find_in_list(
7003 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7004 )
7005 descriptor_config = get_configuration(db_vnfd, vdu_id)
7006 if vdur.get("additionalParams"):
7007 deploy_params_vdu = parse_yaml_strings(
7008 vdur["additionalParams"]
7009 )
7010 else:
7011 deploy_params_vdu = deploy_params
7012 deploy_params_vdu["OSM"] = get_osm_params(
7013 db_vnfr, vdu_id, vdu_count_index=vdu_index
7014 )
7015 if descriptor_config:
7016 vdu_name = None
7017 kdu_name = None
7018 kdu_index = None
7019 stage[
7020 1
7021 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7022 member_vnf_index, vdu_id, vdu_index
7023 )
7024 stage[2] = step = "Scaling out VCA"
7025 self._write_op_status(op_id=nslcmop_id, stage=stage)
7026 self._deploy_n2vc(
7027 logging_text=logging_text
7028 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7029 member_vnf_index, vdu_id, vdu_index
7030 ),
7031 db_nsr=db_nsr,
7032 db_vnfr=db_vnfr,
7033 nslcmop_id=nslcmop_id,
7034 nsr_id=nsr_id,
7035 nsi_id=nsi_id,
7036 vnfd_id=vnfd_id,
7037 vdu_id=vdu_id,
7038 kdu_name=kdu_name,
7039 member_vnf_index=member_vnf_index,
7040 vdu_index=vdu_index,
7041 kdu_index=kdu_index,
7042 vdu_name=vdu_name,
7043 deploy_params=deploy_params_vdu,
7044 descriptor_config=descriptor_config,
7045 base_folder=base_folder,
7046 task_instantiation_info=tasks_dict_info,
7047 stage=stage,
7048 )
7049 # SCALE-UP VCA - END
7050 scale_process = None
7051
7052 # POST-SCALE BEGIN
7053 # execute primitive service POST-SCALING
7054 step = "Executing post-scale vnf-config-primitive"
7055 if scaling_descriptor.get("scaling-config-action"):
7056 for scaling_config_action in scaling_descriptor[
7057 "scaling-config-action"
7058 ]:
7059 if (
7060 scaling_config_action.get("trigger") == "post-scale-in"
7061 and scaling_type == "SCALE_IN"
7062 ) or (
7063 scaling_config_action.get("trigger") == "post-scale-out"
7064 and scaling_type == "SCALE_OUT"
7065 ):
7066 vnf_config_primitive = scaling_config_action[
7067 "vnf-config-primitive-name-ref"
7068 ]
7069 step = db_nslcmop_update[
7070 "detailed-status"
7071 ] = "executing post-scale scaling-config-action '{}'".format(
7072 vnf_config_primitive
7073 )
7074
7075 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7076 if db_vnfr.get("additionalParamsForVnf"):
7077 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7078
7079 # look for primitive
7080 for config_primitive in (
7081 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7082 ).get("config-primitive", ()):
7083 if config_primitive["name"] == vnf_config_primitive:
7084 break
7085 else:
7086 raise LcmException(
7087 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7088 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7089 "config-primitive".format(
7090 scaling_group, vnf_config_primitive
7091 )
7092 )
7093 scale_process = "VCA"
7094 db_nsr_update["config-status"] = "configuring post-scaling"
7095 primitive_params = self._map_primitive_params(
7096 config_primitive, {}, vnfr_params
7097 )
7098
7099 # Post-scale retry check: Check if this sub-operation has been executed before
7100 op_index = self._check_or_add_scale_suboperation(
7101 db_nslcmop,
7102 vnf_index,
7103 vnf_config_primitive,
7104 primitive_params,
7105 "POST-SCALE",
7106 )
7107 if op_index == self.SUBOPERATION_STATUS_SKIP:
7108 # Skip sub-operation
7109 result = "COMPLETED"
7110 result_detail = "Done"
7111 self.logger.debug(
7112 logging_text
7113 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7114 vnf_config_primitive, result, result_detail
7115 )
7116 )
7117 else:
7118 if op_index == self.SUBOPERATION_STATUS_NEW:
7119 # New sub-operation: Get index of this sub-operation
7120 op_index = (
7121 len(db_nslcmop.get("_admin", {}).get("operations"))
7122 - 1
7123 )
7124 self.logger.debug(
7125 logging_text
7126 + "vnf_config_primitive={} New sub-operation".format(
7127 vnf_config_primitive
7128 )
7129 )
7130 else:
7131 # retry: Get registered params for this existing sub-operation
7132 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7133 op_index
7134 ]
7135 vnf_index = op.get("member_vnf_index")
7136 vnf_config_primitive = op.get("primitive")
7137 primitive_params = op.get("primitive_params")
7138 self.logger.debug(
7139 logging_text
7140 + "vnf_config_primitive={} Sub-operation retry".format(
7141 vnf_config_primitive
7142 )
7143 )
7144 # Execute the primitive, either with new (first-time) or registered (reintent) args
7145 ee_descriptor_id = config_primitive.get(
7146 "execution-environment-ref"
7147 )
7148 primitive_name = config_primitive.get(
7149 "execution-environment-primitive", vnf_config_primitive
7150 )
7151 ee_id, vca_type = self._look_for_deployed_vca(
7152 nsr_deployed["VCA"],
7153 member_vnf_index=vnf_index,
7154 vdu_id=None,
7155 vdu_count_index=None,
7156 ee_descriptor_id=ee_descriptor_id,
7157 )
7158 result, result_detail = await self._ns_execute_primitive(
7159 ee_id,
7160 primitive_name,
7161 primitive_params,
7162 vca_type=vca_type,
7163 vca_id=vca_id,
7164 )
7165 self.logger.debug(
7166 logging_text
7167 + "vnf_config_primitive={} Done with result {} {}".format(
7168 vnf_config_primitive, result, result_detail
7169 )
7170 )
7171 # Update operationState = COMPLETED | FAILED
7172 self._update_suboperation_status(
7173 db_nslcmop, op_index, result, result_detail
7174 )
7175
7176 if result == "FAILED":
7177 raise LcmException(result_detail)
7178 db_nsr_update["config-status"] = old_config_status
7179 scale_process = None
7180 # POST-SCALE END
7181
7182 db_nsr_update[
7183 "detailed-status"
7184 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7185 db_nsr_update["operational-status"] = (
7186 "running"
7187 if old_operational_status == "failed"
7188 else old_operational_status
7189 )
7190 db_nsr_update["config-status"] = old_config_status
7191 return
7192 except (
7193 ROclient.ROClientException,
7194 DbException,
7195 LcmException,
7196 NgRoException,
7197 ) as e:
7198 self.logger.error(logging_text + "Exit Exception {}".format(e))
7199 exc = e
7200 except asyncio.CancelledError:
7201 self.logger.error(
7202 logging_text + "Cancelled Exception while '{}'".format(step)
7203 )
7204 exc = "Operation was cancelled"
7205 except Exception as e:
7206 exc = traceback.format_exc()
7207 self.logger.critical(
7208 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7209 exc_info=True,
7210 )
7211 finally:
7212 self._write_ns_status(
7213 nsr_id=nsr_id,
7214 ns_state=None,
7215 current_operation="IDLE",
7216 current_operation_id=None,
7217 )
7218 if tasks_dict_info:
7219 stage[1] = "Waiting for instantiate pending tasks."
7220 self.logger.debug(logging_text + stage[1])
7221 exc = await self._wait_for_tasks(
7222 logging_text,
7223 tasks_dict_info,
7224 self.timeout.ns_deploy,
7225 stage,
7226 nslcmop_id,
7227 nsr_id=nsr_id,
7228 )
7229 if exc:
7230 db_nslcmop_update[
7231 "detailed-status"
7232 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7233 nslcmop_operation_state = "FAILED"
7234 if db_nsr:
7235 db_nsr_update["operational-status"] = old_operational_status
7236 db_nsr_update["config-status"] = old_config_status
7237 db_nsr_update["detailed-status"] = ""
7238 if scale_process:
7239 if "VCA" in scale_process:
7240 db_nsr_update["config-status"] = "failed"
7241 if "RO" in scale_process:
7242 db_nsr_update["operational-status"] = "failed"
7243 db_nsr_update[
7244 "detailed-status"
7245 ] = "FAILED scaling nslcmop={} {}: {}".format(
7246 nslcmop_id, step, exc
7247 )
7248 else:
7249 error_description_nslcmop = None
7250 nslcmop_operation_state = "COMPLETED"
7251 db_nslcmop_update["detailed-status"] = "Done"
7252
7253 self._write_op_status(
7254 op_id=nslcmop_id,
7255 stage="",
7256 error_message=error_description_nslcmop,
7257 operation_state=nslcmop_operation_state,
7258 other_update=db_nslcmop_update,
7259 )
7260 if db_nsr:
7261 self._write_ns_status(
7262 nsr_id=nsr_id,
7263 ns_state=None,
7264 current_operation="IDLE",
7265 current_operation_id=None,
7266 other_update=db_nsr_update,
7267 )
7268
7269 if nslcmop_operation_state:
7270 try:
7271 msg = {
7272 "nsr_id": nsr_id,
7273 "nslcmop_id": nslcmop_id,
7274 "operationState": nslcmop_operation_state,
7275 }
7276 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7277 except Exception as e:
7278 self.logger.error(
7279 logging_text + "kafka_write notification Exception {}".format(e)
7280 )
7281 self.logger.debug(logging_text + "Exit")
7282 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7283
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs listed in scaling_info through the matching K8s connector.

        For every KDU entry (either "kdu-create" or "kdu-delete"):
        1. On delete: run the descriptor's terminate-config-primitives first
           (only when no juju EE handles them).
        2. Call the connector's scale() with the requested replica count.
        3. On create: run the descriptor's initial-config-primitives afterwards.

        :param logging_text: prefix for every log line of this task
        :param nsr_id: NS record _id (used to build the status-update db path)
        :param nsr_deployed: "_admin.deployed" section of the nsr
        :param db_vnfd: vnfd of the VNF owning the KDUs
        :param vca_id: VCA id forwarded to the K8s connector calls
        :param scaling_info: dict with "kdu-create" and/or "kdu-delete" maps,
            keyed by kdu_name, each value a list of per-instance scaling dicts
        """
        # only one of the two keys is processed per call; "create" wins if both exist
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU record and its position in _admin.deployed.K8s
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # where the connector writes progress/status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run terminate primitives only when there is no juju EE in charge
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # execute in ascending "seq" order as mandated by the descriptor
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung connector call
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # actual scaling call, common to create and delete
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run initial primitives only when there is no juju EE in charge
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # execute in ascending "seq" order as mandated by the descriptor
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): hard-coded 600s here, unlike the
                            # configurable timeout used for terminate primitives
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7393
7394 async def _scale_ng_ro(
7395 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7396 ):
7397 nsr_id = db_nslcmop["nsInstanceId"]
7398 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7399 db_vnfrs = {}
7400
7401 # read from db: vnfd's for every vnf
7402 db_vnfds = []
7403
7404 # for each vnf in ns, read vnfd
7405 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7406 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7407 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7408 # if we haven't this vnfd, read it from db
7409 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7410 # read from db
7411 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7412 db_vnfds.append(vnfd)
7413 n2vc_key = self.n2vc.get_public_key()
7414 n2vc_key_list = [n2vc_key]
7415 self.scale_vnfr(
7416 db_vnfr,
7417 vdu_scaling_info.get("vdu-create"),
7418 vdu_scaling_info.get("vdu-delete"),
7419 mark_delete=True,
7420 )
7421 # db_vnfr has been updated, update db_vnfrs to use it
7422 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7423 await self._instantiate_ng_ro(
7424 logging_text,
7425 nsr_id,
7426 db_nsd,
7427 db_nsr,
7428 db_nslcmop,
7429 db_vnfrs,
7430 db_vnfds,
7431 n2vc_key_list,
7432 stage=stage,
7433 start_deploy=time(),
7434 timeout_ns_deploy=self.timeout.ns_deploy,
7435 )
7436 if vdu_scaling_info.get("vdu-delete"):
7437 self.scale_vnfr(
7438 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7439 )
7440
7441 async def extract_prometheus_scrape_jobs(
7442 self,
7443 ee_id: str,
7444 artifact_path: str,
7445 ee_config_descriptor: dict,
7446 vnfr_id: str,
7447 nsr_id: str,
7448 target_ip: str,
7449 element_type: str,
7450 vnf_member_index: str = "",
7451 vdu_id: str = "",
7452 vdu_index: int = None,
7453 kdu_name: str = "",
7454 kdu_index: int = None,
7455 ) -> dict:
7456 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7457 This method will wait until the corresponding VDU or KDU is fully instantiated
7458
7459 Args:
7460 ee_id (str): Execution Environment ID
7461 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7462 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7463 vnfr_id (str): VNFR ID where this EE applies
7464 nsr_id (str): NSR ID where this EE applies
7465 target_ip (str): VDU/KDU instance IP address
7466 element_type (str): NS or VNF or VDU or KDU
7467 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7468 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7469 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7470 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7471 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7472
7473 Raises:
7474 LcmException: When the VDU or KDU instance was not found in an hour
7475
7476 Returns:
7477 _type_: Prometheus jobs
7478 """
7479 # default the vdur and kdur names to an empty string, to avoid any later
7480 # problem with Prometheus when the element type is not VDU or KDU
7481 vdur_name = ""
7482 kdur_name = ""
7483
7484 # look if exist a file called 'prometheus*.j2' and
7485 artifact_content = self.fs.dir_ls(artifact_path)
7486 job_file = next(
7487 (
7488 f
7489 for f in artifact_content
7490 if f.startswith("prometheus") and f.endswith(".j2")
7491 ),
7492 None,
7493 )
7494 if not job_file:
7495 return
7496 with self.fs.file_open((artifact_path, job_file), "r") as f:
7497 job_data = f.read()
7498
7499 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7500 if element_type in ("VDU", "KDU"):
7501 for _ in range(360):
7502 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7503 if vdu_id and vdu_index is not None:
7504 vdur = next(
7505 (
7506 x
7507 for x in get_iterable(db_vnfr, "vdur")
7508 if (
7509 x.get("vdu-id-ref") == vdu_id
7510 and x.get("count-index") == vdu_index
7511 )
7512 ),
7513 {},
7514 )
7515 if vdur.get("name"):
7516 vdur_name = vdur.get("name")
7517 break
7518 if kdu_name and kdu_index is not None:
7519 kdur = next(
7520 (
7521 x
7522 for x in get_iterable(db_vnfr, "kdur")
7523 if (
7524 x.get("kdu-name") == kdu_name
7525 and x.get("count-index") == kdu_index
7526 )
7527 ),
7528 {},
7529 )
7530 if kdur.get("name"):
7531 kdur_name = kdur.get("name")
7532 break
7533
7534 await asyncio.sleep(10, loop=self.loop)
7535 else:
7536 if vdu_id and vdu_index is not None:
7537 raise LcmException(
7538 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7539 )
7540 if kdu_name and kdu_index is not None:
7541 raise LcmException(
7542 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7543 )
7544
7545 # TODO get_service
7546 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7547 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7548 host_port = "80"
7549 vnfr_id = vnfr_id.replace("-", "")
7550 variables = {
7551 "JOB_NAME": vnfr_id,
7552 "TARGET_IP": target_ip,
7553 "EXPORTER_POD_IP": host_name,
7554 "EXPORTER_POD_PORT": host_port,
7555 "NSR_ID": nsr_id,
7556 "VNF_MEMBER_INDEX": vnf_member_index,
7557 "VDUR_NAME": vdur_name,
7558 "KDUR_NAME": kdur_name,
7559 "ELEMENT_TYPE": element_type,
7560 }
7561 job_list = parse_job(job_data, variables)
7562 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7563 for job in job_list:
7564 if (
7565 not isinstance(job.get("job_name"), str)
7566 or vnfr_id not in job["job_name"]
7567 ):
7568 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7569 job["nsr_id"] = nsr_id
7570 job["vnfr_id"] = vnfr_id
7571 return job_list
7572
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild a single VDU of a VNF through NG-RO.

        Looks up the target vdur by vdu_id and count-index, sends the
        operation payload to RO and waits for the RO action to finish.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation record id (start/stop/rebuild)
        :param vnf_id: vnfr _id of the VNF owning the VDU
        :param additional_param: dict with "vdu_id" and "count-index" keys
        :param operation_type: one of the RO operations (e.g. start/stop/rebuild)
        :return: ("COMPLETED", "Done") on success, ("FAILED", <detail>) on error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # all instances of the requested VDU, then pick the requested count-index
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info is taken as the target VIM identifier
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # every except branch falls through to this common failure return
        return "FAILED", "Error in operate VNF {}".format(exc)
7659
7660 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7661 """
7662 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7663
7664 :param: vim_account_id: VIM Account ID
7665
7666 :return: (cloud_name, cloud_credential)
7667 """
7668 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7669 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7670
7671 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7672 """
7673 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7674
7675 :param: vim_account_id: VIM Account ID
7676
7677 :return: (cloud_name, cloud_credential)
7678 """
7679 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7680 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7681
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation; nothing to do here
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # the RO migrate payload is the operation params as-is
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the migrate action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                # NOTE(review): db_nsr_update is filled here but never written
                # back with update_db_2 in this method — confirm intended
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is always "" even on failure;
            # detailed-status in other_update carries the error — confirm intended
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    # notify other modules over kafka that the NS was migrated
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    # notification failure must not fail the whole operation
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7784
7785 async def heal(self, nsr_id, nslcmop_id):
7786 """
7787 Heal NS
7788
7789 :param nsr_id: ns instance to heal
7790 :param nslcmop_id: operation to run
7791 :return:
7792 """
7793
7794 # Try to lock HA task here
7795 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7796 if not task_is_locked_by_me:
7797 return
7798
7799 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7800 stage = ["", "", ""]
7801 tasks_dict_info = {}
7802 # ^ stage, step, VIM progress
7803 self.logger.debug(logging_text + "Enter")
7804 # get all needed from database
7805 db_nsr = None
7806 db_nslcmop_update = {}
7807 db_nsr_update = {}
7808 db_vnfrs = {} # vnf's info indexed by _id
7809 exc = None
7810 old_operational_status = ""
7811 old_config_status = ""
7812 nsi_id = None
7813 try:
7814 # wait for any previous tasks in process
7815 step = "Waiting for previous operations to terminate"
7816 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7817 self._write_ns_status(
7818 nsr_id=nsr_id,
7819 ns_state=None,
7820 current_operation="HEALING",
7821 current_operation_id=nslcmop_id,
7822 )
7823
7824 step = "Getting nslcmop from database"
7825 self.logger.debug(
7826 step + " after having waited for previous tasks to be completed"
7827 )
7828 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7829
7830 step = "Getting nsr from database"
7831 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7832 old_operational_status = db_nsr["operational-status"]
7833 old_config_status = db_nsr["config-status"]
7834
7835 db_nsr_update = {
7836 "_admin.deployed.RO.operational-status": "healing",
7837 }
7838 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7839
7840 step = "Sending heal order to VIM"
7841 await self.heal_RO(
7842 logging_text=logging_text,
7843 nsr_id=nsr_id,
7844 db_nslcmop=db_nslcmop,
7845 stage=stage,
7846 )
7847 # VCA tasks
7848 # read from db: nsd
7849 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7850 self.logger.debug(logging_text + stage[1])
7851 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7852 self.fs.sync(db_nsr["nsd-id"])
7853 db_nsr["nsd"] = nsd
7854 # read from db: vnfr's of this ns
7855 step = "Getting vnfrs from db"
7856 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7857 for vnfr in db_vnfrs_list:
7858 db_vnfrs[vnfr["_id"]] = vnfr
7859 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7860
7861 # Check for each target VNF
7862 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7863 for target_vnf in target_list:
7864 # Find this VNF in the list from DB
7865 vnfr_id = target_vnf.get("vnfInstanceId", None)
7866 if vnfr_id:
7867 db_vnfr = db_vnfrs[vnfr_id]
7868 vnfd_id = db_vnfr.get("vnfd-id")
7869 vnfd_ref = db_vnfr.get("vnfd-ref")
7870 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7871 base_folder = vnfd["_admin"]["storage"]
7872 vdu_id = None
7873 vdu_index = 0
7874 vdu_name = None
7875 kdu_name = None
7876 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7877 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7878
7879 # Check each target VDU and deploy N2VC
7880 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7881 "vdu", []
7882 )
7883 if not target_vdu_list:
7884 # Codigo nuevo para crear diccionario
7885 target_vdu_list = []
7886 for existing_vdu in db_vnfr.get("vdur"):
7887 vdu_name = existing_vdu.get("vdu-name", None)
7888 vdu_index = existing_vdu.get("count-index", 0)
7889 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7890 "run-day1", False
7891 )
7892 vdu_to_be_healed = {
7893 "vdu-id": vdu_name,
7894 "count-index": vdu_index,
7895 "run-day1": vdu_run_day1,
7896 }
7897 target_vdu_list.append(vdu_to_be_healed)
7898 for target_vdu in target_vdu_list:
7899 deploy_params_vdu = target_vdu
7900 # Set run-day1 vnf level value if not vdu level value exists
7901 if not deploy_params_vdu.get("run-day1") and target_vnf[
7902 "additionalParams"
7903 ].get("run-day1"):
7904 deploy_params_vdu["run-day1"] = target_vnf[
7905 "additionalParams"
7906 ].get("run-day1")
7907 vdu_name = target_vdu.get("vdu-id", None)
7908 # TODO: Get vdu_id from vdud.
7909 vdu_id = vdu_name
7910 # For multi instance VDU count-index is mandatory
7911 # For single session VDU count-indes is 0
7912 vdu_index = target_vdu.get("count-index", 0)
7913
7914 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7915 stage[1] = "Deploying Execution Environments."
7916 self.logger.debug(logging_text + stage[1])
7917
7918 # VNF Level charm. Normal case when proxy charms.
7919 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7920 descriptor_config = get_configuration(vnfd, vnfd_ref)
7921 if descriptor_config:
7922 # Continue if healed machine is management machine
7923 vnf_ip_address = db_vnfr.get("ip-address")
7924 target_instance = None
7925 for instance in db_vnfr.get("vdur", None):
7926 if (
7927 instance["vdu-name"] == vdu_name
7928 and instance["count-index"] == vdu_index
7929 ):
7930 target_instance = instance
7931 break
7932 if vnf_ip_address == target_instance.get("ip-address"):
7933 self._heal_n2vc(
7934 logging_text=logging_text
7935 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7936 member_vnf_index, vdu_name, vdu_index
7937 ),
7938 db_nsr=db_nsr,
7939 db_vnfr=db_vnfr,
7940 nslcmop_id=nslcmop_id,
7941 nsr_id=nsr_id,
7942 nsi_id=nsi_id,
7943 vnfd_id=vnfd_ref,
7944 vdu_id=None,
7945 kdu_name=None,
7946 member_vnf_index=member_vnf_index,
7947 vdu_index=0,
7948 vdu_name=None,
7949 deploy_params=deploy_params_vdu,
7950 descriptor_config=descriptor_config,
7951 base_folder=base_folder,
7952 task_instantiation_info=tasks_dict_info,
7953 stage=stage,
7954 )
7955
7956 # VDU Level charm. Normal case with native charms.
7957 descriptor_config = get_configuration(vnfd, vdu_name)
7958 if descriptor_config:
7959 self._heal_n2vc(
7960 logging_text=logging_text
7961 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7962 member_vnf_index, vdu_name, vdu_index
7963 ),
7964 db_nsr=db_nsr,
7965 db_vnfr=db_vnfr,
7966 nslcmop_id=nslcmop_id,
7967 nsr_id=nsr_id,
7968 nsi_id=nsi_id,
7969 vnfd_id=vnfd_ref,
7970 vdu_id=vdu_id,
7971 kdu_name=kdu_name,
7972 member_vnf_index=member_vnf_index,
7973 vdu_index=vdu_index,
7974 vdu_name=vdu_name,
7975 deploy_params=deploy_params_vdu,
7976 descriptor_config=descriptor_config,
7977 base_folder=base_folder,
7978 task_instantiation_info=tasks_dict_info,
7979 stage=stage,
7980 )
7981
7982 except (
7983 ROclient.ROClientException,
7984 DbException,
7985 LcmException,
7986 NgRoException,
7987 ) as e:
7988 self.logger.error(logging_text + "Exit Exception {}".format(e))
7989 exc = e
7990 except asyncio.CancelledError:
7991 self.logger.error(
7992 logging_text + "Cancelled Exception while '{}'".format(step)
7993 )
7994 exc = "Operation was cancelled"
7995 except Exception as e:
7996 exc = traceback.format_exc()
7997 self.logger.critical(
7998 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7999 exc_info=True,
8000 )
8001 finally:
8002 if tasks_dict_info:
8003 stage[1] = "Waiting for healing pending tasks."
8004 self.logger.debug(logging_text + stage[1])
8005 exc = await self._wait_for_tasks(
8006 logging_text,
8007 tasks_dict_info,
8008 self.timeout.ns_deploy,
8009 stage,
8010 nslcmop_id,
8011 nsr_id=nsr_id,
8012 )
8013 if exc:
8014 db_nslcmop_update[
8015 "detailed-status"
8016 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
8017 nslcmop_operation_state = "FAILED"
8018 if db_nsr:
8019 db_nsr_update["operational-status"] = old_operational_status
8020 db_nsr_update["config-status"] = old_config_status
8021 db_nsr_update[
8022 "detailed-status"
8023 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
8024 for task, task_name in tasks_dict_info.items():
8025 if not task.done() or task.cancelled() or task.exception():
8026 if task_name.startswith(self.task_name_deploy_vca):
8027 # A N2VC task is pending
8028 db_nsr_update["config-status"] = "failed"
8029 else:
8030 # RO task is pending
8031 db_nsr_update["operational-status"] = "failed"
8032 else:
8033 error_description_nslcmop = None
8034 nslcmop_operation_state = "COMPLETED"
8035 db_nslcmop_update["detailed-status"] = "Done"
8036 db_nsr_update["detailed-status"] = "Done"
8037 db_nsr_update["operational-status"] = "running"
8038 db_nsr_update["config-status"] = "configured"
8039
8040 self._write_op_status(
8041 op_id=nslcmop_id,
8042 stage="",
8043 error_message=error_description_nslcmop,
8044 operation_state=nslcmop_operation_state,
8045 other_update=db_nslcmop_update,
8046 )
8047 if db_nsr:
8048 self._write_ns_status(
8049 nsr_id=nsr_id,
8050 ns_state=None,
8051 current_operation="IDLE",
8052 current_operation_id=None,
8053 other_update=db_nsr_update,
8054 )
8055
8056 if nslcmop_operation_state:
8057 try:
8058 msg = {
8059 "nsr_id": nsr_id,
8060 "nslcmop_id": nslcmop_id,
8061 "operationState": nslcmop_operation_state,
8062 }
8063 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
8064 except Exception as e:
8065 self.logger.error(
8066 logging_text + "kafka_write notification Exception {}".format(e)
8067 )
8068 self.logger.debug(logging_text + "Exit")
8069 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8070
8071 async def heal_RO(
8072 self,
8073 logging_text,
8074 nsr_id,
8075 db_nslcmop,
8076 stage,
8077 ):
8078 """
8079 Heal at RO
8080 :param logging_text: preffix text to use at logging
8081 :param nsr_id: nsr identity
8082 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8083 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8084 :return: None or exception
8085 """
8086
8087 def get_vim_account(vim_account_id):
8088 nonlocal db_vims
8089 if vim_account_id in db_vims:
8090 return db_vims[vim_account_id]
8091 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8092 db_vims[vim_account_id] = db_vim
8093 return db_vim
8094
8095 try:
8096 start_heal = time()
8097 ns_params = db_nslcmop.get("operationParams")
8098 if ns_params and ns_params.get("timeout_ns_heal"):
8099 timeout_ns_heal = ns_params["timeout_ns_heal"]
8100 else:
8101 timeout_ns_heal = self.timeout.ns_heal
8102
8103 db_vims = {}
8104
8105 nslcmop_id = db_nslcmop["_id"]
8106 target = {
8107 "action_id": nslcmop_id,
8108 }
8109 self.logger.warning(
8110 "db_nslcmop={} and timeout_ns_heal={}".format(
8111 db_nslcmop, timeout_ns_heal
8112 )
8113 )
8114 target.update(db_nslcmop.get("operationParams", {}))
8115
8116 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8117 desc = await self.RO.recreate(nsr_id, target)
8118 self.logger.debug("RO return > {}".format(desc))
8119 action_id = desc["action_id"]
8120 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8121 await self._wait_ng_ro(
8122 nsr_id,
8123 action_id,
8124 nslcmop_id,
8125 start_heal,
8126 timeout_ns_heal,
8127 stage,
8128 operation="healing",
8129 )
8130
8131 # Updating NSR
8132 db_nsr_update = {
8133 "_admin.deployed.RO.operational-status": "running",
8134 "detailed-status": " ".join(stage),
8135 }
8136 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8137 self._write_op_status(nslcmop_id, stage)
8138 self.logger.debug(
8139 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8140 )
8141
8142 except Exception as e:
8143 stage[2] = "ERROR healing at VIM"
8144 # self.set_vnfr_at_error(db_vnfrs, str(e))
8145 self.logger.error(
8146 "Error healing at VIM {}".format(e),
8147 exc_info=not isinstance(
8148 e,
8149 (
8150 ROclient.ROClientException,
8151 LcmException,
8152 DbException,
8153 NgRoException,
8154 ),
8155 ),
8156 )
8157 raise
8158
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Schedule heal_N2VC tasks for every execution environment declared in
        the given configuration descriptor.

        For each execution-environment item it locates the matching entry in
        <nsrs>._admin.deployed.VCA (creating and persisting a new one if not
        found), launches heal_N2VC as an asyncio task, registers the task in
        lcm_tasks and records it in task_instantiation_info.

        :param logging_text: prefix text to use at logging
        :param db_nsr: nsr database record (mutated: new VCA entries appended)
        :param db_vnfr: vnfr database record of the VNF being healed
        :param descriptor_config: configuration section of the descriptor,
            containing "execution-environment-list" and/or "juju"
        :param deploy_params: parameters passed down to heal_N2VC primitives
        :param task_instantiation_info: dict task -> task name, filled here so
            the caller can wait on pending tasks
        :param stage: 3-item stage list shared with the caller
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive vca_type/vca_name from the execution-environment item:
            # juju charm (proxy/native/k8s-proxy) or helm chart (v2/v3)
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an already-deployed VCA entry matching this element;
            # the for/else falls through to creation when no entry matches
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # vca_index was -1 (empty list) or the last enumerated index;
                # either way +1 is the next free slot
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy consistent with the database
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            # record the task so the caller can wait for it later
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8320
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Heal one VCA execution environment and optionally re-run Day-1
        primitives.

        For native charms it waits for the healed VM, re-registers the
        execution environment and re-installs the configuration software.
        For proxy/helm types it waits for RO to finish healing, re-injects
        the SSH key into the VM and, when the operation requests run-day1,
        executes the initial config primitives again.

        :param vca_index: index of the entry in _admin.deployed.VCA to heal
        :param config_descriptor: descriptor configuration section for this
            element (NS/VNF/VDU level)
        :param deploy_params: primitive parameters; "run-day1" controls Day-1
            execution and "rw_mgmt_ip" is written here for later substitution
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/
            helm/helm-v3
        :raises LcmException: wrapping any failure, after marking the
            configurationStatus as BROKEN
        """
        nsr_id = db_nsr["_id"]
        # dotted path prefix of this VCA entry inside the nsrs document
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # location descriptor handed to N2VC so it can update status in db
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injection here (user/pub_key None): just wait for the IP
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive, if declared, provides install-time config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log a traceback for unexpected exception types
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8729
8730 async def _wait_heal_ro(
8731 self,
8732 nsr_id,
8733 timeout=600,
8734 ):
8735 start_time = time()
8736 while time() <= start_time + timeout:
8737 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8738 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8739 "operational-status"
8740 ]
8741 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8742 if operational_status_ro != "healing":
8743 break
8744 await asyncio.sleep(15, loop=self.loop)
8745 else: # timeout_ns_deploy
8746 raise NgRoException("Timeout waiting ns to deploy")
8747
8748 async def vertical_scale(self, nsr_id, nslcmop_id):
8749 """
8750 Vertical Scale the VDUs in a NS
8751
8752 :param: nsr_id: NS Instance ID
8753 :param: nslcmop_id: nslcmop ID of migrate
8754
8755 """
8756 # Try to lock HA task here
8757 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8758 if not task_is_locked_by_me:
8759 return
8760 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8761 self.logger.debug(logging_text + "Enter")
8762 # get all needed from database
8763 db_nslcmop = None
8764 db_nslcmop_update = {}
8765 nslcmop_operation_state = None
8766 db_nsr_update = {}
8767 target = {}
8768 exc = None
8769 # in case of error, indicates what part of scale was failed to put nsr at error status
8770 start_deploy = time()
8771
8772 try:
8773 # wait for any previous tasks in process
8774 step = "Waiting for previous operations to terminate"
8775 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8776
8777 self._write_ns_status(
8778 nsr_id=nsr_id,
8779 ns_state=None,
8780 current_operation="VerticalScale",
8781 current_operation_id=nslcmop_id,
8782 )
8783 step = "Getting nslcmop from database"
8784 self.logger.debug(
8785 step + " after having waited for previous tasks to be completed"
8786 )
8787 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8788 operationParams = db_nslcmop.get("operationParams")
8789 target = {}
8790 target.update(operationParams)
8791 desc = await self.RO.vertical_scale(nsr_id, target)
8792 self.logger.debug("RO return > {}".format(desc))
8793 action_id = desc["action_id"]
8794 await self._wait_ng_ro(
8795 nsr_id,
8796 action_id,
8797 nslcmop_id,
8798 start_deploy,
8799 self.timeout.verticalscale,
8800 operation="verticalscale",
8801 )
8802 except (ROclient.ROClientException, DbException, LcmException) as e:
8803 self.logger.error("Exit Exception {}".format(e))
8804 exc = e
8805 except asyncio.CancelledError:
8806 self.logger.error("Cancelled Exception while '{}'".format(step))
8807 exc = "Operation was cancelled"
8808 except Exception as e:
8809 exc = traceback.format_exc()
8810 self.logger.critical(
8811 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8812 )
8813 finally:
8814 self._write_ns_status(
8815 nsr_id=nsr_id,
8816 ns_state=None,
8817 current_operation="IDLE",
8818 current_operation_id=None,
8819 )
8820 if exc:
8821 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8822 nslcmop_operation_state = "FAILED"
8823 else:
8824 nslcmop_operation_state = "COMPLETED"
8825 db_nslcmop_update["detailed-status"] = "Done"
8826 db_nsr_update["detailed-status"] = "Done"
8827
8828 self._write_op_status(
8829 op_id=nslcmop_id,
8830 stage="",
8831 error_message="",
8832 operation_state=nslcmop_operation_state,
8833 other_update=db_nslcmop_update,
8834 )
8835 if nslcmop_operation_state:
8836 try:
8837 msg = {
8838 "nsr_id": nsr_id,
8839 "nslcmop_id": nslcmop_id,
8840 "operationState": nslcmop_operation_state,
8841 }
8842 await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8843 except Exception as e:
8844 self.logger.error(
8845 logging_text + "kafka_write notification Exception {}".format(e)
8846 )
8847 self.logger.debug(logging_text + "Exit")
8848 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")