Bug 2236 - Upgrade of Helm Charts is failing when the Helm Chart is embedded in the...
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm_conn import K8sHelmConnector
102 from n2vc.k8s_helm3_conn import K8sHelm3Connector
103 from n2vc.k8s_juju_conn import K8sJujuConnector
104
105 from osm_common.dbbase import DbException
106 from osm_common.fsbase import FsException
107
108 from osm_lcm.data_utils.database.database import Database
109 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
110 from osm_lcm.data_utils.wim import (
111 get_sdn_ports,
112 get_target_wim_attrs,
113 select_feasible_wim_account,
114 )
115
116 from n2vc.n2vc_juju_conn import N2VCJujuConnector
117 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
118
119 from osm_lcm.lcm_helm_conn import LCMHelmConn
120 from osm_lcm.osm_config import OsmConfigBuilder
121 from osm_lcm.prometheus import parse_job
122
123 from copy import copy, deepcopy
124 from time import time
125 from uuid import uuid4
126
127 from random import randint
128
129 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
130
131
132 class NsLcm(LcmBase):
133 SUBOPERATION_STATUS_NOT_FOUND = -1
134 SUBOPERATION_STATUS_NEW = -2
135 SUBOPERATION_STATUS_SKIP = -3
136 task_name_deploy_vca = "Deploying VCA"
137
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler, forwarded to LcmBase
        :param lcm_tasks: tracker of the LCM tasks currently running
        :param config: LcmCfg object with timeout, RO and VCA sub-configurations
        :param loop: asyncio event loop shared by every connector created here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # database/filesystem singletons are created by the main LCM process;
        # only the already-initialized instances are referenced here
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju charms); status changes are pushed back
        # through the _on_update_n2vc_db callback
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connectors: helm v2, helm v3 and juju bundles
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # map descriptor deployment type -> k8s connector; note that the plain
        # "chart" type is served by helm v3
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # map VCA type -> connector; charm types go through N2VC, helm
        # execution environments through the LCM helm connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # map operation type -> RO coroutine used to poll its status;
        # healing uses the dedicated recreate_status endpoint
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
225
226 @staticmethod
227 def increment_ip_mac(ip_mac, vm_index=1):
228 if not isinstance(ip_mac, str):
229 return ip_mac
230 try:
231 # try with ipv4 look for last dot
232 i = ip_mac.rfind(".")
233 if i > 0:
234 i += 1
235 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
236 # try with ipv6 or mac look for last colon. Operate in hex
237 i = ip_mac.rfind(":")
238 if i > 0:
239 i += 1
240 # format in hex, len can be 2 for mac or 4 for ipv6
241 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
242 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
243 )
244 except Exception:
245 pass
246 return None
247
248 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """
        N2VC callback fired when juju reports a change: refreshes the nsr
        'vcaStatus', the 'configurationStatus' of the affected VCA and, when
        the ns is READY/DEGRADED, re-evaluates 'nsState' from the juju status.
        :param table: table reported by N2VC (the nsr is re-read from "nsrs")
        :param filter: database filter; its "_id" is taken as the nsr id
        :param path: dotted path of the changed element; the last component is the VCA index
        :param updated_data: changed data reported by N2VC (not used directly here)
        :param vca_id: optional VCA id used when querying juju status
        :return: None; errors are logged, only cancellation/timeout propagate
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:
            # NOTE(review): if filter is None the .get below raises before
            # nsr_id is bound and the final except would hit a NameError —
            # callers are expected to always pass a filter containing "_id"
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last component of the dotted path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # reconcile stored config status with the live juju status
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
364
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param vca_id: optional VCA id passed through to the connector
        :cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        # NOTE(review): filter defaults to None but .get() is called
        # unconditionally — callers must always pass a filter with "_id"
        nsr_id = filter.get("_id")
        try:
            # query the connector matching this cluster type for the KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the caller
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
404
405 @staticmethod
406 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
407 try:
408 env = Environment(
409 undefined=StrictUndefined,
410 autoescape=select_autoescape(default_for_string=True, default=True),
411 )
412 template = env.from_string(cloud_init_text)
413 return template.render(additional_params or {})
414 except UndefinedError as e:
415 raise LcmException(
416 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
417 "file, must be provided in the instantiation parameters inside the "
418 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
419 )
420 except (TemplateError, TemplateNotFound) as e:
421 raise LcmException(
422 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
423 vnfd_id, vdu_id, e
424 )
425 )
426
427 def _get_vdu_cloud_init_content(self, vdu, vnfd):
428 cloud_init_content = cloud_init_file = None
429 try:
430 if vdu.get("cloud-init-file"):
431 base_folder = vnfd["_admin"]["storage"]
432 if base_folder["pkg-dir"]:
433 cloud_init_file = "{}/{}/cloud_init/{}".format(
434 base_folder["folder"],
435 base_folder["pkg-dir"],
436 vdu["cloud-init-file"],
437 )
438 else:
439 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
440 base_folder["folder"],
441 vdu["cloud-init-file"],
442 )
443 with self.fs.file_open(cloud_init_file, "r") as ci_file:
444 cloud_init_content = ci_file.read()
445 elif vdu.get("cloud-init"):
446 cloud_init_content = vdu["cloud-init"]
447
448 return cloud_init_content
449 except FsException as e:
450 raise LcmException(
451 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
452 vnfd["id"], vdu["id"], cloud_init_file, e
453 )
454 )
455
456 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
457 vdur = next(
458 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
459 )
460 additional_params = vdur.get("additionalParams")
461 return parse_yaml_strings(additional_params)
462
463 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
464 """
465 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
466 :param vnfd: input vnfd
467 :param new_id: overrides vnf id if provided
468 :param additionalParams: Instantiation params for VNFs provided
469 :param nsrId: Id of the NSR
470 :return: copy of vnfd
471 """
472 vnfd_RO = deepcopy(vnfd)
473 # remove unused by RO configuration, monitoring, scaling and internal keys
474 vnfd_RO.pop("_id", None)
475 vnfd_RO.pop("_admin", None)
476 vnfd_RO.pop("monitoring-param", None)
477 vnfd_RO.pop("scaling-group-descriptor", None)
478 vnfd_RO.pop("kdu", None)
479 vnfd_RO.pop("k8s-cluster", None)
480 if new_id:
481 vnfd_RO["id"] = new_id
482
483 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
484 for vdu in get_iterable(vnfd_RO, "vdu"):
485 vdu.pop("cloud-init-file", None)
486 vdu.pop("cloud-init", None)
487 return vnfd_RO
488
489 @staticmethod
490 def ip_profile_2_RO(ip_profile):
491 RO_ip_profile = deepcopy(ip_profile)
492 if "dns-server" in RO_ip_profile:
493 if isinstance(RO_ip_profile["dns-server"], list):
494 RO_ip_profile["dns-address"] = []
495 for ds in RO_ip_profile.pop("dns-server"):
496 RO_ip_profile["dns-address"].append(ds["address"])
497 else:
498 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
499 if RO_ip_profile.get("ip-version") == "ipv4":
500 RO_ip_profile["ip-version"] = "IPv4"
501 if RO_ip_profile.get("ip-version") == "ipv6":
502 RO_ip_profile["ip-version"] = "IPv6"
503 if "dhcp-params" in RO_ip_profile:
504 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
505 return RO_ip_profile
506
507 def _get_ro_vim_id_for_vim_account(self, vim_account):
508 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
509 if db_vim["_admin"]["operationalState"] != "ENABLED":
510 raise LcmException(
511 "VIM={} is not available. operationalState={}".format(
512 vim_account, db_vim["_admin"]["operationalState"]
513 )
514 )
515 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
516 return RO_vim_id
517
518 def get_ro_wim_id_for_wim_account(self, wim_account):
519 if isinstance(wim_account, str):
520 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
521 if db_wim["_admin"]["operationalState"] != "ENABLED":
522 raise LcmException(
523 "WIM={} is not available. operationalState={}".format(
524 wim_account, db_wim["_admin"]["operationalState"]
525 )
526 )
527 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
528 return RO_wim_id
529 else:
530 return wim_account
531
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """
        Scale out/in the vdur list of a vnfr, updating both the database and
        the passed db_vnfr dictionary.
        :param db_vnfr: vnfr record; its "vdur" list is refreshed from the db on return
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, instances are only marked DELETING in the
            db instead of being pulled from the vdur list
        :return: None
        :raises LcmException: scaling out with neither an existing vdur nor a
            saved vdur-template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu-id as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each new instance is a copy of the source vdur with a
                    # fresh _id, BUILD status and bumped count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per instance,
                        # dynamic ones are cleared so the VIM reassigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark only the last vdu_count instances as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
642
643 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
644 """
645 Updates database nsr with the RO info for the created vld
646 :param ns_update_nsr: dictionary to be filled with the updated info
647 :param db_nsr: content of db_nsr. This is also modified
648 :param nsr_desc_RO: nsr descriptor from RO
649 :return: Nothing, LcmException is raised on errors
650 """
651
652 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
653 for net_RO in get_iterable(nsr_desc_RO, "nets"):
654 if vld["id"] != net_RO.get("ns_net_osm_id"):
655 continue
656 vld["vim-id"] = net_RO.get("vim_net_id")
657 vld["name"] = net_RO.get("vim_name")
658 vld["status"] = net_RO.get("status")
659 vld["status-detailed"] = net_RO.get("error_msg")
660 ns_update_nsr["vld.{}".format(vld_index)] = vld
661 break
662 else:
663 raise LcmException(
664 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
665 )
666
667 def set_vnfr_at_error(self, db_vnfrs, error_text):
668 try:
669 for db_vnfr in db_vnfrs.values():
670 vnfr_update = {"status": "ERROR"}
671 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
672 if "status" not in vdur:
673 vdur["status"] = "ERROR"
674 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
675 if error_text:
676 vdur["status-detailed"] = str(error_text)
677 vnfr_update[
678 "vdur.{}.status-detailed".format(vdu_index)
679 ] = "ERROR"
680 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
681 except DbException as e:
682 self.logger.error("Cannot update vnf. {}".format(e))
683
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry for this member index; the for-else below
            # raises if none matches
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM, nothing to update
                        continue
                    # match the RO vm by vdu id and replica count-index
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy the VIM address of each interface, matched by name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    # match the RO net by the vnf-internal net id
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
780
781 def _get_ns_config_info(self, nsr_id):
782 """
783 Generates a mapping between vnf,vdu elements and the N2VC id
784 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
785 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
786 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
787 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
788 """
789 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
790 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
791 mapping = {}
792 ns_config_info = {"osm-config-mapping": mapping}
793 for vca in vca_deployed_list:
794 if not vca["member-vnf-index"]:
795 continue
796 if not vca["vdu_id"]:
797 mapping[vca["member-vnf-index"]] = vca["application"]
798 else:
799 mapping[
800 "{}.{}.{}".format(
801 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
802 )
803 ] = vca["application"]
804 return ns_config_info
805
806 async def _instantiate_ng_ro(
807 self,
808 logging_text,
809 nsr_id,
810 nsd,
811 db_nsr,
812 db_nslcmop,
813 db_vnfrs,
814 db_vnfds,
815 n2vc_key_list,
816 stage,
817 start_deploy,
818 timeout_ns_deploy,
819 ):
820 db_vims = {}
821
822 def get_vim_account(vim_account_id):
823 nonlocal db_vims
824 if vim_account_id in db_vims:
825 return db_vims[vim_account_id]
826 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
827 db_vims[vim_account_id] = db_vim
828 return db_vim
829
830 # modify target_vld info with instantiation parameters
831 def parse_vld_instantiation_params(
832 target_vim, target_vld, vld_params, target_sdn
833 ):
834 if vld_params.get("ip-profile"):
835 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
836 vld_params["ip-profile"]
837 )
838 if vld_params.get("provider-network"):
839 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
840 "provider-network"
841 ]
842 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
843 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
844 "provider-network"
845 ]["sdn-ports"]
846
847 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
848 # if wim_account_id is specified in vld_params, validate if it is feasible.
849 wim_account_id, db_wim = select_feasible_wim_account(
850 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
851 )
852
853 if wim_account_id:
854 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
855 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
856 # update vld_params with correct WIM account Id
857 vld_params["wimAccountId"] = wim_account_id
858
859 target_wim = "wim:{}".format(wim_account_id)
860 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
861 sdn_ports = get_sdn_ports(vld_params, db_wim)
862 if len(sdn_ports) > 0:
863 target_vld["vim_info"][target_wim] = target_wim_attrs
864 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
865
866 self.logger.debug(
867 "Target VLD with WIM data: {:s}".format(str(target_vld))
868 )
869
870 for param in ("vim-network-name", "vim-network-id"):
871 if vld_params.get(param):
872 if isinstance(vld_params[param], dict):
873 for vim, vim_net in vld_params[param].items():
874 other_target_vim = "vim:" + vim
875 populate_dict(
876 target_vld["vim_info"],
877 (other_target_vim, param.replace("-", "_")),
878 vim_net,
879 )
880 else: # isinstance str
881 target_vld["vim_info"][target_vim][
882 param.replace("-", "_")
883 ] = vld_params[param]
884 if vld_params.get("common_id"):
885 target_vld["common_id"] = vld_params.get("common_id")
886
887 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
888 def update_ns_vld_target(target, ns_params):
889 for vnf_params in ns_params.get("vnf", ()):
890 if vnf_params.get("vimAccountId"):
891 target_vnf = next(
892 (
893 vnfr
894 for vnfr in db_vnfrs.values()
895 if vnf_params["member-vnf-index"]
896 == vnfr["member-vnf-index-ref"]
897 ),
898 None,
899 )
900 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
901 if not vdur:
902 return
903 for a_index, a_vld in enumerate(target["ns"]["vld"]):
904 target_vld = find_in_list(
905 get_iterable(vdur, "interfaces"),
906 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
907 )
908
909 vld_params = find_in_list(
910 get_iterable(ns_params, "vld"),
911 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
912 )
913 if target_vld:
914 if vnf_params.get("vimAccountId") not in a_vld.get(
915 "vim_info", {}
916 ):
917 target_vim_network_list = [
918 v for _, v in a_vld.get("vim_info").items()
919 ]
920 target_vim_network_name = next(
921 (
922 item.get("vim_network_name", "")
923 for item in target_vim_network_list
924 ),
925 "",
926 )
927
928 target["ns"]["vld"][a_index].get("vim_info").update(
929 {
930 "vim:{}".format(vnf_params["vimAccountId"]): {
931 "vim_network_name": target_vim_network_name,
932 }
933 }
934 )
935
936 if vld_params:
937 for param in ("vim-network-name", "vim-network-id"):
938 if vld_params.get(param) and isinstance(
939 vld_params[param], dict
940 ):
941 for vim, vim_net in vld_params[
942 param
943 ].items():
944 other_target_vim = "vim:" + vim
945 populate_dict(
946 target["ns"]["vld"][a_index].get(
947 "vim_info"
948 ),
949 (
950 other_target_vim,
951 param.replace("-", "_"),
952 ),
953 vim_net,
954 )
955
956 nslcmop_id = db_nslcmop["_id"]
957 target = {
958 "name": db_nsr["name"],
959 "ns": {"vld": []},
960 "vnf": [],
961 "image": deepcopy(db_nsr["image"]),
962 "flavor": deepcopy(db_nsr["flavor"]),
963 "action_id": nslcmop_id,
964 "cloud_init_content": {},
965 }
966 for image in target["image"]:
967 image["vim_info"] = {}
968 for flavor in target["flavor"]:
969 flavor["vim_info"] = {}
970 if db_nsr.get("affinity-or-anti-affinity-group"):
971 target["affinity-or-anti-affinity-group"] = deepcopy(
972 db_nsr["affinity-or-anti-affinity-group"]
973 )
974 for affinity_or_anti_affinity_group in target[
975 "affinity-or-anti-affinity-group"
976 ]:
977 affinity_or_anti_affinity_group["vim_info"] = {}
978
979 if db_nslcmop.get("lcmOperationType") != "instantiate":
980 # get parameters of instantiation:
981 db_nslcmop_instantiate = self.db.get_list(
982 "nslcmops",
983 {
984 "nsInstanceId": db_nslcmop["nsInstanceId"],
985 "lcmOperationType": "instantiate",
986 },
987 )[-1]
988 ns_params = db_nslcmop_instantiate.get("operationParams")
989 else:
990 ns_params = db_nslcmop.get("operationParams")
991 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
992 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
993
994 cp2target = {}
995 for vld_index, vld in enumerate(db_nsr.get("vld")):
996 target_vim = "vim:{}".format(ns_params["vimAccountId"])
997 target_vld = {
998 "id": vld["id"],
999 "name": vld["name"],
1000 "mgmt-network": vld.get("mgmt-network", False),
1001 "type": vld.get("type"),
1002 "vim_info": {
1003 target_vim: {
1004 "vim_network_name": vld.get("vim-network-name"),
1005 "vim_account_id": ns_params["vimAccountId"],
1006 }
1007 },
1008 }
1009 # check if this network needs SDN assist
1010 if vld.get("pci-interfaces"):
1011 db_vim = get_vim_account(ns_params["vimAccountId"])
1012 if vim_config := db_vim.get("config"):
1013 if sdnc_id := vim_config.get("sdn-controller"):
1014 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1015 target_sdn = "sdn:{}".format(sdnc_id)
1016 target_vld["vim_info"][target_sdn] = {
1017 "sdn": True,
1018 "target_vim": target_vim,
1019 "vlds": [sdn_vld],
1020 "type": vld.get("type"),
1021 }
1022
1023 nsd_vnf_profiles = get_vnf_profiles(nsd)
1024 for nsd_vnf_profile in nsd_vnf_profiles:
1025 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1026 if cp["virtual-link-profile-id"] == vld["id"]:
1027 cp2target[
1028 "member_vnf:{}.{}".format(
1029 cp["constituent-cpd-id"][0][
1030 "constituent-base-element-id"
1031 ],
1032 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1033 )
1034 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1035
1036 # check at nsd descriptor, if there is an ip-profile
1037 vld_params = {}
1038 nsd_vlp = find_in_list(
1039 get_virtual_link_profiles(nsd),
1040 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1041 == vld["id"],
1042 )
1043 if (
1044 nsd_vlp
1045 and nsd_vlp.get("virtual-link-protocol-data")
1046 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1047 ):
1048 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
1049 "l3-protocol-data"
1050 ]
1051
1052 # update vld_params with instantiation params
1053 vld_instantiation_params = find_in_list(
1054 get_iterable(ns_params, "vld"),
1055 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1056 )
1057 if vld_instantiation_params:
1058 vld_params.update(vld_instantiation_params)
1059 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1060 target["ns"]["vld"].append(target_vld)
1061 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1062 update_ns_vld_target(target, ns_params)
1063
1064 for vnfr in db_vnfrs.values():
1065 vnfd = find_in_list(
1066 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1067 )
1068 vnf_params = find_in_list(
1069 get_iterable(ns_params, "vnf"),
1070 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1071 )
1072 target_vnf = deepcopy(vnfr)
1073 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1074 for vld in target_vnf.get("vld", ()):
1075 # check if connected to a ns.vld, to fill target'
1076 vnf_cp = find_in_list(
1077 vnfd.get("int-virtual-link-desc", ()),
1078 lambda cpd: cpd.get("id") == vld["id"],
1079 )
1080 if vnf_cp:
1081 ns_cp = "member_vnf:{}.{}".format(
1082 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1083 )
1084 if cp2target.get(ns_cp):
1085 vld["target"] = cp2target[ns_cp]
1086
1087 vld["vim_info"] = {
1088 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1089 }
1090 # check if this network needs SDN assist
1091 target_sdn = None
1092 if vld.get("pci-interfaces"):
1093 db_vim = get_vim_account(vnfr["vim-account-id"])
1094 sdnc_id = db_vim["config"].get("sdn-controller")
1095 if sdnc_id:
1096 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1097 target_sdn = "sdn:{}".format(sdnc_id)
1098 vld["vim_info"][target_sdn] = {
1099 "sdn": True,
1100 "target_vim": target_vim,
1101 "vlds": [sdn_vld],
1102 "type": vld.get("type"),
1103 }
1104
1105 # check at vnfd descriptor, if there is an ip-profile
1106 vld_params = {}
1107 vnfd_vlp = find_in_list(
1108 get_virtual_link_profiles(vnfd),
1109 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1110 )
1111 if (
1112 vnfd_vlp
1113 and vnfd_vlp.get("virtual-link-protocol-data")
1114 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1115 ):
1116 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
1117 "l3-protocol-data"
1118 ]
1119 # update vld_params with instantiation params
1120 if vnf_params:
1121 vld_instantiation_params = find_in_list(
1122 get_iterable(vnf_params, "internal-vld"),
1123 lambda i_vld: i_vld["name"] == vld["id"],
1124 )
1125 if vld_instantiation_params:
1126 vld_params.update(vld_instantiation_params)
1127 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1128
1129 vdur_list = []
1130 for vdur in target_vnf.get("vdur", ()):
1131 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1132 continue # This vdu must not be created
1133 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1134
1135 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1136
1137 if ssh_keys_all:
1138 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1139 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1140 if (
1141 vdu_configuration
1142 and vdu_configuration.get("config-access")
1143 and vdu_configuration.get("config-access").get("ssh-access")
1144 ):
1145 vdur["ssh-keys"] = ssh_keys_all
1146 vdur["ssh-access-required"] = vdu_configuration[
1147 "config-access"
1148 ]["ssh-access"]["required"]
1149 elif (
1150 vnf_configuration
1151 and vnf_configuration.get("config-access")
1152 and vnf_configuration.get("config-access").get("ssh-access")
1153 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1154 ):
1155 vdur["ssh-keys"] = ssh_keys_all
1156 vdur["ssh-access-required"] = vnf_configuration[
1157 "config-access"
1158 ]["ssh-access"]["required"]
1159 elif ssh_keys_instantiation and find_in_list(
1160 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1161 ):
1162 vdur["ssh-keys"] = ssh_keys_instantiation
1163
1164 self.logger.debug("NS > vdur > {}".format(vdur))
1165
1166 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1167 # cloud-init
1168 if vdud.get("cloud-init-file"):
1169 vdur["cloud-init"] = "{}:file:{}".format(
1170 vnfd["_id"], vdud.get("cloud-init-file")
1171 )
1172 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1173 if vdur["cloud-init"] not in target["cloud_init_content"]:
1174 base_folder = vnfd["_admin"]["storage"]
1175 if base_folder["pkg-dir"]:
1176 cloud_init_file = "{}/{}/cloud_init/{}".format(
1177 base_folder["folder"],
1178 base_folder["pkg-dir"],
1179 vdud.get("cloud-init-file"),
1180 )
1181 else:
1182 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1183 base_folder["folder"],
1184 vdud.get("cloud-init-file"),
1185 )
1186 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1187 target["cloud_init_content"][
1188 vdur["cloud-init"]
1189 ] = ci_file.read()
1190 elif vdud.get("cloud-init"):
1191 vdur["cloud-init"] = "{}:vdu:{}".format(
1192 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1193 )
1194 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1195 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1196 "cloud-init"
1197 ]
1198 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1199 deploy_params_vdu = self._format_additional_params(
1200 vdur.get("additionalParams") or {}
1201 )
1202 deploy_params_vdu["OSM"] = get_osm_params(
1203 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1204 )
1205 vdur["additionalParams"] = deploy_params_vdu
1206
1207 # flavor
1208 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1209 if target_vim not in ns_flavor["vim_info"]:
1210 ns_flavor["vim_info"][target_vim] = {}
1211
1212 # deal with images
1213 # in case alternative images are provided we must check if they should be applied
1214 # for the vim_type, modify the vim_type taking into account
1215 ns_image_id = int(vdur["ns-image-id"])
1216 if vdur.get("alt-image-ids"):
1217 db_vim = get_vim_account(vnfr["vim-account-id"])
1218 vim_type = db_vim["vim_type"]
1219 for alt_image_id in vdur.get("alt-image-ids"):
1220 ns_alt_image = target["image"][int(alt_image_id)]
1221 if vim_type == ns_alt_image.get("vim-type"):
1222 # must use alternative image
1223 self.logger.debug(
1224 "use alternative image id: {}".format(alt_image_id)
1225 )
1226 ns_image_id = alt_image_id
1227 vdur["ns-image-id"] = ns_image_id
1228 break
1229 ns_image = target["image"][int(ns_image_id)]
1230 if target_vim not in ns_image["vim_info"]:
1231 ns_image["vim_info"][target_vim] = {}
1232
1233 # Affinity groups
1234 if vdur.get("affinity-or-anti-affinity-group-id"):
1235 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1236 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1237 if target_vim not in ns_ags["vim_info"]:
1238 ns_ags["vim_info"][target_vim] = {}
1239
1240 vdur["vim_info"] = {target_vim: {}}
1241 # instantiation parameters
1242 if vnf_params:
1243 vdu_instantiation_params = find_in_list(
1244 get_iterable(vnf_params, "vdu"),
1245 lambda i_vdu: i_vdu["id"] == vdud["id"],
1246 )
1247 if vdu_instantiation_params:
1248 # Parse the vdu_volumes from the instantiation params
1249 vdu_volumes = get_volumes_from_instantiation_params(
1250 vdu_instantiation_params, vdud
1251 )
1252 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1253 vdur_list.append(vdur)
1254 target_vnf["vdur"] = vdur_list
1255 target["vnf"].append(target_vnf)
1256
1257 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1258 desc = await self.RO.deploy(nsr_id, target)
1259 self.logger.debug("RO return > {}".format(desc))
1260 action_id = desc["action_id"]
1261 await self._wait_ng_ro(
1262 nsr_id,
1263 action_id,
1264 nslcmop_id,
1265 start_deploy,
1266 timeout_ns_deploy,
1267 stage,
1268 operation="instantiation",
1269 )
1270
1271 # Updating NSR
1272 db_nsr_update = {
1273 "_admin.deployed.RO.operational-status": "running",
1274 "detailed-status": " ".join(stage),
1275 }
1276 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1277 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1278 self._write_op_status(nslcmop_id, stage)
1279 self.logger.debug(
1280 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1281 )
1282 return
1283
1284 async def _wait_ng_ro(
1285 self,
1286 nsr_id,
1287 action_id,
1288 nslcmop_id=None,
1289 start_time=None,
1290 timeout=600,
1291 stage=None,
1292 operation=None,
1293 ):
1294 detailed_status_old = None
1295 db_nsr_update = {}
1296 start_time = start_time or time()
1297 while time() <= start_time + timeout:
1298 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1299 self.logger.debug("Wait NG RO > {}".format(desc_status))
1300 if desc_status["status"] == "FAILED":
1301 raise NgRoException(desc_status["details"])
1302 elif desc_status["status"] == "BUILD":
1303 if stage:
1304 stage[2] = "VIM: ({})".format(desc_status["details"])
1305 elif desc_status["status"] == "DONE":
1306 if stage:
1307 stage[2] = "Deployed at VIM"
1308 break
1309 else:
1310 assert False, "ROclient.check_ns_status returns unknown {}".format(
1311 desc_status["status"]
1312 )
1313 if stage and nslcmop_id and stage[2] != detailed_status_old:
1314 detailed_status_old = stage[2]
1315 db_nsr_update["detailed-status"] = " ".join(stage)
1316 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1317 self._write_op_status(nslcmop_id, stage)
1318 await asyncio.sleep(15, loop=self.loop)
1319 else: # timeout_ns_deploy
1320 raise NgRoException("Timeout waiting ns to deploy")
1321
1322 async def _terminate_ng_ro(
1323 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1324 ):
1325 db_nsr_update = {}
1326 failed_detail = []
1327 action_id = None
1328 start_deploy = time()
1329 try:
1330 target = {
1331 "ns": {"vld": []},
1332 "vnf": [],
1333 "image": [],
1334 "flavor": [],
1335 "action_id": nslcmop_id,
1336 }
1337 desc = await self.RO.deploy(nsr_id, target)
1338 action_id = desc["action_id"]
1339 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1340 self.logger.debug(
1341 logging_text
1342 + "ns terminate action at RO. action_id={}".format(action_id)
1343 )
1344
1345 # wait until done
1346 delete_timeout = 20 * 60 # 20 minutes
1347 await self._wait_ng_ro(
1348 nsr_id,
1349 action_id,
1350 nslcmop_id,
1351 start_deploy,
1352 delete_timeout,
1353 stage,
1354 operation="termination",
1355 )
1356 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1357 # delete all nsr
1358 await self.RO.delete(nsr_id)
1359 except NgRoException as e:
1360 if e.http_code == 404: # not found
1361 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1362 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1363 self.logger.debug(
1364 logging_text + "RO_action_id={} already deleted".format(action_id)
1365 )
1366 elif e.http_code == 409: # conflict
1367 failed_detail.append("delete conflict: {}".format(e))
1368 self.logger.debug(
1369 logging_text
1370 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1371 )
1372 else:
1373 failed_detail.append("delete error: {}".format(e))
1374 self.logger.error(
1375 logging_text
1376 + "RO_action_id={} delete error: {}".format(action_id, e)
1377 )
1378 except Exception as e:
1379 failed_detail.append("delete error: {}".format(e))
1380 self.logger.error(
1381 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1382 )
1383
1384 if failed_detail:
1385 stage[2] = "Error deleting from VIM"
1386 else:
1387 stage[2] = "Deleted from VIM"
1388 db_nsr_update["detailed-status"] = " ".join(stage)
1389 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1390 self._write_op_status(nslcmop_id, stage)
1391
1392 if failed_detail:
1393 raise LcmException("; ".join(failed_detail))
1394 return
1395
1396 async def instantiate_RO(
1397 self,
1398 logging_text,
1399 nsr_id,
1400 nsd,
1401 db_nsr,
1402 db_nslcmop,
1403 db_vnfrs,
1404 db_vnfds,
1405 n2vc_key_list,
1406 stage,
1407 ):
1408 """
1409 Instantiate at RO
1410 :param logging_text: preffix text to use at logging
1411 :param nsr_id: nsr identity
1412 :param nsd: database content of ns descriptor
1413 :param db_nsr: database content of ns record
1414 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1415 :param db_vnfrs:
1416 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1417 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1418 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1419 :return: None or exception
1420 """
1421 try:
1422 start_deploy = time()
1423 ns_params = db_nslcmop.get("operationParams")
1424 if ns_params and ns_params.get("timeout_ns_deploy"):
1425 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1426 else:
1427 timeout_ns_deploy = self.timeout.ns_deploy
1428
1429 # Check for and optionally request placement optimization. Database will be updated if placement activated
1430 stage[2] = "Waiting for Placement."
1431 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1432 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1433 for vnfr in db_vnfrs.values():
1434 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1435 break
1436 else:
1437 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1438
1439 return await self._instantiate_ng_ro(
1440 logging_text,
1441 nsr_id,
1442 nsd,
1443 db_nsr,
1444 db_nslcmop,
1445 db_vnfrs,
1446 db_vnfds,
1447 n2vc_key_list,
1448 stage,
1449 start_deploy,
1450 timeout_ns_deploy,
1451 )
1452 except Exception as e:
1453 stage[2] = "ERROR deploying at VIM"
1454 self.set_vnfr_at_error(db_vnfrs, str(e))
1455 self.logger.error(
1456 "Error deploying at VIM {}".format(e),
1457 exc_info=not isinstance(
1458 e,
1459 (
1460 ROclient.ROClientException,
1461 LcmException,
1462 DbException,
1463 NgRoException,
1464 ),
1465 ),
1466 )
1467 raise
1468
1469 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1470 """
1471 Wait for kdu to be up, get ip address
1472 :param logging_text: prefix use for logging
1473 :param nsr_id:
1474 :param vnfr_id:
1475 :param kdu_name:
1476 :return: IP address, K8s services
1477 """
1478
1479 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1480 nb_tries = 0
1481
1482 while nb_tries < 360:
1483 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1484 kdur = next(
1485 (
1486 x
1487 for x in get_iterable(db_vnfr, "kdur")
1488 if x.get("kdu-name") == kdu_name
1489 ),
1490 None,
1491 )
1492 if not kdur:
1493 raise LcmException(
1494 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1495 )
1496 if kdur.get("status"):
1497 if kdur["status"] in ("READY", "ENABLED"):
1498 return kdur.get("ip-address"), kdur.get("services")
1499 else:
1500 raise LcmException(
1501 "target KDU={} is in error state".format(kdu_name)
1502 )
1503
1504 await asyncio.sleep(10, loop=self.loop)
1505 nb_tries += 1
1506 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1507
1508 async def wait_vm_up_insert_key_ro(
1509 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1510 ):
1511 """
1512 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1513 :param logging_text: prefix use for logging
1514 :param nsr_id:
1515 :param vnfr_id:
1516 :param vdu_id:
1517 :param vdu_index:
1518 :param pub_key: public ssh key to inject, None to skip
1519 :param user: user to apply the public ssh key
1520 :return: IP address
1521 """
1522
1523 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1524 ip_address = None
1525 target_vdu_id = None
1526 ro_retries = 0
1527
1528 while True:
1529 ro_retries += 1
1530 if ro_retries >= 360: # 1 hour
1531 raise LcmException(
1532 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1533 )
1534
1535 await asyncio.sleep(10, loop=self.loop)
1536
1537 # get ip address
1538 if not target_vdu_id:
1539 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1540
1541 if not vdu_id: # for the VNF case
1542 if db_vnfr.get("status") == "ERROR":
1543 raise LcmException(
1544 "Cannot inject ssh-key because target VNF is in error state"
1545 )
1546 ip_address = db_vnfr.get("ip-address")
1547 if not ip_address:
1548 continue
1549 vdur = next(
1550 (
1551 x
1552 for x in get_iterable(db_vnfr, "vdur")
1553 if x.get("ip-address") == ip_address
1554 ),
1555 None,
1556 )
1557 else: # VDU case
1558 vdur = next(
1559 (
1560 x
1561 for x in get_iterable(db_vnfr, "vdur")
1562 if x.get("vdu-id-ref") == vdu_id
1563 and x.get("count-index") == vdu_index
1564 ),
1565 None,
1566 )
1567
1568 if (
1569 not vdur and len(db_vnfr.get("vdur", ())) == 1
1570 ): # If only one, this should be the target vdu
1571 vdur = db_vnfr["vdur"][0]
1572 if not vdur:
1573 raise LcmException(
1574 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1575 vnfr_id, vdu_id, vdu_index
1576 )
1577 )
1578 # New generation RO stores information at "vim_info"
1579 ng_ro_status = None
1580 target_vim = None
1581 if vdur.get("vim_info"):
1582 target_vim = next(
1583 t for t in vdur["vim_info"]
1584 ) # there should be only one key
1585 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1586 if (
1587 vdur.get("pdu-type")
1588 or vdur.get("status") == "ACTIVE"
1589 or ng_ro_status == "ACTIVE"
1590 ):
1591 ip_address = vdur.get("ip-address")
1592 if not ip_address:
1593 continue
1594 target_vdu_id = vdur["vdu-id-ref"]
1595 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1596 raise LcmException(
1597 "Cannot inject ssh-key because target VM is in error state"
1598 )
1599
1600 if not target_vdu_id:
1601 continue
1602
1603 # inject public key into machine
1604 if pub_key and user:
1605 self.logger.debug(logging_text + "Inserting RO key")
1606 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1607 if vdur.get("pdu-type"):
1608 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1609 return ip_address
1610 try:
1611 target = {
1612 "action": {
1613 "action": "inject_ssh_key",
1614 "key": pub_key,
1615 "user": user,
1616 },
1617 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1618 }
1619 desc = await self.RO.deploy(nsr_id, target)
1620 action_id = desc["action_id"]
1621 await self._wait_ng_ro(
1622 nsr_id, action_id, timeout=600, operation="instantiation"
1623 )
1624 break
1625 except NgRoException as e:
1626 raise LcmException(
1627 "Reaching max tries injecting key. Error: {}".format(e)
1628 )
1629 else:
1630 break
1631
1632 return ip_address
1633
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record identifier, used to re-read configurationStatus
        :param vca_deployed_list: list of deployed VCA records; indexed by vca_index
        :param vca_index: position of the VCA whose dependencies must be awaited
        :raises LcmException: if any dependent charm reaches BROKEN state, or
            dependencies are still pending when the retry budget is exhausted
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): "timeout" is decremented by 1 per iteration while each
        # iteration sleeps 10 s, so 300 means ~50 minutes, not 300 seconds —
        # confirm whether this retry budget is intentional.
        timeout = 300
        while timeout >= 0:
            # re-read the NS record each pass to observe fresh charm statuses
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a VCA is a dependency when this VCA has no member-vnf-index
                # (NS-level: depends on everything) or both share the same one
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still pending: break to sleep and re-poll
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1671
1672 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1673 vca_id = None
1674 if db_vnfr:
1675 vca_id = deep_get(db_vnfr, ("vca-id",))
1676 elif db_nsr:
1677 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1678 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1679 return vca_id
1680
1681 async def instantiate_N2VC(
1682 self,
1683 logging_text,
1684 vca_index,
1685 nsi_id,
1686 db_nsr,
1687 db_vnfr,
1688 vdu_id,
1689 kdu_name,
1690 vdu_index,
1691 kdu_index,
1692 config_descriptor,
1693 deploy_params,
1694 base_folder,
1695 nslcmop_id,
1696 stage,
1697 vca_type,
1698 vca_name,
1699 ee_config_descriptor,
1700 ):
1701 nsr_id = db_nsr["_id"]
1702 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1703 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1704 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1705 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1706 db_dict = {
1707 "collection": "nsrs",
1708 "filter": {"_id": nsr_id},
1709 "path": db_update_entry,
1710 }
1711 step = ""
1712 try:
1713 element_type = "NS"
1714 element_under_configuration = nsr_id
1715
1716 vnfr_id = None
1717 if db_vnfr:
1718 vnfr_id = db_vnfr["_id"]
1719 osm_config["osm"]["vnf_id"] = vnfr_id
1720
1721 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1722
1723 if vca_type == "native_charm":
1724 index_number = 0
1725 else:
1726 index_number = vdu_index or 0
1727
1728 if vnfr_id:
1729 element_type = "VNF"
1730 element_under_configuration = vnfr_id
1731 namespace += ".{}-{}".format(vnfr_id, index_number)
1732 if vdu_id:
1733 namespace += ".{}-{}".format(vdu_id, index_number)
1734 element_type = "VDU"
1735 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1736 osm_config["osm"]["vdu_id"] = vdu_id
1737 elif kdu_name:
1738 namespace += ".{}".format(kdu_name)
1739 element_type = "KDU"
1740 element_under_configuration = kdu_name
1741 osm_config["osm"]["kdu_name"] = kdu_name
1742
1743 # Get artifact path
1744 if base_folder["pkg-dir"]:
1745 artifact_path = "{}/{}/{}/{}".format(
1746 base_folder["folder"],
1747 base_folder["pkg-dir"],
1748 "charms"
1749 if vca_type
1750 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1751 else "helm-charts",
1752 vca_name,
1753 )
1754 else:
1755 artifact_path = "{}/Scripts/{}/{}/".format(
1756 base_folder["folder"],
1757 "charms"
1758 if vca_type
1759 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1760 else "helm-charts",
1761 vca_name,
1762 )
1763
1764 self.logger.debug("Artifact path > {}".format(artifact_path))
1765
1766 # get initial_config_primitive_list that applies to this element
1767 initial_config_primitive_list = config_descriptor.get(
1768 "initial-config-primitive"
1769 )
1770
1771 self.logger.debug(
1772 "Initial config primitive list > {}".format(
1773 initial_config_primitive_list
1774 )
1775 )
1776
1777 # add config if not present for NS charm
1778 ee_descriptor_id = ee_config_descriptor.get("id")
1779 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1780 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1781 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1782 )
1783
1784 self.logger.debug(
1785 "Initial config primitive list #2 > {}".format(
1786 initial_config_primitive_list
1787 )
1788 )
1789 # n2vc_redesign STEP 3.1
1790 # find old ee_id if exists
1791 ee_id = vca_deployed.get("ee_id")
1792
1793 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1794 # create or register execution environment in VCA
1795 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1796 self._write_configuration_status(
1797 nsr_id=nsr_id,
1798 vca_index=vca_index,
1799 status="CREATING",
1800 element_under_configuration=element_under_configuration,
1801 element_type=element_type,
1802 )
1803
1804 step = "create execution environment"
1805 self.logger.debug(logging_text + step)
1806
1807 ee_id = None
1808 credentials = None
1809 if vca_type == "k8s_proxy_charm":
1810 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1811 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1812 namespace=namespace,
1813 artifact_path=artifact_path,
1814 db_dict=db_dict,
1815 vca_id=vca_id,
1816 )
1817 elif vca_type == "helm" or vca_type == "helm-v3":
1818 ee_id, credentials = await self.vca_map[
1819 vca_type
1820 ].create_execution_environment(
1821 namespace=namespace,
1822 reuse_ee_id=ee_id,
1823 db_dict=db_dict,
1824 config=osm_config,
1825 artifact_path=artifact_path,
1826 chart_model=vca_name,
1827 vca_type=vca_type,
1828 )
1829 else:
1830 ee_id, credentials = await self.vca_map[
1831 vca_type
1832 ].create_execution_environment(
1833 namespace=namespace,
1834 reuse_ee_id=ee_id,
1835 db_dict=db_dict,
1836 vca_id=vca_id,
1837 )
1838
1839 elif vca_type == "native_charm":
1840 step = "Waiting to VM being up and getting IP address"
1841 self.logger.debug(logging_text + step)
1842 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1843 logging_text,
1844 nsr_id,
1845 vnfr_id,
1846 vdu_id,
1847 vdu_index,
1848 user=None,
1849 pub_key=None,
1850 )
1851 credentials = {"hostname": rw_mgmt_ip}
1852 # get username
1853 username = deep_get(
1854 config_descriptor, ("config-access", "ssh-access", "default-user")
1855 )
1856 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1857 # merged. Meanwhile let's get username from initial-config-primitive
1858 if not username and initial_config_primitive_list:
1859 for config_primitive in initial_config_primitive_list:
1860 for param in config_primitive.get("parameter", ()):
1861 if param["name"] == "ssh-username":
1862 username = param["value"]
1863 break
1864 if not username:
1865 raise LcmException(
1866 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1867 "'config-access.ssh-access.default-user'"
1868 )
1869 credentials["username"] = username
1870 # n2vc_redesign STEP 3.2
1871
1872 self._write_configuration_status(
1873 nsr_id=nsr_id,
1874 vca_index=vca_index,
1875 status="REGISTERING",
1876 element_under_configuration=element_under_configuration,
1877 element_type=element_type,
1878 )
1879
1880 step = "register execution environment {}".format(credentials)
1881 self.logger.debug(logging_text + step)
1882 ee_id = await self.vca_map[vca_type].register_execution_environment(
1883 credentials=credentials,
1884 namespace=namespace,
1885 db_dict=db_dict,
1886 vca_id=vca_id,
1887 )
1888
1889 # for compatibility with MON/POL modules, the need model and application name at database
1890 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1891 ee_id_parts = ee_id.split(".")
1892 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1893 if len(ee_id_parts) >= 2:
1894 model_name = ee_id_parts[0]
1895 application_name = ee_id_parts[1]
1896 db_nsr_update[db_update_entry + "model"] = model_name
1897 db_nsr_update[db_update_entry + "application"] = application_name
1898
1899 # n2vc_redesign STEP 3.3
1900 step = "Install configuration Software"
1901
1902 self._write_configuration_status(
1903 nsr_id=nsr_id,
1904 vca_index=vca_index,
1905 status="INSTALLING SW",
1906 element_under_configuration=element_under_configuration,
1907 element_type=element_type,
1908 other_update=db_nsr_update,
1909 )
1910
1911 # TODO check if already done
1912 self.logger.debug(logging_text + step)
1913 config = None
1914 if vca_type == "native_charm":
1915 config_primitive = next(
1916 (p for p in initial_config_primitive_list if p["name"] == "config"),
1917 None,
1918 )
1919 if config_primitive:
1920 config = self._map_primitive_params(
1921 config_primitive, {}, deploy_params
1922 )
1923 num_units = 1
1924 if vca_type == "lxc_proxy_charm":
1925 if element_type == "NS":
1926 num_units = db_nsr.get("config-units") or 1
1927 elif element_type == "VNF":
1928 num_units = db_vnfr.get("config-units") or 1
1929 elif element_type == "VDU":
1930 for v in db_vnfr["vdur"]:
1931 if vdu_id == v["vdu-id-ref"]:
1932 num_units = v.get("config-units") or 1
1933 break
1934 if vca_type != "k8s_proxy_charm":
1935 await self.vca_map[vca_type].install_configuration_sw(
1936 ee_id=ee_id,
1937 artifact_path=artifact_path,
1938 db_dict=db_dict,
1939 config=config,
1940 num_units=num_units,
1941 vca_id=vca_id,
1942 vca_type=vca_type,
1943 )
1944
1945 # write in db flag of configuration_sw already installed
1946 self.update_db_2(
1947 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1948 )
1949
1950 # add relations for this VCA (wait for other peers related with this VCA)
1951 is_relation_added = await self._add_vca_relations(
1952 logging_text=logging_text,
1953 nsr_id=nsr_id,
1954 vca_type=vca_type,
1955 vca_index=vca_index,
1956 )
1957
1958 if not is_relation_added:
1959 raise LcmException("Relations could not be added to VCA.")
1960
1961 # if SSH access is required, then get execution environment SSH public
1962 # if native charm we have waited already to VM be UP
1963 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1964 pub_key = None
1965 user = None
1966 # self.logger.debug("get ssh key block")
1967 if deep_get(
1968 config_descriptor, ("config-access", "ssh-access", "required")
1969 ):
1970 # self.logger.debug("ssh key needed")
1971 # Needed to inject a ssh key
1972 user = deep_get(
1973 config_descriptor,
1974 ("config-access", "ssh-access", "default-user"),
1975 )
1976 step = "Install configuration Software, getting public ssh key"
1977 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1978 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1979 )
1980
1981 step = "Insert public key into VM user={} ssh_key={}".format(
1982 user, pub_key
1983 )
1984 else:
1985 # self.logger.debug("no need to get ssh key")
1986 step = "Waiting to VM being up and getting IP address"
1987 self.logger.debug(logging_text + step)
1988
1989 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1990 rw_mgmt_ip = None
1991
1992 # n2vc_redesign STEP 5.1
1993 # wait for RO (ip-address) Insert pub_key into VM
1994 if vnfr_id:
1995 if kdu_name:
1996 rw_mgmt_ip, services = await self.wait_kdu_up(
1997 logging_text, nsr_id, vnfr_id, kdu_name
1998 )
1999 vnfd = self.db.get_one(
2000 "vnfds_revisions",
2001 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2002 )
2003 kdu = get_kdu(vnfd, kdu_name)
2004 kdu_services = [
2005 service["name"] for service in get_kdu_services(kdu)
2006 ]
2007 exposed_services = []
2008 for service in services:
2009 if any(s in service["name"] for s in kdu_services):
2010 exposed_services.append(service)
2011 await self.vca_map[vca_type].exec_primitive(
2012 ee_id=ee_id,
2013 primitive_name="config",
2014 params_dict={
2015 "osm-config": json.dumps(
2016 OsmConfigBuilder(
2017 k8s={"services": exposed_services}
2018 ).build()
2019 )
2020 },
2021 vca_id=vca_id,
2022 )
2023
2024 # This verification is needed in order to avoid trying to add a public key
2025 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2026 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2027 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2028 # or it is a KNF)
2029 elif db_vnfr.get("vdur"):
2030 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2031 logging_text,
2032 nsr_id,
2033 vnfr_id,
2034 vdu_id,
2035 vdu_index,
2036 user=user,
2037 pub_key=pub_key,
2038 )
2039
2040 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2041
2042 # store rw_mgmt_ip in deploy params for later replacement
2043 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2044
2045 # n2vc_redesign STEP 6 Execute initial config primitive
2046 step = "execute initial config primitive"
2047
2048 # wait for dependent primitives execution (NS -> VNF -> VDU)
2049 if initial_config_primitive_list:
2050 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2051
2052 # stage, in function of element type: vdu, kdu, vnf or ns
2053 my_vca = vca_deployed_list[vca_index]
2054 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2055 # VDU or KDU
2056 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2057 elif my_vca.get("member-vnf-index"):
2058 # VNF
2059 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2060 else:
2061 # NS
2062 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2063
2064 self._write_configuration_status(
2065 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2066 )
2067
2068 self._write_op_status(op_id=nslcmop_id, stage=stage)
2069
2070 check_if_terminated_needed = True
2071 for initial_config_primitive in initial_config_primitive_list:
2072 # adding information on the vca_deployed if it is a NS execution environment
2073 if not vca_deployed["member-vnf-index"]:
2074 deploy_params["ns_config_info"] = json.dumps(
2075 self._get_ns_config_info(nsr_id)
2076 )
2077 # TODO check if already done
2078 primitive_params_ = self._map_primitive_params(
2079 initial_config_primitive, {}, deploy_params
2080 )
2081
2082 step = "execute primitive '{}' params '{}'".format(
2083 initial_config_primitive["name"], primitive_params_
2084 )
2085 self.logger.debug(logging_text + step)
2086 await self.vca_map[vca_type].exec_primitive(
2087 ee_id=ee_id,
2088 primitive_name=initial_config_primitive["name"],
2089 params_dict=primitive_params_,
2090 db_dict=db_dict,
2091 vca_id=vca_id,
2092 vca_type=vca_type,
2093 )
2094 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2095 if check_if_terminated_needed:
2096 if config_descriptor.get("terminate-config-primitive"):
2097 self.update_db_2(
2098 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2099 )
2100 check_if_terminated_needed = False
2101
2102 # TODO register in database that primitive is done
2103
2104 # STEP 7 Configure metrics
2105 if vca_type == "helm" or vca_type == "helm-v3":
2106 # TODO: review for those cases where the helm chart is a reference and
2107 # is not part of the NF package
2108 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2109 ee_id=ee_id,
2110 artifact_path=artifact_path,
2111 ee_config_descriptor=ee_config_descriptor,
2112 vnfr_id=vnfr_id,
2113 nsr_id=nsr_id,
2114 target_ip=rw_mgmt_ip,
2115 element_type=element_type,
2116 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2117 vdu_id=vdu_id,
2118 vdu_index=vdu_index,
2119 kdu_name=kdu_name,
2120 kdu_index=kdu_index,
2121 )
2122 if prometheus_jobs:
2123 self.update_db_2(
2124 "nsrs",
2125 nsr_id,
2126 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2127 )
2128
2129 for job in prometheus_jobs:
2130 self.db.set_one(
2131 "prometheus_jobs",
2132 {"job_name": job["job_name"]},
2133 job,
2134 upsert=True,
2135 fail_on_empty=False,
2136 )
2137
2138 step = "instantiated at VCA"
2139 self.logger.debug(logging_text + step)
2140
2141 self._write_configuration_status(
2142 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2143 )
2144
2145 except Exception as e: # TODO not use Exception but N2VC exception
2146 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2147 if not isinstance(
2148 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2149 ):
2150 self.logger.error(
2151 "Exception while {} : {}".format(step, e), exc_info=True
2152 )
2153 self._write_configuration_status(
2154 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2155 )
2156 raise LcmException("{}. {}".format(step, e)) from e
2157
2158 def _write_ns_status(
2159 self,
2160 nsr_id: str,
2161 ns_state: str,
2162 current_operation: str,
2163 current_operation_id: str,
2164 error_description: str = None,
2165 error_detail: str = None,
2166 other_update: dict = None,
2167 ):
2168 """
2169 Update db_nsr fields.
2170 :param nsr_id:
2171 :param ns_state:
2172 :param current_operation:
2173 :param current_operation_id:
2174 :param error_description:
2175 :param error_detail:
2176 :param other_update: Other required changes at database if provided, will be cleared
2177 :return:
2178 """
2179 try:
2180 db_dict = other_update or {}
2181 db_dict[
2182 "_admin.nslcmop"
2183 ] = current_operation_id # for backward compatibility
2184 db_dict["_admin.current-operation"] = current_operation_id
2185 db_dict["_admin.operation-type"] = (
2186 current_operation if current_operation != "IDLE" else None
2187 )
2188 db_dict["currentOperation"] = current_operation
2189 db_dict["currentOperationID"] = current_operation_id
2190 db_dict["errorDescription"] = error_description
2191 db_dict["errorDetail"] = error_detail
2192
2193 if ns_state:
2194 db_dict["nsState"] = ns_state
2195 self.update_db_2("nsrs", nsr_id, db_dict)
2196 except DbException as e:
2197 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2198
2199 def _write_op_status(
2200 self,
2201 op_id: str,
2202 stage: list = None,
2203 error_message: str = None,
2204 queuePosition: int = 0,
2205 operation_state: str = None,
2206 other_update: dict = None,
2207 ):
2208 try:
2209 db_dict = other_update or {}
2210 db_dict["queuePosition"] = queuePosition
2211 if isinstance(stage, list):
2212 db_dict["stage"] = stage[0]
2213 db_dict["detailed-status"] = " ".join(stage)
2214 elif stage is not None:
2215 db_dict["stage"] = str(stage)
2216
2217 if error_message is not None:
2218 db_dict["errorMessage"] = error_message
2219 if operation_state is not None:
2220 db_dict["operationState"] = operation_state
2221 db_dict["statusEnteredTime"] = time()
2222 self.update_db_2("nslcmops", op_id, db_dict)
2223 except DbException as e:
2224 self.logger.warn(
2225 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2226 )
2227
2228 def _write_all_config_status(self, db_nsr: dict, status: str):
2229 try:
2230 nsr_id = db_nsr["_id"]
2231 # configurationStatus
2232 config_status = db_nsr.get("configurationStatus")
2233 if config_status:
2234 db_nsr_update = {
2235 "configurationStatus.{}.status".format(index): status
2236 for index, v in enumerate(config_status)
2237 if v
2238 }
2239 # update status
2240 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2241
2242 except DbException as e:
2243 self.logger.warn(
2244 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2245 )
2246
2247 def _write_configuration_status(
2248 self,
2249 nsr_id: str,
2250 vca_index: int,
2251 status: str = None,
2252 element_under_configuration: str = None,
2253 element_type: str = None,
2254 other_update: dict = None,
2255 ):
2256 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2257 # .format(vca_index, status))
2258
2259 try:
2260 db_path = "configurationStatus.{}.".format(vca_index)
2261 db_dict = other_update or {}
2262 if status:
2263 db_dict[db_path + "status"] = status
2264 if element_under_configuration:
2265 db_dict[
2266 db_path + "elementUnderConfiguration"
2267 ] = element_under_configuration
2268 if element_type:
2269 db_dict[db_path + "elementType"] = element_type
2270 self.update_db_2("nsrs", nsr_id, db_dict)
2271 except DbException as e:
2272 self.logger.warn(
2273 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2274 status, nsr_id, vca_index, e
2275 )
2276 )
2277
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
                 computed 'vim-account-id'
        :raises LcmException: when PLA does not answer within the polling window
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        # only act when an external placement engine ("PLA") was requested
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # fire the request; the answer arrives asynchronously via kafka and
            # is stored at the nslcmop record (see update_nsrs_with_pla_result)
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database (possibly filled by another LCM worker in HA)
            # every 5 seconds, up to ~50 seconds total
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # apply the suggested vim account to each matching vnfr
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a suggestion or without a known vnfr
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs (in-memory copy kept in sync with database)
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2326
2327 def update_nsrs_with_pla_result(self, params):
2328 try:
2329 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2330 self.update_db_2(
2331 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2332 )
2333 except Exception as e:
2334 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2335
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: deploys KDUs, VMs (through RO) and
        execution environments/charms (through N2VC). Progress and final
        result are written to the nsrs/nslcmops database records and the
        outcome is notified through kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Errors are reflected at database/kafka, not raised.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        timeout_ns_deploy = self.timeout.ns_deploy

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdu additionalParams is stored JSON-encoded; decode it
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so "vnfd_id not in
                # db_vnfds" is always True here and a vnfd shared by several
                # vnfrs is re-read and appended once per vnfr — verify intended
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.public_key:
                n2vc_key_list.append(self.vca_config.public_key)

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with N2VC deployment below; the
            # task is tracked at tasks_dict_info and awaited at the finally
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            # create namespace and certificate if any helm based EE is present in the NS
            if check_helm_ee_in_ns(db_vnfds):
                # TODO: create EE namespace
                # create TLS certificates
                await self.vca_map["helm-v3"].create_tls_certificate(
                    secret_name="ee-tls-{}".format(nsr_id),
                    dns_prefix="*",
                    nsr_id=nsr_id,
                    usage="server auth",
                )

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None
                kdu_index = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF level charm, if declared at the vnfd
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        kdu_index=kdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        kdu_index = None
                        # one execution environment per vdu instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                kdu_index=kdu_index,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU level charms, if declared at the vnfd
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdu_index, kdur = next(
                            x
                            for x in enumerate(db_vnfr["kdur"])
                            if x[1]["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            kdu_index=kdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                kdu_index = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify the operation result (e.g. to NBI) through kafka
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2839
2840 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2841 if vnfd_id not in cached_vnfds:
2842 cached_vnfds[vnfd_id] = self.db.get_one(
2843 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2844 )
2845 return cached_vnfds[vnfd_id]
2846
2847 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2848 if vnf_profile_id not in cached_vnfrs:
2849 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2850 "vnfrs",
2851 {
2852 "member-vnf-index-ref": vnf_profile_id,
2853 "nsr-id-ref": nsr_id,
2854 },
2855 )
2856 return cached_vnfrs[vnf_profile_id]
2857
2858 def _is_deployed_vca_in_relation(
2859 self, vca: DeployedVCA, relation: Relation
2860 ) -> bool:
2861 found = False
2862 for endpoint in (relation.provider, relation.requirer):
2863 if endpoint["kdu-resource-profile-id"]:
2864 continue
2865 found = (
2866 vca.vnf_profile_id == endpoint.vnf_profile_id
2867 and vca.vdu_profile_id == endpoint.vdu_profile_id
2868 and vca.execution_environment_ref == endpoint.execution_environment_ref
2869 )
2870 if found:
2871 break
2872 return found
2873
2874 def _update_ee_relation_data_with_implicit_data(
2875 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2876 ):
2877 ee_relation_data = safe_get_ee_relation(
2878 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2879 )
2880 ee_relation_level = EELevel.get_level(ee_relation_data)
2881 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2882 "execution-environment-ref"
2883 ]:
2884 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2885 vnfd_id = vnf_profile["vnfd-id"]
2886 project = nsd["_admin"]["projects_read"][0]
2887 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2888 entity_id = (
2889 vnfd_id
2890 if ee_relation_level == EELevel.VNF
2891 else ee_relation_data["vdu-profile-id"]
2892 )
2893 ee = get_juju_ee_ref(db_vnfd, entity_id)
2894 if not ee:
2895 raise Exception(
2896 f"not execution environments found for ee_relation {ee_relation_data}"
2897 )
2898 ee_relation_data["execution-environment-ref"] = ee["id"]
2899 return ee_relation_data
2900
2901 def _get_ns_relations(
2902 self,
2903 nsr_id: str,
2904 nsd: Dict[str, Any],
2905 vca: DeployedVCA,
2906 cached_vnfds: Dict[str, Any],
2907 ) -> List[Relation]:
2908 relations = []
2909 db_ns_relations = get_ns_configuration_relation_list(nsd)
2910 for r in db_ns_relations:
2911 provider_dict = None
2912 requirer_dict = None
2913 if all(key in r for key in ("provider", "requirer")):
2914 provider_dict = r["provider"]
2915 requirer_dict = r["requirer"]
2916 elif "entities" in r:
2917 provider_id = r["entities"][0]["id"]
2918 provider_dict = {
2919 "nsr-id": nsr_id,
2920 "endpoint": r["entities"][0]["endpoint"],
2921 }
2922 if provider_id != nsd["id"]:
2923 provider_dict["vnf-profile-id"] = provider_id
2924 requirer_id = r["entities"][1]["id"]
2925 requirer_dict = {
2926 "nsr-id": nsr_id,
2927 "endpoint": r["entities"][1]["endpoint"],
2928 }
2929 if requirer_id != nsd["id"]:
2930 requirer_dict["vnf-profile-id"] = requirer_id
2931 else:
2932 raise Exception(
2933 "provider/requirer or entities must be included in the relation."
2934 )
2935 relation_provider = self._update_ee_relation_data_with_implicit_data(
2936 nsr_id, nsd, provider_dict, cached_vnfds
2937 )
2938 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2939 nsr_id, nsd, requirer_dict, cached_vnfds
2940 )
2941 provider = EERelation(relation_provider)
2942 requirer = EERelation(relation_requirer)
2943 relation = Relation(r["name"], provider, requirer)
2944 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2945 if vca_in_relation:
2946 relations.append(relation)
2947 return relations
2948
2949 def _get_vnf_relations(
2950 self,
2951 nsr_id: str,
2952 nsd: Dict[str, Any],
2953 vca: DeployedVCA,
2954 cached_vnfds: Dict[str, Any],
2955 ) -> List[Relation]:
2956 relations = []
2957 if vca.target_element == "ns":
2958 self.logger.debug("VCA is a NS charm, not a VNF.")
2959 return relations
2960 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2961 vnf_profile_id = vnf_profile["id"]
2962 vnfd_id = vnf_profile["vnfd-id"]
2963 project = nsd["_admin"]["projects_read"][0]
2964 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2965 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2966 for r in db_vnf_relations:
2967 provider_dict = None
2968 requirer_dict = None
2969 if all(key in r for key in ("provider", "requirer")):
2970 provider_dict = r["provider"]
2971 requirer_dict = r["requirer"]
2972 elif "entities" in r:
2973 provider_id = r["entities"][0]["id"]
2974 provider_dict = {
2975 "nsr-id": nsr_id,
2976 "vnf-profile-id": vnf_profile_id,
2977 "endpoint": r["entities"][0]["endpoint"],
2978 }
2979 if provider_id != vnfd_id:
2980 provider_dict["vdu-profile-id"] = provider_id
2981 requirer_id = r["entities"][1]["id"]
2982 requirer_dict = {
2983 "nsr-id": nsr_id,
2984 "vnf-profile-id": vnf_profile_id,
2985 "endpoint": r["entities"][1]["endpoint"],
2986 }
2987 if requirer_id != vnfd_id:
2988 requirer_dict["vdu-profile-id"] = requirer_id
2989 else:
2990 raise Exception(
2991 "provider/requirer or entities must be included in the relation."
2992 )
2993 relation_provider = self._update_ee_relation_data_with_implicit_data(
2994 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2995 )
2996 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2997 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2998 )
2999 provider = EERelation(relation_provider)
3000 requirer = EERelation(relation_requirer)
3001 relation = Relation(r["name"], provider, requirer)
3002 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3003 if vca_in_relation:
3004 relations.append(relation)
3005 return relations
3006
3007 def _get_kdu_resource_data(
3008 self,
3009 ee_relation: EERelation,
3010 db_nsr: Dict[str, Any],
3011 cached_vnfds: Dict[str, Any],
3012 ) -> DeployedK8sResource:
3013 nsd = get_nsd(db_nsr)
3014 vnf_profiles = get_vnf_profiles(nsd)
3015 vnfd_id = find_in_list(
3016 vnf_profiles,
3017 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3018 )["vnfd-id"]
3019 project = nsd["_admin"]["projects_read"][0]
3020 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3021 kdu_resource_profile = get_kdu_resource_profile(
3022 db_vnfd, ee_relation.kdu_resource_profile_id
3023 )
3024 kdu_name = kdu_resource_profile["kdu-name"]
3025 deployed_kdu, _ = get_deployed_kdu(
3026 db_nsr.get("_admin", ()).get("deployed", ()),
3027 kdu_name,
3028 ee_relation.vnf_profile_id,
3029 )
3030 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3031 return deployed_kdu
3032
3033 def _get_deployed_component(
3034 self,
3035 ee_relation: EERelation,
3036 db_nsr: Dict[str, Any],
3037 cached_vnfds: Dict[str, Any],
3038 ) -> DeployedComponent:
3039 nsr_id = db_nsr["_id"]
3040 deployed_component = None
3041 ee_level = EELevel.get_level(ee_relation)
3042 if ee_level == EELevel.NS:
3043 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3044 if vca:
3045 deployed_component = DeployedVCA(nsr_id, vca)
3046 elif ee_level == EELevel.VNF:
3047 vca = get_deployed_vca(
3048 db_nsr,
3049 {
3050 "vdu_id": None,
3051 "member-vnf-index": ee_relation.vnf_profile_id,
3052 "ee_descriptor_id": ee_relation.execution_environment_ref,
3053 },
3054 )
3055 if vca:
3056 deployed_component = DeployedVCA(nsr_id, vca)
3057 elif ee_level == EELevel.VDU:
3058 vca = get_deployed_vca(
3059 db_nsr,
3060 {
3061 "vdu_id": ee_relation.vdu_profile_id,
3062 "member-vnf-index": ee_relation.vnf_profile_id,
3063 "ee_descriptor_id": ee_relation.execution_environment_ref,
3064 },
3065 )
3066 if vca:
3067 deployed_component = DeployedVCA(nsr_id, vca)
3068 elif ee_level == EELevel.KDU:
3069 kdu_resource_data = self._get_kdu_resource_data(
3070 ee_relation, db_nsr, cached_vnfds
3071 )
3072 if kdu_resource_data:
3073 deployed_component = DeployedK8sResource(kdu_resource_data)
3074 return deployed_component
3075
3076 async def _add_relation(
3077 self,
3078 relation: Relation,
3079 vca_type: str,
3080 db_nsr: Dict[str, Any],
3081 cached_vnfds: Dict[str, Any],
3082 cached_vnfrs: Dict[str, Any],
3083 ) -> bool:
3084 deployed_provider = self._get_deployed_component(
3085 relation.provider, db_nsr, cached_vnfds
3086 )
3087 deployed_requirer = self._get_deployed_component(
3088 relation.requirer, db_nsr, cached_vnfds
3089 )
3090 if (
3091 deployed_provider
3092 and deployed_requirer
3093 and deployed_provider.config_sw_installed
3094 and deployed_requirer.config_sw_installed
3095 ):
3096 provider_db_vnfr = (
3097 self._get_vnfr(
3098 relation.provider.nsr_id,
3099 relation.provider.vnf_profile_id,
3100 cached_vnfrs,
3101 )
3102 if relation.provider.vnf_profile_id
3103 else None
3104 )
3105 requirer_db_vnfr = (
3106 self._get_vnfr(
3107 relation.requirer.nsr_id,
3108 relation.requirer.vnf_profile_id,
3109 cached_vnfrs,
3110 )
3111 if relation.requirer.vnf_profile_id
3112 else None
3113 )
3114 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3115 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3116 provider_relation_endpoint = RelationEndpoint(
3117 deployed_provider.ee_id,
3118 provider_vca_id,
3119 relation.provider.endpoint,
3120 )
3121 requirer_relation_endpoint = RelationEndpoint(
3122 deployed_requirer.ee_id,
3123 requirer_vca_id,
3124 relation.requirer.endpoint,
3125 )
3126 try:
3127 await self.vca_map[vca_type].add_relation(
3128 provider=provider_relation_endpoint,
3129 requirer=requirer_relation_endpoint,
3130 )
3131 except N2VCException as exception:
3132 self.logger.error(exception)
3133 raise LcmException(exception)
3134 return True
3135 return False
3136
3137 async def _add_vca_relations(
3138 self,
3139 logging_text,
3140 nsr_id,
3141 vca_type: str,
3142 vca_index: int,
3143 timeout: int = 3600,
3144 ) -> bool:
3145 # steps:
3146 # 1. find all relations for this VCA
3147 # 2. wait for other peers related
3148 # 3. add relations
3149
3150 try:
3151 # STEP 1: find all relations for this VCA
3152
3153 # read nsr record
3154 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3155 nsd = get_nsd(db_nsr)
3156
3157 # this VCA data
3158 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3159 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3160
3161 cached_vnfds = {}
3162 cached_vnfrs = {}
3163 relations = []
3164 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3165 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3166
3167 # if no relations, terminate
3168 if not relations:
3169 self.logger.debug(logging_text + " No relations")
3170 return True
3171
3172 self.logger.debug(logging_text + " adding relations {}".format(relations))
3173
3174 # add all relations
3175 start = time()
3176 while True:
3177 # check timeout
3178 now = time()
3179 if now - start >= timeout:
3180 self.logger.error(logging_text + " : timeout adding relations")
3181 return False
3182
3183 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3184 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3185
3186 # for each relation, find the VCA's related
3187 for relation in relations.copy():
3188 added = await self._add_relation(
3189 relation,
3190 vca_type,
3191 db_nsr,
3192 cached_vnfds,
3193 cached_vnfrs,
3194 )
3195 if added:
3196 relations.remove(relation)
3197
3198 if not relations:
3199 self.logger.debug("Relations added")
3200 break
3201 await asyncio.sleep(5.0)
3202
3203 return True
3204
3205 except Exception as e:
3206 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3207 return False
3208
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its k8s cluster and update the nsr/vnfr records.

        :param nsr_id: NS record id
        :param nsr_db_path: path of this KDU entry inside the nsr record
            (deploy_kdus passes "_admin.deployed.K8s.<index>")
        :param vnfr_data: VNF record this KDU belongs to
        :param kdu_index: index of this KDU inside the vnfr "kdur" list
        :param kdu d: KDU descriptor entry of the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace,
            and optional kdu-deployment-name for this deployment
        :param k8params: parameters passed to the install, default None
        :param timeout: seconds allowed for the install and for each initial
            config primitive, default 600
        :param vca_id: VCA id forwarded to the k8s connector, default None
        :return: the kdu instance name used for the deployment
        :raises: re-raises any failure after recording the error in the
            nsr detailed-status and the kdur status
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Use the user-provided deployment name when present; otherwise let
            # the cluster connector generate one.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # Only services flagged as mgmt-service in the descriptor matter here.
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for-else: no deployed service matched this mgmt service name
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives directly on the cluster, but only
            # when no juju execution environment is in charge of them.
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3405
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU present in the descriptors.

        One asyncio task per KDU is created (running _install_kdu), registered
        in self.lcm_tasks and annotated in task_instantiation_info; each KDU is
        recorded at _admin.deployed.K8s.<index> in the nsr record.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id
        :param db_vnfrs: dict of VNF records of this NS
        :param db_vnfds: list of VNF descriptors
        :param task_instantiation_info: dict where the created tasks are stored
        :raises LcmException: on any error preparing the deployments
        """
        # Launch kdus if present in the descriptor

        # Cache of cluster-id -> internal k8s id, per cluster type.
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal id of a k8s cluster, waiting for
            related k8scluster tasks and initializing helm-v3 on demand."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                # chart/bundle embedded directly in the package
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3677
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create (or reuse) the VCA entries in the nsr record and launch one
        instantiate_N2VC asyncio task per execution environment of the given
        configuration descriptor. Created tasks are registered in
        self.lcm_tasks and annotated in task_instantiation_info."""
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Determine the VCA type from the execution environment contents.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing VCA entry matching this target; the for-else
            # creates a fresh entry when none matches.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # after the loop, vca_index is the last index; the new entry is
                # appended at the next position
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3841
3842 @staticmethod
3843 def _create_nslcmop(nsr_id, operation, params):
3844 """
3845 Creates a ns-lcm-opp content to be stored at database.
3846 :param nsr_id: internal id of the instance
3847 :param operation: instantiate, terminate, scale, action, ...
3848 :param params: user parameters for the operation
3849 :return: dictionary following SOL005 format
3850 """
3851 # Raise exception if invalid arguments
3852 if not (nsr_id and operation and params):
3853 raise LcmException(
3854 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3855 )
3856 now = time()
3857 _id = str(uuid4())
3858 nslcmop = {
3859 "id": _id,
3860 "_id": _id,
3861 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3862 "operationState": "PROCESSING",
3863 "statusEnteredTime": now,
3864 "nsInstanceId": nsr_id,
3865 "lcmOperationType": operation,
3866 "startTime": now,
3867 "isAutomaticInvocation": False,
3868 "operationParams": params,
3869 "isCancelPending": False,
3870 "links": {
3871 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3872 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3873 },
3874 }
3875 return nslcmop
3876
3877 def _format_additional_params(self, params):
3878 params = params or {}
3879 for key, value in params.items():
3880 if str(value).startswith("!!yaml "):
3881 params[key] = yaml.safe_load(value[7:])
3882 return params
3883
3884 def _get_terminate_primitive_params(self, seq, vnf_index):
3885 primitive = seq.get("name")
3886 primitive_params = {}
3887 params = {
3888 "member_vnf_index": vnf_index,
3889 "primitive": primitive,
3890 "primitive_params": primitive_params,
3891 }
3892 desc_params = {}
3893 return self._map_primitive_params(seq, params, desc_params)
3894
3895 # sub-operations
3896
3897 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3898 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3899 if op.get("operationState") == "COMPLETED":
3900 # b. Skip sub-operation
3901 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3902 return self.SUBOPERATION_STATUS_SKIP
3903 else:
3904 # c. retry executing sub-operation
3905 # The sub-operation exists, and operationState != 'COMPLETED'
3906 # Update operationState = 'PROCESSING' to indicate a retry.
3907 operationState = "PROCESSING"
3908 detailed_status = "In progress"
3909 self._update_suboperation_status(
3910 db_nslcmop, op_index, operationState, detailed_status
3911 )
3912 # Return the sub-operation index
3913 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3914 # with arguments extracted from the sub-operation
3915 return op_index
3916
3917 # Find a sub-operation where all keys in a matching dictionary must match
3918 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3919 def _find_suboperation(self, db_nslcmop, match):
3920 if db_nslcmop and match:
3921 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3922 for i, op in enumerate(op_list):
3923 if all(op.get(k) == match[k] for k in match):
3924 return i
3925 return self.SUBOPERATION_STATUS_NOT_FOUND
3926
3927 # Update status for a sub-operation given its index
3928 def _update_suboperation_status(
3929 self, db_nslcmop, op_index, operationState, detailed_status
3930 ):
3931 # Update DB for HA tasks
3932 q_filter = {"_id": db_nslcmop["_id"]}
3933 update_dict = {
3934 "_admin.operations.{}.operationState".format(op_index): operationState,
3935 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3936 }
3937 self.db.set_one(
3938 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3939 )
3940
3941 # Add sub-operation, return the index of the added sub-operation
3942 # Optionally, set operationState, detailed-status, and operationType
3943 # Status and type are currently set for 'scale' sub-operations:
3944 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3945 # 'detailed-status' : status message
3946 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3947 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3948 def _add_suboperation(
3949 self,
3950 db_nslcmop,
3951 vnf_index,
3952 vdu_id,
3953 vdu_count_index,
3954 vdu_name,
3955 primitive,
3956 mapped_primitive_params,
3957 operationState=None,
3958 detailed_status=None,
3959 operationType=None,
3960 RO_nsr_id=None,
3961 RO_scaling_info=None,
3962 ):
3963 if not db_nslcmop:
3964 return self.SUBOPERATION_STATUS_NOT_FOUND
3965 # Get the "_admin.operations" list, if it exists
3966 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3967 op_list = db_nslcmop_admin.get("operations")
3968 # Create or append to the "_admin.operations" list
3969 new_op = {
3970 "member_vnf_index": vnf_index,
3971 "vdu_id": vdu_id,
3972 "vdu_count_index": vdu_count_index,
3973 "primitive": primitive,
3974 "primitive_params": mapped_primitive_params,
3975 }
3976 if operationState:
3977 new_op["operationState"] = operationState
3978 if detailed_status:
3979 new_op["detailed-status"] = detailed_status
3980 if operationType:
3981 new_op["lcmOperationType"] = operationType
3982 if RO_nsr_id:
3983 new_op["RO_nsr_id"] = RO_nsr_id
3984 if RO_scaling_info:
3985 new_op["RO_scaling_info"] = RO_scaling_info
3986 if not op_list:
3987 # No existing operations, create key 'operations' with current operation as first list element
3988 db_nslcmop_admin.update({"operations": [new_op]})
3989 op_list = db_nslcmop_admin.get("operations")
3990 else:
3991 # Existing operations, append operation to list
3992 op_list.append(new_op)
3993
3994 db_nslcmop_update = {"_admin.operations": op_list}
3995 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3996 op_index = len(op_list) - 1
3997 return op_index
3998
3999 # Helper methods for scale() sub-operations
4000
4001 # pre-scale/post-scale:
4002 # Check for 3 different cases:
4003 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4004 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4005 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4006 def _check_or_add_scale_suboperation(
4007 self,
4008 db_nslcmop,
4009 vnf_index,
4010 vnf_config_primitive,
4011 primitive_params,
4012 operationType,
4013 RO_nsr_id=None,
4014 RO_scaling_info=None,
4015 ):
4016 # Find this sub-operation
4017 if RO_nsr_id and RO_scaling_info:
4018 operationType = "SCALE-RO"
4019 match = {
4020 "member_vnf_index": vnf_index,
4021 "RO_nsr_id": RO_nsr_id,
4022 "RO_scaling_info": RO_scaling_info,
4023 }
4024 else:
4025 match = {
4026 "member_vnf_index": vnf_index,
4027 "primitive": vnf_config_primitive,
4028 "primitive_params": primitive_params,
4029 "lcmOperationType": operationType,
4030 }
4031 op_index = self._find_suboperation(db_nslcmop, match)
4032 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4033 # a. New sub-operation
4034 # The sub-operation does not exist, add it.
4035 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4036 # The following parameters are set to None for all kind of scaling:
4037 vdu_id = None
4038 vdu_count_index = None
4039 vdu_name = None
4040 if RO_nsr_id and RO_scaling_info:
4041 vnf_config_primitive = None
4042 primitive_params = None
4043 else:
4044 RO_nsr_id = None
4045 RO_scaling_info = None
4046 # Initial status for sub-operation
4047 operationState = "PROCESSING"
4048 detailed_status = "In progress"
4049 # Add sub-operation for pre/post-scaling (zero or more operations)
4050 self._add_suboperation(
4051 db_nslcmop,
4052 vnf_index,
4053 vdu_id,
4054 vdu_count_index,
4055 vdu_name,
4056 vnf_config_primitive,
4057 primitive_params,
4058 operationState,
4059 detailed_status,
4060 operationType,
4061 RO_nsr_id,
4062 RO_scaling_info,
4063 )
4064 return self.SUBOPERATION_STATUS_NEW
4065 else:
4066 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4067 # or op_index (operationState != 'COMPLETED')
4068 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4069
4070 # Function to return execution_environment id
4071
4072 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4073 # TODO vdu_index_count
4074 for vca in vca_deployed_list:
4075 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4076 return vca["ee_id"]
4077
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: nslcmop database record; terminate sub-operations are appended to it
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy it, because all of them will be destroyed at once
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
        not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier passed through to the VCA connector calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default for backward compatibility: records without "type" are proxy charms
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4183
4184 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4185 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4186 namespace = "." + db_nsr["_id"]
4187 try:
4188 await self.n2vc.delete_namespace(
4189 namespace=namespace,
4190 total_timeout=self.timeout.charm_delete,
4191 vca_id=vca_id,
4192 )
4193 except N2VCNotFound: # already deleted. Skip
4194 pass
4195 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4196
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance in three stages: prepare, run terminate
        primitives per VCA, then delete all execution environments, KDUs and
        the VIM deployment.

        Progress and final state are persisted to the "nsrs"/"nslcmops"
        collections and a "terminated" event is published on the message bus.
        :param nsr_id: _id of the "nsrs" record
        :param nslcmop_id: _id of the terminate operation ("nslcmops" record)
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human-readable description, awaited in finally
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deep copy so later DB updates cannot mutate the deployment info used here
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; each VNFD is fetched from DB only once (cached by id)
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # choose the configuration descriptor matching the VCA scope:
                # ns-level, vdu-level, kdu-level or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4526
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Await a set of asyncio tasks, updating operation progress in the DB.

        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict mapping each task to a human-readable
            description used in progress/error messages
        :param timeout: overall deadline in seconds for the whole set of tasks
        :param stage: 3-element [stage, step, VIM-status] list; stage[1] is
            rewritten with "done/total" progress (and errors, if any)
        :param nslcmop_id: operation id whose status is updated on each pass
        :param nsr_id: when given, errorDescription/errorDetail are also written
            to the "nsrs" record on error
        :return: list of error detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time until the global deadline
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types are logged without a
                    # traceback; anything else gets the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4603
4604 @staticmethod
4605 def _map_primitive_params(primitive_desc, params, instantiation_params):
4606 """
4607 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4608 The default-value is used. If it is between < > it look for a value at instantiation_params
4609 :param primitive_desc: portion of VNFD/NSD that describes primitive
4610 :param params: Params provided by user
4611 :param instantiation_params: Instantiation params provided by user
4612 :return: a dictionary with the calculated params
4613 """
4614 calculated_params = {}
4615 for parameter in primitive_desc.get("parameter", ()):
4616 param_name = parameter["name"]
4617 if param_name in params:
4618 calculated_params[param_name] = params[param_name]
4619 elif "default-value" in parameter or "value" in parameter:
4620 if "value" in parameter:
4621 calculated_params[param_name] = parameter["value"]
4622 else:
4623 calculated_params[param_name] = parameter["default-value"]
4624 if (
4625 isinstance(calculated_params[param_name], str)
4626 and calculated_params[param_name].startswith("<")
4627 and calculated_params[param_name].endswith(">")
4628 ):
4629 if calculated_params[param_name][1:-1] in instantiation_params:
4630 calculated_params[param_name] = instantiation_params[
4631 calculated_params[param_name][1:-1]
4632 ]
4633 else:
4634 raise LcmException(
4635 "Parameter {} needed to execute primitive {} not provided".format(
4636 calculated_params[param_name], primitive_desc["name"]
4637 )
4638 )
4639 else:
4640 raise LcmException(
4641 "Parameter {} needed to execute primitive {} not provided".format(
4642 param_name, primitive_desc["name"]
4643 )
4644 )
4645
4646 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4647 calculated_params[param_name] = yaml.safe_dump(
4648 calculated_params[param_name], default_flow_style=True, width=256
4649 )
4650 elif isinstance(calculated_params[param_name], str) and calculated_params[
4651 param_name
4652 ].startswith("!!yaml "):
4653 calculated_params[param_name] = calculated_params[param_name][7:]
4654 if parameter.get("data-type") == "INTEGER":
4655 try:
4656 calculated_params[param_name] = int(calculated_params[param_name])
4657 except ValueError: # error converting string to int
4658 raise LcmException(
4659 "Parameter {} of primitive {} must be integer".format(
4660 param_name, primitive_desc["name"]
4661 )
4662 )
4663 elif parameter.get("data-type") == "BOOLEAN":
4664 calculated_params[param_name] = not (
4665 (str(calculated_params[param_name])).lower() == "false"
4666 )
4667
4668 # add always ns_config_info if primitive name is config
4669 if primitive_desc["name"] == "config":
4670 if "ns_config_info" in instantiation_params:
4671 calculated_params["ns_config_info"] = instantiation_params[
4672 "ns_config_info"
4673 ]
4674 return calculated_params
4675
4676 def _look_for_deployed_vca(
4677 self,
4678 deployed_vca,
4679 member_vnf_index,
4680 vdu_id,
4681 vdu_count_index,
4682 kdu_name=None,
4683 ee_descriptor_id=None,
4684 ):
4685 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4686 for vca in deployed_vca:
4687 if not vca:
4688 continue
4689 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4690 continue
4691 if (
4692 vdu_count_index is not None
4693 and vdu_count_index != vca["vdu_count_index"]
4694 ):
4695 continue
4696 if kdu_name and kdu_name != vca["kdu_name"]:
4697 continue
4698 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4699 continue
4700 break
4701 else:
4702 # vca_deployed not found
4703 raise LcmException(
4704 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4705 " is not deployed".format(
4706 member_vnf_index,
4707 vdu_id,
4708 vdu_count_index,
4709 kdu_name,
4710 ee_descriptor_id,
4711 )
4712 )
4713 # get ee_id
4714 ee_id = vca.get("ee_id")
4715 vca_type = vca.get(
4716 "type", "lxc_proxy_charm"
4717 ) # default value for backward compatibility - proxy charm
4718 if not ee_id:
4719 raise LcmException(
4720 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4721 "execution environment".format(
4722 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4723 )
4724 )
4725 return ee_id, vca_type
4726
4727 async def _ns_execute_primitive(
4728 self,
4729 ee_id,
4730 primitive,
4731 primitive_params,
4732 retries=0,
4733 retries_interval=30,
4734 timeout=None,
4735 vca_type=None,
4736 db_dict=None,
4737 vca_id: str = None,
4738 ) -> (str, str):
4739 try:
4740 if primitive == "config":
4741 primitive_params = {"params": primitive_params}
4742
4743 vca_type = vca_type or "lxc_proxy_charm"
4744
4745 while retries >= 0:
4746 try:
4747 output = await asyncio.wait_for(
4748 self.vca_map[vca_type].exec_primitive(
4749 ee_id=ee_id,
4750 primitive_name=primitive,
4751 params_dict=primitive_params,
4752 progress_timeout=self.timeout.progress_primitive,
4753 total_timeout=self.timeout.primitive,
4754 db_dict=db_dict,
4755 vca_id=vca_id,
4756 vca_type=vca_type,
4757 ),
4758 timeout=timeout or self.timeout.primitive,
4759 )
4760 # execution was OK
4761 break
4762 except asyncio.CancelledError:
4763 raise
4764 except Exception as e:
4765 retries -= 1
4766 if retries >= 0:
4767 self.logger.debug(
4768 "Error executing action {} on {} -> {}".format(
4769 primitive, ee_id, e
4770 )
4771 )
4772 # wait and retry
4773 await asyncio.sleep(retries_interval, loop=self.loop)
4774 else:
4775 if isinstance(e, asyncio.TimeoutError):
4776 e = N2VCException(
4777 message="Timed out waiting for action to complete"
4778 )
4779 return "FAILED", getattr(e, "message", repr(e))
4780
4781 return "COMPLETED", output
4782
4783 except (LcmException, asyncio.CancelledError):
4784 raise
4785 except Exception as e:
4786 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4787
4788 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4789 """
4790 Updating the vca_status with latest juju information in nsrs record
4791 :param: nsr_id: Id of the nsr
4792 :param: nslcmop_id: Id of the nslcmop
4793 :return: None
4794 """
4795
4796 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4797 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4798 vca_id = self.get_vca_id({}, db_nsr)
4799 if db_nsr["_admin"]["deployed"]["K8s"]:
4800 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4801 cluster_uuid, kdu_instance, cluster_type = (
4802 k8s["k8scluster-uuid"],
4803 k8s["kdu-instance"],
4804 k8s["k8scluster-type"],
4805 )
4806 await self._on_update_k8s_db(
4807 cluster_uuid=cluster_uuid,
4808 kdu_instance=kdu_instance,
4809 filter={"_id": nsr_id},
4810 vca_id=vca_id,
4811 cluster_type=cluster_type,
4812 )
4813 else:
4814 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4815 table, filter = "nsrs", {"_id": nsr_id}
4816 path = "_admin.deployed.VCA.{}.".format(vca_index)
4817 await self._on_update_n2vc_db(table, filter, path, {})
4818
4819 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4820 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4821
4822 async def action(self, nsr_id, nslcmop_id):
4823 # Try to lock HA task here
4824 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4825 if not task_is_locked_by_me:
4826 return
4827
4828 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4829 self.logger.debug(logging_text + "Enter")
4830 # get all needed from database
4831 db_nsr = None
4832 db_nslcmop = None
4833 db_nsr_update = {}
4834 db_nslcmop_update = {}
4835 nslcmop_operation_state = None
4836 error_description_nslcmop = None
4837 exc = None
4838 step = ""
4839 try:
4840 # wait for any previous tasks in process
4841 step = "Waiting for previous operations to terminate"
4842 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4843
4844 self._write_ns_status(
4845 nsr_id=nsr_id,
4846 ns_state=None,
4847 current_operation="RUNNING ACTION",
4848 current_operation_id=nslcmop_id,
4849 )
4850
4851 step = "Getting information from database"
4852 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4853 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4854 if db_nslcmop["operationParams"].get("primitive_params"):
4855 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4856 db_nslcmop["operationParams"]["primitive_params"]
4857 )
4858
4859 nsr_deployed = db_nsr["_admin"].get("deployed")
4860 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4861 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4862 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4863 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4864 primitive = db_nslcmop["operationParams"]["primitive"]
4865 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4866 timeout_ns_action = db_nslcmop["operationParams"].get(
4867 "timeout_ns_action", self.timeout.primitive
4868 )
4869
4870 if vnf_index:
4871 step = "Getting vnfr from database"
4872 db_vnfr = self.db.get_one(
4873 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4874 )
4875 if db_vnfr.get("kdur"):
4876 kdur_list = []
4877 for kdur in db_vnfr["kdur"]:
4878 if kdur.get("additionalParams"):
4879 kdur["additionalParams"] = json.loads(
4880 kdur["additionalParams"]
4881 )
4882 kdur_list.append(kdur)
4883 db_vnfr["kdur"] = kdur_list
4884 step = "Getting vnfd from database"
4885 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4886
4887 # Sync filesystem before running a primitive
4888 self.fs.sync(db_vnfr["vnfd-id"])
4889 else:
4890 step = "Getting nsd from database"
4891 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4892
4893 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4894 # for backward compatibility
4895 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4896 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4897 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4898 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4899
4900 # look for primitive
4901 config_primitive_desc = descriptor_configuration = None
4902 if vdu_id:
4903 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4904 elif kdu_name:
4905 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4906 elif vnf_index:
4907 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4908 else:
4909 descriptor_configuration = db_nsd.get("ns-configuration")
4910
4911 if descriptor_configuration and descriptor_configuration.get(
4912 "config-primitive"
4913 ):
4914 for config_primitive in descriptor_configuration["config-primitive"]:
4915 if config_primitive["name"] == primitive:
4916 config_primitive_desc = config_primitive
4917 break
4918
4919 if not config_primitive_desc:
4920 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4921 raise LcmException(
4922 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4923 primitive
4924 )
4925 )
4926 primitive_name = primitive
4927 ee_descriptor_id = None
4928 else:
4929 primitive_name = config_primitive_desc.get(
4930 "execution-environment-primitive", primitive
4931 )
4932 ee_descriptor_id = config_primitive_desc.get(
4933 "execution-environment-ref"
4934 )
4935
4936 if vnf_index:
4937 if vdu_id:
4938 vdur = next(
4939 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4940 )
4941 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4942 elif kdu_name:
4943 kdur = next(
4944 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4945 )
4946 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4947 else:
4948 desc_params = parse_yaml_strings(
4949 db_vnfr.get("additionalParamsForVnf")
4950 )
4951 else:
4952 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4953 if kdu_name and get_configuration(db_vnfd, kdu_name):
4954 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4955 actions = set()
4956 for primitive in kdu_configuration.get("initial-config-primitive", []):
4957 actions.add(primitive["name"])
4958 for primitive in kdu_configuration.get("config-primitive", []):
4959 actions.add(primitive["name"])
4960 kdu = find_in_list(
4961 nsr_deployed["K8s"],
4962 lambda kdu: kdu_name == kdu["kdu-name"]
4963 and kdu["member-vnf-index"] == vnf_index,
4964 )
4965 kdu_action = (
4966 True
4967 if primitive_name in actions
4968 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
4969 else False
4970 )
4971
4972 # TODO check if ns is in a proper status
4973 if kdu_name and (
4974 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4975 ):
4976 # kdur and desc_params already set from before
4977 if primitive_params:
4978 desc_params.update(primitive_params)
4979 # TODO Check if we will need something at vnf level
4980 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
4981 if (
4982 kdu_name == kdu["kdu-name"]
4983 and kdu["member-vnf-index"] == vnf_index
4984 ):
4985 break
4986 else:
4987 raise LcmException(
4988 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
4989 )
4990
4991 if kdu.get("k8scluster-type") not in self.k8scluster_map:
4992 msg = "unknown k8scluster-type '{}'".format(
4993 kdu.get("k8scluster-type")
4994 )
4995 raise LcmException(msg)
4996
4997 db_dict = {
4998 "collection": "nsrs",
4999 "filter": {"_id": nsr_id},
5000 "path": "_admin.deployed.K8s.{}".format(index),
5001 }
5002 self.logger.debug(
5003 logging_text
5004 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5005 )
5006 step = "Executing kdu {}".format(primitive_name)
5007 if primitive_name == "upgrade":
5008 if desc_params.get("kdu_model"):
5009 kdu_model = desc_params.get("kdu_model")
5010 del desc_params["kdu_model"]
5011 else:
5012 kdu_model = kdu.get("kdu-model")
5013 if kdu_model.count("/") < 2: # helm chart is not embedded
5014 parts = kdu_model.split(sep=":")
5015 if len(parts) == 2:
5016 kdu_model = parts[0]
5017 if desc_params.get("kdu_atomic_upgrade"):
5018 atomic_upgrade = desc_params.get(
5019 "kdu_atomic_upgrade"
5020 ).lower() in ("yes", "true", "1")
5021 del desc_params["kdu_atomic_upgrade"]
5022 else:
5023 atomic_upgrade = True
5024
5025 detailed_status = await asyncio.wait_for(
5026 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5027 cluster_uuid=kdu.get("k8scluster-uuid"),
5028 kdu_instance=kdu.get("kdu-instance"),
5029 atomic=atomic_upgrade,
5030 kdu_model=kdu_model,
5031 params=desc_params,
5032 db_dict=db_dict,
5033 timeout=timeout_ns_action,
5034 ),
5035 timeout=timeout_ns_action + 10,
5036 )
5037 self.logger.debug(
5038 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5039 )
5040 elif primitive_name == "rollback":
5041 detailed_status = await asyncio.wait_for(
5042 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5043 cluster_uuid=kdu.get("k8scluster-uuid"),
5044 kdu_instance=kdu.get("kdu-instance"),
5045 db_dict=db_dict,
5046 ),
5047 timeout=timeout_ns_action,
5048 )
5049 elif primitive_name == "status":
5050 detailed_status = await asyncio.wait_for(
5051 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5052 cluster_uuid=kdu.get("k8scluster-uuid"),
5053 kdu_instance=kdu.get("kdu-instance"),
5054 vca_id=vca_id,
5055 ),
5056 timeout=timeout_ns_action,
5057 )
5058 else:
5059 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5060 kdu["kdu-name"], nsr_id
5061 )
5062 params = self._map_primitive_params(
5063 config_primitive_desc, primitive_params, desc_params
5064 )
5065
5066 detailed_status = await asyncio.wait_for(
5067 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5068 cluster_uuid=kdu.get("k8scluster-uuid"),
5069 kdu_instance=kdu_instance,
5070 primitive_name=primitive_name,
5071 params=params,
5072 db_dict=db_dict,
5073 timeout=timeout_ns_action,
5074 vca_id=vca_id,
5075 ),
5076 timeout=timeout_ns_action,
5077 )
5078
5079 if detailed_status:
5080 nslcmop_operation_state = "COMPLETED"
5081 else:
5082 detailed_status = ""
5083 nslcmop_operation_state = "FAILED"
5084 else:
5085 ee_id, vca_type = self._look_for_deployed_vca(
5086 nsr_deployed["VCA"],
5087 member_vnf_index=vnf_index,
5088 vdu_id=vdu_id,
5089 vdu_count_index=vdu_count_index,
5090 ee_descriptor_id=ee_descriptor_id,
5091 )
5092 for vca_index, vca_deployed in enumerate(
5093 db_nsr["_admin"]["deployed"]["VCA"]
5094 ):
5095 if vca_deployed.get("member-vnf-index") == vnf_index:
5096 db_dict = {
5097 "collection": "nsrs",
5098 "filter": {"_id": nsr_id},
5099 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5100 }
5101 break
5102 (
5103 nslcmop_operation_state,
5104 detailed_status,
5105 ) = await self._ns_execute_primitive(
5106 ee_id,
5107 primitive=primitive_name,
5108 primitive_params=self._map_primitive_params(
5109 config_primitive_desc, primitive_params, desc_params
5110 ),
5111 timeout=timeout_ns_action,
5112 vca_type=vca_type,
5113 db_dict=db_dict,
5114 vca_id=vca_id,
5115 )
5116
5117 db_nslcmop_update["detailed-status"] = detailed_status
5118 error_description_nslcmop = (
5119 detailed_status if nslcmop_operation_state == "FAILED" else ""
5120 )
5121 self.logger.debug(
5122 logging_text
5123 + "Done with result {} {}".format(
5124 nslcmop_operation_state, detailed_status
5125 )
5126 )
5127 return # database update is called inside finally
5128
5129 except (DbException, LcmException, N2VCException, K8sException) as e:
5130 self.logger.error(logging_text + "Exit Exception {}".format(e))
5131 exc = e
5132 except asyncio.CancelledError:
5133 self.logger.error(
5134 logging_text + "Cancelled Exception while '{}'".format(step)
5135 )
5136 exc = "Operation was cancelled"
5137 except asyncio.TimeoutError:
5138 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5139 exc = "Timeout"
5140 except Exception as e:
5141 exc = traceback.format_exc()
5142 self.logger.critical(
5143 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5144 exc_info=True,
5145 )
5146 finally:
5147 if exc:
5148 db_nslcmop_update[
5149 "detailed-status"
5150 ] = (
5151 detailed_status
5152 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5153 nslcmop_operation_state = "FAILED"
5154 if db_nsr:
5155 self._write_ns_status(
5156 nsr_id=nsr_id,
5157 ns_state=db_nsr[
5158 "nsState"
5159 ], # TODO check if degraded. For the moment use previous status
5160 current_operation="IDLE",
5161 current_operation_id=None,
5162 # error_description=error_description_nsr,
5163 # error_detail=error_detail,
5164 other_update=db_nsr_update,
5165 )
5166
5167 self._write_op_status(
5168 op_id=nslcmop_id,
5169 stage="",
5170 error_message=error_description_nslcmop,
5171 operation_state=nslcmop_operation_state,
5172 other_update=db_nslcmop_update,
5173 )
5174
5175 if nslcmop_operation_state:
5176 try:
5177 await self.msg.aiowrite(
5178 "ns",
5179 "actioned",
5180 {
5181 "nsr_id": nsr_id,
5182 "nslcmop_id": nslcmop_id,
5183 "operationState": nslcmop_operation_state,
5184 },
5185 loop=self.loop,
5186 )
5187 except Exception as e:
5188 self.logger.error(
5189 logging_text + "kafka_write notification Exception {}".format(e)
5190 )
5191 self.logger.debug(logging_text + "Exit")
5192 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5193 return nslcmop_operation_state, detailed_status
5194
5195 async def terminate_vdus(
5196 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5197 ):
5198 """This method terminates VDUs
5199
5200 Args:
5201 db_vnfr: VNF instance record
5202 member_vnf_index: VNF index to identify the VDUs to be removed
5203 db_nsr: NS instance record
5204 update_db_nslcmops: Nslcmop update record
5205 """
5206 vca_scaling_info = []
5207 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5208 scaling_info["scaling_direction"] = "IN"
5209 scaling_info["vdu-delete"] = {}
5210 scaling_info["kdu-delete"] = {}
5211 db_vdur = db_vnfr.get("vdur")
5212 vdur_list = copy(db_vdur)
5213 count_index = 0
5214 for index, vdu in enumerate(vdur_list):
5215 vca_scaling_info.append(
5216 {
5217 "osm_vdu_id": vdu["vdu-id-ref"],
5218 "member-vnf-index": member_vnf_index,
5219 "type": "delete",
5220 "vdu_index": count_index,
5221 }
5222 )
5223 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5224 scaling_info["vdu"].append(
5225 {
5226 "name": vdu.get("name") or vdu.get("vdu-name"),
5227 "vdu_id": vdu["vdu-id-ref"],
5228 "interface": [],
5229 }
5230 )
5231 for interface in vdu["interfaces"]:
5232 scaling_info["vdu"][index]["interface"].append(
5233 {
5234 "name": interface["name"],
5235 "ip_address": interface["ip-address"],
5236 "mac_address": interface.get("mac-address"),
5237 }
5238 )
5239 self.logger.info("NS update scaling info{}".format(scaling_info))
5240 stage[2] = "Terminating VDUs"
5241 if scaling_info.get("vdu-delete"):
5242 # scale_process = "RO"
5243 if self.ro_config.ng:
5244 await self._scale_ng_ro(
5245 logging_text,
5246 db_nsr,
5247 update_db_nslcmops,
5248 db_vnfr,
5249 scaling_info,
5250 stage,
5251 )
5252
5253 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5254 """This method is to Remove VNF instances from NS.
5255
5256 Args:
5257 nsr_id: NS instance id
5258 nslcmop_id: nslcmop id of update
5259 vnf_instance_id: id of the VNF instance to be removed
5260
5261 Returns:
5262 result: (str, str) COMPLETED/FAILED, details
5263 """
5264 try:
5265 db_nsr_update = {}
5266 logging_text = "Task ns={} update ".format(nsr_id)
5267 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5268 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5269 if check_vnfr_count > 1:
5270 stage = ["", "", ""]
5271 step = "Getting nslcmop from database"
5272 self.logger.debug(
5273 step + " after having waited for previous tasks to be completed"
5274 )
5275 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5276 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5277 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5278 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5279 """ db_vnfr = self.db.get_one(
5280 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5281
5282 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5283 await self.terminate_vdus(
5284 db_vnfr,
5285 member_vnf_index,
5286 db_nsr,
5287 update_db_nslcmops,
5288 stage,
5289 logging_text,
5290 )
5291
5292 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5293 constituent_vnfr.remove(db_vnfr.get("_id"))
5294 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5295 "constituent-vnfr-ref"
5296 )
5297 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5298 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5299 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5300 return "COMPLETED", "Done"
5301 else:
5302 step = "Terminate VNF Failed with"
5303 raise LcmException(
5304 "{} Cannot terminate the last VNF in this NS.".format(
5305 vnf_instance_id
5306 )
5307 )
5308 except (LcmException, asyncio.CancelledError):
5309 raise
5310 except Exception as e:
5311 self.logger.debug("Error removing VNF {}".format(e))
5312 return "FAILED", "Error removing VNF {}".format(e)
5313
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances.

        Terminates the VNF's current VDUs, rewrites the VNF record
        (connection points, VDURs, revision) from the latest descriptor, and
        instantiates the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor (the latest revision to deploy)
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources before instantiating the new ones
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR: rebuild the VNF record's connection points from the
            # latest descriptor's external connection-point definitions
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # The new VDUR list is pre-computed by the caller and carried in
            # the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # Re-read the record so subsequent steps see the persisted state
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources (scale-OUT descriptor over all VDUs)
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): vca_scaling_info and cloud_init_list are built but
            # never consumed below — confirm whether they are dead code.
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index stays 0 for every VDU — verify intended
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5439
5440 async def _ns_charm_upgrade(
5441 self,
5442 ee_id,
5443 charm_id,
5444 charm_type,
5445 path,
5446 timeout: float = None,
5447 ) -> (str, str):
5448 """This method upgrade charms in VNF instances
5449
5450 Args:
5451 ee_id: Execution environment id
5452 path: Local path to the charm
5453 charm_id: charm-id
5454 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5455 timeout: (Float) Timeout for the ns update operation
5456
5457 Returns:
5458 result: (str, str) COMPLETED/FAILED, details
5459 """
5460 try:
5461 charm_type = charm_type or "lxc_proxy_charm"
5462 output = await self.vca_map[charm_type].upgrade_charm(
5463 ee_id=ee_id,
5464 path=path,
5465 charm_id=charm_id,
5466 charm_type=charm_type,
5467 timeout=timeout or self.timeout.ns_update,
5468 )
5469
5470 if output:
5471 return "COMPLETED", output
5472
5473 except (LcmException, asyncio.CancelledError):
5474 raise
5475
5476 except Exception as e:
5477 self.logger.debug("Error upgrading charm {}".format(path))
5478
5479 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5480
    async def update(self, nsr_id, nslcmop_id):
        """Update NS according to different update types.

        Dispatches on ``operationParams.updateType``:
        - CHANGE_VNFPKG: upgrade a VNF to a newer VNFD revision (charm
          upgrade, helm EE upgrade, or full VNF redeploy when the software
          version changed), then bump the revision in the VNF record.
        - REMOVE_VNF: remove one VNF instance from the NS.
        - OPERATE_VNF: start/stop/rebuild a VNF.

        Args:
            nsr_id: Network service will be updated
            nslcmop_id: ns lcm operation id

        Returns:
            (nslcmop_operation_state, detailed_status).
            It may raise DbException, LcmException, N2VCException, K8sException

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # Set the required variables to be filled up later
        db_nsr = None
        db_nslcmop_update = {}
        vnfr_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        error_description_nslcmop = ""
        exc = None
        change_type = "updated"
        detailed_status = ""
        member_vnf_index = None

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="UPDATING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            db_nslcmop = self.db.get_one(
                "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
            )
            update_type = db_nslcmop["operationParams"]["updateType"]

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # Saved to restore the NS operational status on success/failure
            old_operational_status = db_nsr["operational-status"]
            db_nsr_update["operational-status"] = "updating"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            if update_type == "CHANGE_VNFPKG":
                # Get the input parameters given through update request
                vnf_instance_id = db_nslcmop["operationParams"][
                    "changeVnfPackageData"
                ].get("vnfInstanceId")

                vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
                    "vnfdId"
                )
                timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")

                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
                )

                step = "Getting vnfds from database"
                # Latest VNFD
                latest_vnfd = self.db.get_one(
                    "vnfds", {"_id": vnfd_id}, fail_on_empty=False
                )
                latest_vnfd_revision = latest_vnfd["_admin"].get("revision")

                # Current VNFD (revision the instance currently runs; defaults to 1)
                current_vnf_revision = db_vnfr.get("revision", 1)
                current_vnfd = self.db.get_one(
                    "vnfds_revisions",
                    {"_id": vnfd_id + ":" + str(current_vnf_revision)},
                    fail_on_empty=False,
                )
                # Charm artifact paths will be filled up later
                (
                    current_charm_artifact_path,
                    target_charm_artifact_path,
                    charm_artifact_paths,
                    helm_artifacts,
                ) = ([], [], [], [])

                step = "Checking if revision has changed in VNFD"
                if current_vnf_revision != latest_vnfd_revision:
                    change_type = "policy_updated"

                    # There is new revision of VNFD, update operation is required
                    current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
                    latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)

                    step = "Removing the VNFD packages if they exist in the local path"
                    shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
                    shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)

                    step = "Get the VNFD packages from FSMongo"
                    self.fs.sync(from_path=latest_vnfd_path)
                    self.fs.sync(from_path=current_vnfd_path)

                    step = (
                        "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                    )
                    current_base_folder = current_vnfd["_admin"]["storage"]
                    latest_base_folder = latest_vnfd["_admin"]["storage"]

                    for vca_index, vca_deployed in enumerate(
                        get_iterable(nsr_deployed, "VCA")
                    ):
                        vnf_index = db_vnfr.get("member-vnf-index-ref")

                        # Getting charm-id and charm-type
                        if vca_deployed.get("member-vnf-index") == vnf_index:
                            vca_id = self.get_vca_id(db_vnfr, db_nsr)
                            vca_type = vca_deployed.get("type")
                            vdu_count_index = vca_deployed.get("vdu_count_index")

                            # Getting ee-id
                            ee_id = vca_deployed.get("ee_id")

                            step = "Getting descriptor config"
                            if current_vnfd.get("kdu"):
                                search_key = "kdu_name"
                            else:
                                search_key = "vnfd_id"

                            entity_id = vca_deployed.get(search_key)

                            descriptor_config = get_configuration(
                                current_vnfd, entity_id
                            )

                            if "execution-environment-list" in descriptor_config:
                                ee_list = descriptor_config.get(
                                    "execution-environment-list", []
                                )
                            else:
                                ee_list = []

                            # There could be several charm used in the same VNF
                            for ee_item in ee_list:
                                if ee_item.get("juju"):
                                    step = "Getting charm name"
                                    charm_name = ee_item["juju"].get("charm")

                                    step = "Setting Charm artifact paths"
                                    current_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            current_base_folder,
                                            charm_name,
                                            vca_type,
                                            current_vnf_revision,
                                        )
                                    )
                                    target_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            latest_base_folder,
                                            charm_name,
                                            vca_type,
                                            latest_vnfd_revision,
                                        )
                                    )
                                elif ee_item.get("helm-chart"):
                                    # add chart to list and all parameters
                                    step = "Getting helm chart name"
                                    chart_name = ee_item.get("helm-chart")
                                    if (
                                        ee_item.get("helm-version")
                                        and ee_item.get("helm-version") == "v2"
                                    ):
                                        vca_type = "helm"
                                    else:
                                        vca_type = "helm-v3"
                                    step = "Setting Helm chart artifact paths"

                                    helm_artifacts.append(
                                        {
                                            "current_artifact_path": get_charm_artifact_path(
                                                current_base_folder,
                                                chart_name,
                                                vca_type,
                                                current_vnf_revision,
                                            ),
                                            "target_artifact_path": get_charm_artifact_path(
                                                latest_base_folder,
                                                chart_name,
                                                vca_type,
                                                latest_vnfd_revision,
                                            ),
                                            "ee_id": ee_id,
                                            "vca_index": vca_index,
                                            "vdu_index": vdu_count_index,
                                        }
                                    )

                    # NOTE: zip() is a single-use iterator; both consumers below
                    # wrap it in list(), so only the first branch taken sees data.
                    charm_artifact_paths = zip(
                        current_charm_artifact_path, target_charm_artifact_path
                    )

                    step = "Checking if software version has changed in VNFD"
                    if find_software_version(current_vnfd) != find_software_version(
                        latest_vnfd
                    ):
                        step = "Checking if existing VNF has charm"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if current_charm_path:
                                raise LcmException(
                                    "Software version change is not supported as VNF instance {} has charm.".format(
                                        vnf_instance_id
                                    )
                                )

                        # There is no change in the charm package, then redeploy the VNF
                        # based on new descriptor
                        step = "Redeploying VNF"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        (result, detailed_status) = await self._ns_redeploy_vnf(
                            nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
                        )
                        if result == "FAILED":
                            nslcmop_operation_state = result
                            error_description_nslcmop = detailed_status
                        db_nslcmop_update["detailed-status"] = detailed_status
                        self.logger.debug(
                            logging_text
                            + " step {} Done with result {} {}".format(
                                step, nslcmop_operation_state, detailed_status
                            )
                        )

                    else:
                        step = "Checking if any charm package has changed or not"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if (
                                current_charm_path
                                and target_charm_path
                                and self.check_charm_hash_changed(
                                    current_charm_path, target_charm_path
                                )
                            ):
                                step = "Checking whether VNF uses juju bundle"
                                if check_juju_bundle_existence(current_vnfd):
                                    raise LcmException(
                                        "Charm upgrade is not supported for the instance which"
                                        " uses juju-bundle: {}".format(
                                            check_juju_bundle_existence(current_vnfd)
                                        )
                                    )

                                step = "Upgrading Charm"
                                (
                                    result,
                                    detailed_status,
                                ) = await self._ns_charm_upgrade(
                                    ee_id=ee_id,
                                    charm_id=vca_id,
                                    charm_type=vca_type,
                                    path=self.fs.path + target_charm_path,
                                    timeout=timeout_seconds,
                                )

                                if result == "FAILED":
                                    nslcmop_operation_state = result
                                    error_description_nslcmop = detailed_status

                                db_nslcmop_update["detailed-status"] = detailed_status
                                self.logger.debug(
                                    logging_text
                                    + " step {} Done with result {} {}".format(
                                        step, nslcmop_operation_state, detailed_status
                                    )
                                )

                        step = "Updating policies"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        result = "COMPLETED"
                        detailed_status = "Done"
                        db_nslcmop_update["detailed-status"] = "Done"

                    # helm base EE: upgrade helm-based execution environments
                    # whose chart artifact hash changed between revisions
                    for item in helm_artifacts:
                        if not (
                            item["current_artifact_path"]
                            and item["target_artifact_path"]
                            and self.check_charm_hash_changed(
                                item["current_artifact_path"],
                                item["target_artifact_path"],
                            )
                        ):
                            continue
                        db_update_entry = "_admin.deployed.VCA.{}.".format(
                            item["vca_index"]
                        )
                        vnfr_id = db_vnfr["_id"]
                        osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": db_update_entry,
                        }
                        vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
                        await self.vca_map[vca_type].upgrade_execution_environment(
                            namespace=namespace,
                            helm_id=helm_id,
                            db_dict=db_dict,
                            config=osm_config,
                            artifact_path=item["target_artifact_path"],
                            vca_type=vca_type,
                        )
                        vnf_id = db_vnfr.get("vnfd-ref")
                        config_descriptor = get_configuration(latest_vnfd, vnf_id)
                        self.logger.debug("get ssh key block")
                        rw_mgmt_ip = None
                        if deep_get(
                            config_descriptor,
                            ("config-access", "ssh-access", "required"),
                        ):
                            # Needed to inject a ssh key
                            user = deep_get(
                                config_descriptor,
                                ("config-access", "ssh-access", "default-user"),
                            )
                            step = (
                                "Install configuration Software, getting public ssh key"
                            )
                            pub_key = await self.vca_map[
                                vca_type
                            ].get_ee_ssh_public__key(
                                ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                            )

                            step = (
                                "Insert public key into VM user={} ssh_key={}".format(
                                    user, pub_key
                                )
                            )
                            self.logger.debug(logging_text + step)

                            # wait for RO (ip-address) Insert pub_key into VM
                            rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                                logging_text,
                                nsr_id,
                                vnfr_id,
                                None,
                                item["vdu_index"],
                                user=user,
                                pub_key=pub_key,
                            )

                        # Re-run the "config" initial primitive (if any) so the
                        # upgraded EE is configured like a fresh deployment
                        initial_config_primitive_list = config_descriptor.get(
                            "initial-config-primitive"
                        )
                        config_primitive = next(
                            (
                                p
                                for p in initial_config_primitive_list
                                if p["name"] == "config"
                            ),
                            None,
                        )
                        if not config_primitive:
                            continue

                        deploy_params = {"OSM": get_osm_params(db_vnfr)}
                        if rw_mgmt_ip:
                            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
                        if db_vnfr.get("additionalParamsForVnf"):
                            deploy_params.update(
                                parse_yaml_strings(
                                    db_vnfr["additionalParamsForVnf"].copy()
                                )
                            )
                        primitive_params_ = self._map_primitive_params(
                            config_primitive, {}, deploy_params
                        )

                        step = "execute primitive '{}' params '{}'".format(
                            config_primitive["name"], primitive_params_
                        )
                        self.logger.debug(logging_text + step)
                        await self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=config_primitive["name"],
                            params_dict=primitive_params_,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        )

                        step = "Updating policies"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        detailed_status = "Done"
                        db_nslcmop_update["detailed-status"] = "Done"

                    # If nslcmop_operation_state is None, so any operation is not failed.
                    if not nslcmop_operation_state:
                        nslcmop_operation_state = "COMPLETED"

                    # If update CHANGE_VNFPKG nslcmop_operation is successful
                    # vnf revision need to be updated
                    vnfr_update["revision"] = latest_vnfd_revision
                    self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)

                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )
            elif update_type == "REMOVE_VNF":
                # This part is included in https://osm.etsi.org/gerrit/11876
                vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
                member_vnf_index = db_vnfr["member-vnf-index-ref"]
                step = "Removing VNF"
                (result, detailed_status) = await self.remove_vnf(
                    nsr_id, nslcmop_id, vnf_instance_id
                )
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                change_type = "vnf_terminated"
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            elif update_type == "OPERATE_VNF":
                vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
                    "vnfInstanceId"
                ]
                operation_type = db_nslcmop["operationParams"]["operateVnfData"][
                    "changeStateTo"
                ]
                additional_param = db_nslcmop["operationParams"]["operateVnfData"][
                    "additionalParam"
                ]
                (result, detailed_status) = await self.rebuild_start_stop(
                    nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
                )
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            # If nslcmop_operation_state is None, so any operation is not failed.
            # All operations are executed in overall.
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"
                db_nsr_update["operational-status"] = old_operational_status

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                # NOTE(review): old_operational_status is only bound after the
                # nsr read above; if the exception fired before that read this
                # line raises NameError inside finally — confirm and guard.
                db_nsr_update["operational-status"] = old_operational_status
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # Only member-scoped change types carry the vnf index
                    if (
                        change_type in ("vnf_terminated", "policy_updated")
                        and member_vnf_index
                    ):
                        msg.update({"vnf_member_index": member_vnf_index})
                    await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
        return nslcmop_operation_state, detailed_status
6024
6025 async def scale(self, nsr_id, nslcmop_id):
6026 # Try to lock HA task here
6027 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6028 if not task_is_locked_by_me:
6029 return
6030
6031 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6032 stage = ["", "", ""]
6033 tasks_dict_info = {}
6034 # ^ stage, step, VIM progress
6035 self.logger.debug(logging_text + "Enter")
6036 # get all needed from database
6037 db_nsr = None
6038 db_nslcmop_update = {}
6039 db_nsr_update = {}
6040 exc = None
6041 # in case of error, indicates what part of scale was failed to put nsr at error status
6042 scale_process = None
6043 old_operational_status = ""
6044 old_config_status = ""
6045 nsi_id = None
6046 try:
6047 # wait for any previous tasks in process
6048 step = "Waiting for previous operations to terminate"
6049 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6050 self._write_ns_status(
6051 nsr_id=nsr_id,
6052 ns_state=None,
6053 current_operation="SCALING",
6054 current_operation_id=nslcmop_id,
6055 )
6056
6057 step = "Getting nslcmop from database"
6058 self.logger.debug(
6059 step + " after having waited for previous tasks to be completed"
6060 )
6061 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6062
6063 step = "Getting nsr from database"
6064 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6065 old_operational_status = db_nsr["operational-status"]
6066 old_config_status = db_nsr["config-status"]
6067
6068 step = "Parsing scaling parameters"
6069 db_nsr_update["operational-status"] = "scaling"
6070 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6071 nsr_deployed = db_nsr["_admin"].get("deployed")
6072
6073 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6074 "scaleByStepData"
6075 ]["member-vnf-index"]
6076 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6077 "scaleByStepData"
6078 ]["scaling-group-descriptor"]
6079 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6080 # for backward compatibility
6081 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6082 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6083 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6084 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6085
6086 step = "Getting vnfr from database"
6087 db_vnfr = self.db.get_one(
6088 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6089 )
6090
6091 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6092
6093 step = "Getting vnfd from database"
6094 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6095
6096 base_folder = db_vnfd["_admin"]["storage"]
6097
6098 step = "Getting scaling-group-descriptor"
6099 scaling_descriptor = find_in_list(
6100 get_scaling_aspect(db_vnfd),
6101 lambda scale_desc: scale_desc["name"] == scaling_group,
6102 )
6103 if not scaling_descriptor:
6104 raise LcmException(
6105 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6106 "at vnfd:scaling-group-descriptor".format(scaling_group)
6107 )
6108
6109 step = "Sending scale order to VIM"
6110 # TODO check if ns is in a proper status
6111 nb_scale_op = 0
6112 if not db_nsr["_admin"].get("scaling-group"):
6113 self.update_db_2(
6114 "nsrs",
6115 nsr_id,
6116 {
6117 "_admin.scaling-group": [
6118 {"name": scaling_group, "nb-scale-op": 0}
6119 ]
6120 },
6121 )
6122 admin_scale_index = 0
6123 else:
6124 for admin_scale_index, admin_scale_info in enumerate(
6125 db_nsr["_admin"]["scaling-group"]
6126 ):
6127 if admin_scale_info["name"] == scaling_group:
6128 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6129 break
6130 else: # not found, set index one plus last element and add new entry with the name
6131 admin_scale_index += 1
6132 db_nsr_update[
6133 "_admin.scaling-group.{}.name".format(admin_scale_index)
6134 ] = scaling_group
6135
6136 vca_scaling_info = []
6137 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6138 if scaling_type == "SCALE_OUT":
6139 if "aspect-delta-details" not in scaling_descriptor:
6140 raise LcmException(
6141 "Aspect delta details not fount in scaling descriptor {}".format(
6142 scaling_descriptor["name"]
6143 )
6144 )
6145 # count if max-instance-count is reached
6146 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6147
6148 scaling_info["scaling_direction"] = "OUT"
6149 scaling_info["vdu-create"] = {}
6150 scaling_info["kdu-create"] = {}
6151 for delta in deltas:
6152 for vdu_delta in delta.get("vdu-delta", {}):
6153 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6154 # vdu_index also provides the number of instance of the targeted vdu
6155 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6156 cloud_init_text = self._get_vdu_cloud_init_content(
6157 vdud, db_vnfd
6158 )
6159 if cloud_init_text:
6160 additional_params = (
6161 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6162 or {}
6163 )
6164 cloud_init_list = []
6165
6166 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6167 max_instance_count = 10
6168 if vdu_profile and "max-number-of-instances" in vdu_profile:
6169 max_instance_count = vdu_profile.get(
6170 "max-number-of-instances", 10
6171 )
6172
6173 default_instance_num = get_number_of_instances(
6174 db_vnfd, vdud["id"]
6175 )
6176 instances_number = vdu_delta.get("number-of-instances", 1)
6177 nb_scale_op += instances_number
6178
6179 new_instance_count = nb_scale_op + default_instance_num
6180 # Control if new count is over max and vdu count is less than max.
6181 # Then assign new instance count
6182 if new_instance_count > max_instance_count > vdu_count:
6183 instances_number = new_instance_count - max_instance_count
6184 else:
6185 instances_number = instances_number
6186
6187 if new_instance_count > max_instance_count:
6188 raise LcmException(
6189 "reached the limit of {} (max-instance-count) "
6190 "scaling-out operations for the "
6191 "scaling-group-descriptor '{}'".format(
6192 nb_scale_op, scaling_group
6193 )
6194 )
6195 for x in range(vdu_delta.get("number-of-instances", 1)):
6196 if cloud_init_text:
6197 # TODO Information of its own ip is not available because db_vnfr is not updated.
6198 additional_params["OSM"] = get_osm_params(
6199 db_vnfr, vdu_delta["id"], vdu_index + x
6200 )
6201 cloud_init_list.append(
6202 self._parse_cloud_init(
6203 cloud_init_text,
6204 additional_params,
6205 db_vnfd["id"],
6206 vdud["id"],
6207 )
6208 )
6209 vca_scaling_info.append(
6210 {
6211 "osm_vdu_id": vdu_delta["id"],
6212 "member-vnf-index": vnf_index,
6213 "type": "create",
6214 "vdu_index": vdu_index + x,
6215 }
6216 )
6217 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6218 for kdu_delta in delta.get("kdu-resource-delta", {}):
6219 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6220 kdu_name = kdu_profile["kdu-name"]
6221 resource_name = kdu_profile.get("resource-name", "")
6222
6223 # Might have different kdus in the same delta
6224 # Should have list for each kdu
6225 if not scaling_info["kdu-create"].get(kdu_name, None):
6226 scaling_info["kdu-create"][kdu_name] = []
6227
6228 kdur = get_kdur(db_vnfr, kdu_name)
6229 if kdur.get("helm-chart"):
6230 k8s_cluster_type = "helm-chart-v3"
6231 self.logger.debug("kdur: {}".format(kdur))
6232 if (
6233 kdur.get("helm-version")
6234 and kdur.get("helm-version") == "v2"
6235 ):
6236 k8s_cluster_type = "helm-chart"
6237 elif kdur.get("juju-bundle"):
6238 k8s_cluster_type = "juju-bundle"
6239 else:
6240 raise LcmException(
6241 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6242 "juju-bundle. Maybe an old NBI version is running".format(
6243 db_vnfr["member-vnf-index-ref"], kdu_name
6244 )
6245 )
6246
6247 max_instance_count = 10
6248 if kdu_profile and "max-number-of-instances" in kdu_profile:
6249 max_instance_count = kdu_profile.get(
6250 "max-number-of-instances", 10
6251 )
6252
6253 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6254 deployed_kdu, _ = get_deployed_kdu(
6255 nsr_deployed, kdu_name, vnf_index
6256 )
6257 if deployed_kdu is None:
6258 raise LcmException(
6259 "KDU '{}' for vnf '{}' not deployed".format(
6260 kdu_name, vnf_index
6261 )
6262 )
6263 kdu_instance = deployed_kdu.get("kdu-instance")
6264 instance_num = await self.k8scluster_map[
6265 k8s_cluster_type
6266 ].get_scale_count(
6267 resource_name,
6268 kdu_instance,
6269 vca_id=vca_id,
6270 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6271 kdu_model=deployed_kdu.get("kdu-model"),
6272 )
6273 kdu_replica_count = instance_num + kdu_delta.get(
6274 "number-of-instances", 1
6275 )
6276
6277 # Control if new count is over max and instance_num is less than max.
6278 # Then assign max instance number to kdu replica count
6279 if kdu_replica_count > max_instance_count > instance_num:
6280 kdu_replica_count = max_instance_count
6281 if kdu_replica_count > max_instance_count:
6282 raise LcmException(
6283 "reached the limit of {} (max-instance-count) "
6284 "scaling-out operations for the "
6285 "scaling-group-descriptor '{}'".format(
6286 instance_num, scaling_group
6287 )
6288 )
6289
6290 for x in range(kdu_delta.get("number-of-instances", 1)):
6291 vca_scaling_info.append(
6292 {
6293 "osm_kdu_id": kdu_name,
6294 "member-vnf-index": vnf_index,
6295 "type": "create",
6296 "kdu_index": instance_num + x - 1,
6297 }
6298 )
6299 scaling_info["kdu-create"][kdu_name].append(
6300 {
6301 "member-vnf-index": vnf_index,
6302 "type": "create",
6303 "k8s-cluster-type": k8s_cluster_type,
6304 "resource-name": resource_name,
6305 "scale": kdu_replica_count,
6306 }
6307 )
6308 elif scaling_type == "SCALE_IN":
6309 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6310
6311 scaling_info["scaling_direction"] = "IN"
6312 scaling_info["vdu-delete"] = {}
6313 scaling_info["kdu-delete"] = {}
6314
6315 for delta in deltas:
6316 for vdu_delta in delta.get("vdu-delta", {}):
6317 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6318 min_instance_count = 0
6319 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6320 if vdu_profile and "min-number-of-instances" in vdu_profile:
6321 min_instance_count = vdu_profile["min-number-of-instances"]
6322
6323 default_instance_num = get_number_of_instances(
6324 db_vnfd, vdu_delta["id"]
6325 )
6326 instance_num = vdu_delta.get("number-of-instances", 1)
6327 nb_scale_op -= instance_num
6328
6329 new_instance_count = nb_scale_op + default_instance_num
6330
6331 if new_instance_count < min_instance_count < vdu_count:
6332 instances_number = min_instance_count - new_instance_count
6333 else:
6334 instances_number = instance_num
6335
6336 if new_instance_count < min_instance_count:
6337 raise LcmException(
6338 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6339 "scaling-group-descriptor '{}'".format(
6340 nb_scale_op, scaling_group
6341 )
6342 )
6343 for x in range(vdu_delta.get("number-of-instances", 1)):
6344 vca_scaling_info.append(
6345 {
6346 "osm_vdu_id": vdu_delta["id"],
6347 "member-vnf-index": vnf_index,
6348 "type": "delete",
6349 "vdu_index": vdu_index - 1 - x,
6350 }
6351 )
6352 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6353 for kdu_delta in delta.get("kdu-resource-delta", {}):
6354 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6355 kdu_name = kdu_profile["kdu-name"]
6356 resource_name = kdu_profile.get("resource-name", "")
6357
6358 if not scaling_info["kdu-delete"].get(kdu_name, None):
6359 scaling_info["kdu-delete"][kdu_name] = []
6360
6361 kdur = get_kdur(db_vnfr, kdu_name)
6362 if kdur.get("helm-chart"):
6363 k8s_cluster_type = "helm-chart-v3"
6364 self.logger.debug("kdur: {}".format(kdur))
6365 if (
6366 kdur.get("helm-version")
6367 and kdur.get("helm-version") == "v2"
6368 ):
6369 k8s_cluster_type = "helm-chart"
6370 elif kdur.get("juju-bundle"):
6371 k8s_cluster_type = "juju-bundle"
6372 else:
6373 raise LcmException(
6374 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6375 "juju-bundle. Maybe an old NBI version is running".format(
6376 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6377 )
6378 )
6379
6380 min_instance_count = 0
6381 if kdu_profile and "min-number-of-instances" in kdu_profile:
6382 min_instance_count = kdu_profile["min-number-of-instances"]
6383
6384 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6385 deployed_kdu, _ = get_deployed_kdu(
6386 nsr_deployed, kdu_name, vnf_index
6387 )
6388 if deployed_kdu is None:
6389 raise LcmException(
6390 "KDU '{}' for vnf '{}' not deployed".format(
6391 kdu_name, vnf_index
6392 )
6393 )
6394 kdu_instance = deployed_kdu.get("kdu-instance")
6395 instance_num = await self.k8scluster_map[
6396 k8s_cluster_type
6397 ].get_scale_count(
6398 resource_name,
6399 kdu_instance,
6400 vca_id=vca_id,
6401 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6402 kdu_model=deployed_kdu.get("kdu-model"),
6403 )
6404 kdu_replica_count = instance_num - kdu_delta.get(
6405 "number-of-instances", 1
6406 )
6407
6408 if kdu_replica_count < min_instance_count < instance_num:
6409 kdu_replica_count = min_instance_count
6410 if kdu_replica_count < min_instance_count:
6411 raise LcmException(
6412 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6413 "scaling-group-descriptor '{}'".format(
6414 instance_num, scaling_group
6415 )
6416 )
6417
6418 for x in range(kdu_delta.get("number-of-instances", 1)):
6419 vca_scaling_info.append(
6420 {
6421 "osm_kdu_id": kdu_name,
6422 "member-vnf-index": vnf_index,
6423 "type": "delete",
6424 "kdu_index": instance_num - x - 1,
6425 }
6426 )
6427 scaling_info["kdu-delete"][kdu_name].append(
6428 {
6429 "member-vnf-index": vnf_index,
6430 "type": "delete",
6431 "k8s-cluster-type": k8s_cluster_type,
6432 "resource-name": resource_name,
6433 "scale": kdu_replica_count,
6434 }
6435 )
6436
6437 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6438 vdu_delete = copy(scaling_info.get("vdu-delete"))
6439 if scaling_info["scaling_direction"] == "IN":
6440 for vdur in reversed(db_vnfr["vdur"]):
6441 if vdu_delete.get(vdur["vdu-id-ref"]):
6442 vdu_delete[vdur["vdu-id-ref"]] -= 1
6443 scaling_info["vdu"].append(
6444 {
6445 "name": vdur.get("name") or vdur.get("vdu-name"),
6446 "vdu_id": vdur["vdu-id-ref"],
6447 "interface": [],
6448 }
6449 )
6450 for interface in vdur["interfaces"]:
6451 scaling_info["vdu"][-1]["interface"].append(
6452 {
6453 "name": interface["name"],
6454 "ip_address": interface["ip-address"],
6455 "mac_address": interface.get("mac-address"),
6456 }
6457 )
6458 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6459
6460 # PRE-SCALE BEGIN
6461 step = "Executing pre-scale vnf-config-primitive"
6462 if scaling_descriptor.get("scaling-config-action"):
6463 for scaling_config_action in scaling_descriptor[
6464 "scaling-config-action"
6465 ]:
6466 if (
6467 scaling_config_action.get("trigger") == "pre-scale-in"
6468 and scaling_type == "SCALE_IN"
6469 ) or (
6470 scaling_config_action.get("trigger") == "pre-scale-out"
6471 and scaling_type == "SCALE_OUT"
6472 ):
6473 vnf_config_primitive = scaling_config_action[
6474 "vnf-config-primitive-name-ref"
6475 ]
6476 step = db_nslcmop_update[
6477 "detailed-status"
6478 ] = "executing pre-scale scaling-config-action '{}'".format(
6479 vnf_config_primitive
6480 )
6481
6482 # look for primitive
6483 for config_primitive in (
6484 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6485 ).get("config-primitive", ()):
6486 if config_primitive["name"] == vnf_config_primitive:
6487 break
6488 else:
6489 raise LcmException(
6490 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6491 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6492 "primitive".format(scaling_group, vnf_config_primitive)
6493 )
6494
6495 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6496 if db_vnfr.get("additionalParamsForVnf"):
6497 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6498
6499 scale_process = "VCA"
6500 db_nsr_update["config-status"] = "configuring pre-scaling"
6501 primitive_params = self._map_primitive_params(
6502 config_primitive, {}, vnfr_params
6503 )
6504
6505 # Pre-scale retry check: Check if this sub-operation has been executed before
6506 op_index = self._check_or_add_scale_suboperation(
6507 db_nslcmop,
6508 vnf_index,
6509 vnf_config_primitive,
6510 primitive_params,
6511 "PRE-SCALE",
6512 )
6513 if op_index == self.SUBOPERATION_STATUS_SKIP:
6514 # Skip sub-operation
6515 result = "COMPLETED"
6516 result_detail = "Done"
6517 self.logger.debug(
6518 logging_text
6519 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6520 vnf_config_primitive, result, result_detail
6521 )
6522 )
6523 else:
6524 if op_index == self.SUBOPERATION_STATUS_NEW:
6525 # New sub-operation: Get index of this sub-operation
6526 op_index = (
6527 len(db_nslcmop.get("_admin", {}).get("operations"))
6528 - 1
6529 )
6530 self.logger.debug(
6531 logging_text
6532 + "vnf_config_primitive={} New sub-operation".format(
6533 vnf_config_primitive
6534 )
6535 )
6536 else:
6537 # retry: Get registered params for this existing sub-operation
6538 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6539 op_index
6540 ]
6541 vnf_index = op.get("member_vnf_index")
6542 vnf_config_primitive = op.get("primitive")
6543 primitive_params = op.get("primitive_params")
6544 self.logger.debug(
6545 logging_text
6546 + "vnf_config_primitive={} Sub-operation retry".format(
6547 vnf_config_primitive
6548 )
6549 )
6550 # Execute the primitive, either with new (first-time) or registered (reintent) args
6551 ee_descriptor_id = config_primitive.get(
6552 "execution-environment-ref"
6553 )
6554 primitive_name = config_primitive.get(
6555 "execution-environment-primitive", vnf_config_primitive
6556 )
6557 ee_id, vca_type = self._look_for_deployed_vca(
6558 nsr_deployed["VCA"],
6559 member_vnf_index=vnf_index,
6560 vdu_id=None,
6561 vdu_count_index=None,
6562 ee_descriptor_id=ee_descriptor_id,
6563 )
6564 result, result_detail = await self._ns_execute_primitive(
6565 ee_id,
6566 primitive_name,
6567 primitive_params,
6568 vca_type=vca_type,
6569 vca_id=vca_id,
6570 )
6571 self.logger.debug(
6572 logging_text
6573 + "vnf_config_primitive={} Done with result {} {}".format(
6574 vnf_config_primitive, result, result_detail
6575 )
6576 )
6577 # Update operationState = COMPLETED | FAILED
6578 self._update_suboperation_status(
6579 db_nslcmop, op_index, result, result_detail
6580 )
6581
6582 if result == "FAILED":
6583 raise LcmException(result_detail)
6584 db_nsr_update["config-status"] = old_config_status
6585 scale_process = None
6586 # PRE-SCALE END
6587
6588 db_nsr_update[
6589 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6590 ] = nb_scale_op
6591 db_nsr_update[
6592 "_admin.scaling-group.{}.time".format(admin_scale_index)
6593 ] = time()
6594
6595 # SCALE-IN VCA - BEGIN
6596 if vca_scaling_info:
6597 step = db_nslcmop_update[
6598 "detailed-status"
6599 ] = "Deleting the execution environments"
6600 scale_process = "VCA"
6601 for vca_info in vca_scaling_info:
6602 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6603 member_vnf_index = str(vca_info["member-vnf-index"])
6604 self.logger.debug(
6605 logging_text + "vdu info: {}".format(vca_info)
6606 )
6607 if vca_info.get("osm_vdu_id"):
6608 vdu_id = vca_info["osm_vdu_id"]
6609 vdu_index = int(vca_info["vdu_index"])
6610 stage[
6611 1
6612 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6613 member_vnf_index, vdu_id, vdu_index
6614 )
6615 stage[2] = step = "Scaling in VCA"
6616 self._write_op_status(op_id=nslcmop_id, stage=stage)
6617 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6618 config_update = db_nsr["configurationStatus"]
6619 for vca_index, vca in enumerate(vca_update):
6620 if (
6621 (vca or vca.get("ee_id"))
6622 and vca["member-vnf-index"] == member_vnf_index
6623 and vca["vdu_count_index"] == vdu_index
6624 ):
6625 if vca.get("vdu_id"):
6626 config_descriptor = get_configuration(
6627 db_vnfd, vca.get("vdu_id")
6628 )
6629 elif vca.get("kdu_name"):
6630 config_descriptor = get_configuration(
6631 db_vnfd, vca.get("kdu_name")
6632 )
6633 else:
6634 config_descriptor = get_configuration(
6635 db_vnfd, db_vnfd["id"]
6636 )
6637 operation_params = (
6638 db_nslcmop.get("operationParams") or {}
6639 )
6640 exec_terminate_primitives = not operation_params.get(
6641 "skip_terminate_primitives"
6642 ) and vca.get("needed_terminate")
6643 task = asyncio.ensure_future(
6644 asyncio.wait_for(
6645 self.destroy_N2VC(
6646 logging_text,
6647 db_nslcmop,
6648 vca,
6649 config_descriptor,
6650 vca_index,
6651 destroy_ee=True,
6652 exec_primitives=exec_terminate_primitives,
6653 scaling_in=True,
6654 vca_id=vca_id,
6655 ),
6656 timeout=self.timeout.charm_delete,
6657 )
6658 )
6659 tasks_dict_info[task] = "Terminating VCA {}".format(
6660 vca.get("ee_id")
6661 )
6662 del vca_update[vca_index]
6663 del config_update[vca_index]
6664 # wait for pending tasks of terminate primitives
6665 if tasks_dict_info:
6666 self.logger.debug(
6667 logging_text
6668 + "Waiting for tasks {}".format(
6669 list(tasks_dict_info.keys())
6670 )
6671 )
6672 error_list = await self._wait_for_tasks(
6673 logging_text,
6674 tasks_dict_info,
6675 min(
6676 self.timeout.charm_delete, self.timeout.ns_terminate
6677 ),
6678 stage,
6679 nslcmop_id,
6680 )
6681 tasks_dict_info.clear()
6682 if error_list:
6683 raise LcmException("; ".join(error_list))
6684
6685 db_vca_and_config_update = {
6686 "_admin.deployed.VCA": vca_update,
6687 "configurationStatus": config_update,
6688 }
6689 self.update_db_2(
6690 "nsrs", db_nsr["_id"], db_vca_and_config_update
6691 )
6692 scale_process = None
6693 # SCALE-IN VCA - END
6694
6695 # SCALE RO - BEGIN
6696 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6697 scale_process = "RO"
6698 if self.ro_config.ng:
6699 await self._scale_ng_ro(
6700 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6701 )
6702 scaling_info.pop("vdu-create", None)
6703 scaling_info.pop("vdu-delete", None)
6704
6705 scale_process = None
6706 # SCALE RO - END
6707
6708 # SCALE KDU - BEGIN
6709 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6710 scale_process = "KDU"
6711 await self._scale_kdu(
6712 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6713 )
6714 scaling_info.pop("kdu-create", None)
6715 scaling_info.pop("kdu-delete", None)
6716
6717 scale_process = None
6718 # SCALE KDU - END
6719
6720 if db_nsr_update:
6721 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6722
6723 # SCALE-UP VCA - BEGIN
6724 if vca_scaling_info:
6725 step = db_nslcmop_update[
6726 "detailed-status"
6727 ] = "Creating new execution environments"
6728 scale_process = "VCA"
6729 for vca_info in vca_scaling_info:
6730 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6731 member_vnf_index = str(vca_info["member-vnf-index"])
6732 self.logger.debug(
6733 logging_text + "vdu info: {}".format(vca_info)
6734 )
6735 vnfd_id = db_vnfr["vnfd-ref"]
6736 if vca_info.get("osm_vdu_id"):
6737 vdu_index = int(vca_info["vdu_index"])
6738 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6739 if db_vnfr.get("additionalParamsForVnf"):
6740 deploy_params.update(
6741 parse_yaml_strings(
6742 db_vnfr["additionalParamsForVnf"].copy()
6743 )
6744 )
6745 descriptor_config = get_configuration(
6746 db_vnfd, db_vnfd["id"]
6747 )
6748 if descriptor_config:
6749 vdu_id = None
6750 vdu_name = None
6751 kdu_name = None
6752 kdu_index = None
6753 self._deploy_n2vc(
6754 logging_text=logging_text
6755 + "member_vnf_index={} ".format(member_vnf_index),
6756 db_nsr=db_nsr,
6757 db_vnfr=db_vnfr,
6758 nslcmop_id=nslcmop_id,
6759 nsr_id=nsr_id,
6760 nsi_id=nsi_id,
6761 vnfd_id=vnfd_id,
6762 vdu_id=vdu_id,
6763 kdu_name=kdu_name,
6764 kdu_index=kdu_index,
6765 member_vnf_index=member_vnf_index,
6766 vdu_index=vdu_index,
6767 vdu_name=vdu_name,
6768 deploy_params=deploy_params,
6769 descriptor_config=descriptor_config,
6770 base_folder=base_folder,
6771 task_instantiation_info=tasks_dict_info,
6772 stage=stage,
6773 )
6774 vdu_id = vca_info["osm_vdu_id"]
6775 vdur = find_in_list(
6776 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6777 )
6778 descriptor_config = get_configuration(db_vnfd, vdu_id)
6779 if vdur.get("additionalParams"):
6780 deploy_params_vdu = parse_yaml_strings(
6781 vdur["additionalParams"]
6782 )
6783 else:
6784 deploy_params_vdu = deploy_params
6785 deploy_params_vdu["OSM"] = get_osm_params(
6786 db_vnfr, vdu_id, vdu_count_index=vdu_index
6787 )
6788 if descriptor_config:
6789 vdu_name = None
6790 kdu_name = None
6791 kdu_index = None
6792 stage[
6793 1
6794 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6795 member_vnf_index, vdu_id, vdu_index
6796 )
6797 stage[2] = step = "Scaling out VCA"
6798 self._write_op_status(op_id=nslcmop_id, stage=stage)
6799 self._deploy_n2vc(
6800 logging_text=logging_text
6801 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6802 member_vnf_index, vdu_id, vdu_index
6803 ),
6804 db_nsr=db_nsr,
6805 db_vnfr=db_vnfr,
6806 nslcmop_id=nslcmop_id,
6807 nsr_id=nsr_id,
6808 nsi_id=nsi_id,
6809 vnfd_id=vnfd_id,
6810 vdu_id=vdu_id,
6811 kdu_name=kdu_name,
6812 member_vnf_index=member_vnf_index,
6813 vdu_index=vdu_index,
6814 kdu_index=kdu_index,
6815 vdu_name=vdu_name,
6816 deploy_params=deploy_params_vdu,
6817 descriptor_config=descriptor_config,
6818 base_folder=base_folder,
6819 task_instantiation_info=tasks_dict_info,
6820 stage=stage,
6821 )
6822 # SCALE-UP VCA - END
6823 scale_process = None
6824
6825 # POST-SCALE BEGIN
6826 # execute primitive service POST-SCALING
6827 step = "Executing post-scale vnf-config-primitive"
6828 if scaling_descriptor.get("scaling-config-action"):
6829 for scaling_config_action in scaling_descriptor[
6830 "scaling-config-action"
6831 ]:
6832 if (
6833 scaling_config_action.get("trigger") == "post-scale-in"
6834 and scaling_type == "SCALE_IN"
6835 ) or (
6836 scaling_config_action.get("trigger") == "post-scale-out"
6837 and scaling_type == "SCALE_OUT"
6838 ):
6839 vnf_config_primitive = scaling_config_action[
6840 "vnf-config-primitive-name-ref"
6841 ]
6842 step = db_nslcmop_update[
6843 "detailed-status"
6844 ] = "executing post-scale scaling-config-action '{}'".format(
6845 vnf_config_primitive
6846 )
6847
6848 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6849 if db_vnfr.get("additionalParamsForVnf"):
6850 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6851
6852 # look for primitive
6853 for config_primitive in (
6854 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6855 ).get("config-primitive", ()):
6856 if config_primitive["name"] == vnf_config_primitive:
6857 break
6858 else:
6859 raise LcmException(
6860 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6861 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6862 "config-primitive".format(
6863 scaling_group, vnf_config_primitive
6864 )
6865 )
6866 scale_process = "VCA"
6867 db_nsr_update["config-status"] = "configuring post-scaling"
6868 primitive_params = self._map_primitive_params(
6869 config_primitive, {}, vnfr_params
6870 )
6871
6872 # Post-scale retry check: Check if this sub-operation has been executed before
6873 op_index = self._check_or_add_scale_suboperation(
6874 db_nslcmop,
6875 vnf_index,
6876 vnf_config_primitive,
6877 primitive_params,
6878 "POST-SCALE",
6879 )
6880 if op_index == self.SUBOPERATION_STATUS_SKIP:
6881 # Skip sub-operation
6882 result = "COMPLETED"
6883 result_detail = "Done"
6884 self.logger.debug(
6885 logging_text
6886 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6887 vnf_config_primitive, result, result_detail
6888 )
6889 )
6890 else:
6891 if op_index == self.SUBOPERATION_STATUS_NEW:
6892 # New sub-operation: Get index of this sub-operation
6893 op_index = (
6894 len(db_nslcmop.get("_admin", {}).get("operations"))
6895 - 1
6896 )
6897 self.logger.debug(
6898 logging_text
6899 + "vnf_config_primitive={} New sub-operation".format(
6900 vnf_config_primitive
6901 )
6902 )
6903 else:
6904 # retry: Get registered params for this existing sub-operation
6905 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6906 op_index
6907 ]
6908 vnf_index = op.get("member_vnf_index")
6909 vnf_config_primitive = op.get("primitive")
6910 primitive_params = op.get("primitive_params")
6911 self.logger.debug(
6912 logging_text
6913 + "vnf_config_primitive={} Sub-operation retry".format(
6914 vnf_config_primitive
6915 )
6916 )
6917 # Execute the primitive, either with new (first-time) or registered (reintent) args
6918 ee_descriptor_id = config_primitive.get(
6919 "execution-environment-ref"
6920 )
6921 primitive_name = config_primitive.get(
6922 "execution-environment-primitive", vnf_config_primitive
6923 )
6924 ee_id, vca_type = self._look_for_deployed_vca(
6925 nsr_deployed["VCA"],
6926 member_vnf_index=vnf_index,
6927 vdu_id=None,
6928 vdu_count_index=None,
6929 ee_descriptor_id=ee_descriptor_id,
6930 )
6931 result, result_detail = await self._ns_execute_primitive(
6932 ee_id,
6933 primitive_name,
6934 primitive_params,
6935 vca_type=vca_type,
6936 vca_id=vca_id,
6937 )
6938 self.logger.debug(
6939 logging_text
6940 + "vnf_config_primitive={} Done with result {} {}".format(
6941 vnf_config_primitive, result, result_detail
6942 )
6943 )
6944 # Update operationState = COMPLETED | FAILED
6945 self._update_suboperation_status(
6946 db_nslcmop, op_index, result, result_detail
6947 )
6948
6949 if result == "FAILED":
6950 raise LcmException(result_detail)
6951 db_nsr_update["config-status"] = old_config_status
6952 scale_process = None
6953 # POST-SCALE END
6954
6955 db_nsr_update[
6956 "detailed-status"
6957 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6958 db_nsr_update["operational-status"] = (
6959 "running"
6960 if old_operational_status == "failed"
6961 else old_operational_status
6962 )
6963 db_nsr_update["config-status"] = old_config_status
6964 return
6965 except (
6966 ROclient.ROClientException,
6967 DbException,
6968 LcmException,
6969 NgRoException,
6970 ) as e:
6971 self.logger.error(logging_text + "Exit Exception {}".format(e))
6972 exc = e
6973 except asyncio.CancelledError:
6974 self.logger.error(
6975 logging_text + "Cancelled Exception while '{}'".format(step)
6976 )
6977 exc = "Operation was cancelled"
6978 except Exception as e:
6979 exc = traceback.format_exc()
6980 self.logger.critical(
6981 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6982 exc_info=True,
6983 )
6984 finally:
6985 self._write_ns_status(
6986 nsr_id=nsr_id,
6987 ns_state=None,
6988 current_operation="IDLE",
6989 current_operation_id=None,
6990 )
6991 if tasks_dict_info:
6992 stage[1] = "Waiting for instantiate pending tasks."
6993 self.logger.debug(logging_text + stage[1])
6994 exc = await self._wait_for_tasks(
6995 logging_text,
6996 tasks_dict_info,
6997 self.timeout.ns_deploy,
6998 stage,
6999 nslcmop_id,
7000 nsr_id=nsr_id,
7001 )
7002 if exc:
7003 db_nslcmop_update[
7004 "detailed-status"
7005 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7006 nslcmop_operation_state = "FAILED"
7007 if db_nsr:
7008 db_nsr_update["operational-status"] = old_operational_status
7009 db_nsr_update["config-status"] = old_config_status
7010 db_nsr_update["detailed-status"] = ""
7011 if scale_process:
7012 if "VCA" in scale_process:
7013 db_nsr_update["config-status"] = "failed"
7014 if "RO" in scale_process:
7015 db_nsr_update["operational-status"] = "failed"
7016 db_nsr_update[
7017 "detailed-status"
7018 ] = "FAILED scaling nslcmop={} {}: {}".format(
7019 nslcmop_id, step, exc
7020 )
7021 else:
7022 error_description_nslcmop = None
7023 nslcmop_operation_state = "COMPLETED"
7024 db_nslcmop_update["detailed-status"] = "Done"
7025
7026 self._write_op_status(
7027 op_id=nslcmop_id,
7028 stage="",
7029 error_message=error_description_nslcmop,
7030 operation_state=nslcmop_operation_state,
7031 other_update=db_nslcmop_update,
7032 )
7033 if db_nsr:
7034 self._write_ns_status(
7035 nsr_id=nsr_id,
7036 ns_state=None,
7037 current_operation="IDLE",
7038 current_operation_id=None,
7039 other_update=db_nsr_update,
7040 )
7041
7042 if nslcmop_operation_state:
7043 try:
7044 msg = {
7045 "nsr_id": nsr_id,
7046 "nslcmop_id": nslcmop_id,
7047 "operationState": nslcmop_operation_state,
7048 }
7049 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7050 except Exception as e:
7051 self.logger.error(
7052 logging_text + "kafka_write notification Exception {}".format(e)
7053 )
7054 self.logger.debug(logging_text + "Exit")
7055 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7056
7057 async def _scale_kdu(
7058 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7059 ):
7060 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7061 for kdu_name in _scaling_info:
7062 for kdu_scaling_info in _scaling_info[kdu_name]:
7063 deployed_kdu, index = get_deployed_kdu(
7064 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7065 )
7066 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7067 kdu_instance = deployed_kdu["kdu-instance"]
7068 kdu_model = deployed_kdu.get("kdu-model")
7069 scale = int(kdu_scaling_info["scale"])
7070 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7071
7072 db_dict = {
7073 "collection": "nsrs",
7074 "filter": {"_id": nsr_id},
7075 "path": "_admin.deployed.K8s.{}".format(index),
7076 }
7077
7078 step = "scaling application {}".format(
7079 kdu_scaling_info["resource-name"]
7080 )
7081 self.logger.debug(logging_text + step)
7082
7083 if kdu_scaling_info["type"] == "delete":
7084 kdu_config = get_configuration(db_vnfd, kdu_name)
7085 if (
7086 kdu_config
7087 and kdu_config.get("terminate-config-primitive")
7088 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7089 ):
7090 terminate_config_primitive_list = kdu_config.get(
7091 "terminate-config-primitive"
7092 )
7093 terminate_config_primitive_list.sort(
7094 key=lambda val: int(val["seq"])
7095 )
7096
7097 for (
7098 terminate_config_primitive
7099 ) in terminate_config_primitive_list:
7100 primitive_params_ = self._map_primitive_params(
7101 terminate_config_primitive, {}, {}
7102 )
7103 step = "execute terminate config primitive"
7104 self.logger.debug(logging_text + step)
7105 await asyncio.wait_for(
7106 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7107 cluster_uuid=cluster_uuid,
7108 kdu_instance=kdu_instance,
7109 primitive_name=terminate_config_primitive["name"],
7110 params=primitive_params_,
7111 db_dict=db_dict,
7112 total_timeout=self.timeout.primitive,
7113 vca_id=vca_id,
7114 ),
7115 timeout=self.timeout.primitive
7116 * self.timeout.primitive_outer_factor,
7117 )
7118
7119 await asyncio.wait_for(
7120 self.k8scluster_map[k8s_cluster_type].scale(
7121 kdu_instance=kdu_instance,
7122 scale=scale,
7123 resource_name=kdu_scaling_info["resource-name"],
7124 total_timeout=self.timeout.scale_on_error,
7125 vca_id=vca_id,
7126 cluster_uuid=cluster_uuid,
7127 kdu_model=kdu_model,
7128 atomic=True,
7129 db_dict=db_dict,
7130 ),
7131 timeout=self.timeout.scale_on_error
7132 * self.timeout.scale_on_error_outer_factor,
7133 )
7134
7135 if kdu_scaling_info["type"] == "create":
7136 kdu_config = get_configuration(db_vnfd, kdu_name)
7137 if (
7138 kdu_config
7139 and kdu_config.get("initial-config-primitive")
7140 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7141 ):
7142 initial_config_primitive_list = kdu_config.get(
7143 "initial-config-primitive"
7144 )
7145 initial_config_primitive_list.sort(
7146 key=lambda val: int(val["seq"])
7147 )
7148
7149 for initial_config_primitive in initial_config_primitive_list:
7150 primitive_params_ = self._map_primitive_params(
7151 initial_config_primitive, {}, {}
7152 )
7153 step = "execute initial config primitive"
7154 self.logger.debug(logging_text + step)
7155 await asyncio.wait_for(
7156 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7157 cluster_uuid=cluster_uuid,
7158 kdu_instance=kdu_instance,
7159 primitive_name=initial_config_primitive["name"],
7160 params=primitive_params_,
7161 db_dict=db_dict,
7162 vca_id=vca_id,
7163 ),
7164 timeout=600,
7165 )
7166
7167 async def _scale_ng_ro(
7168 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7169 ):
7170 nsr_id = db_nslcmop["nsInstanceId"]
7171 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7172 db_vnfrs = {}
7173
7174 # read from db: vnfd's for every vnf
7175 db_vnfds = []
7176
7177 # for each vnf in ns, read vnfd
7178 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7179 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7180 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7181 # if we haven't this vnfd, read it from db
7182 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7183 # read from db
7184 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7185 db_vnfds.append(vnfd)
7186 n2vc_key = self.n2vc.get_public_key()
7187 n2vc_key_list = [n2vc_key]
7188 self.scale_vnfr(
7189 db_vnfr,
7190 vdu_scaling_info.get("vdu-create"),
7191 vdu_scaling_info.get("vdu-delete"),
7192 mark_delete=True,
7193 )
7194 # db_vnfr has been updated, update db_vnfrs to use it
7195 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7196 await self._instantiate_ng_ro(
7197 logging_text,
7198 nsr_id,
7199 db_nsd,
7200 db_nsr,
7201 db_nslcmop,
7202 db_vnfrs,
7203 db_vnfds,
7204 n2vc_key_list,
7205 stage=stage,
7206 start_deploy=time(),
7207 timeout_ns_deploy=self.timeout.ns_deploy,
7208 )
7209 if vdu_scaling_info.get("vdu-delete"):
7210 self.scale_vnfr(
7211 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7212 )
7213
7214 async def extract_prometheus_scrape_jobs(
7215 self,
7216 ee_id: str,
7217 artifact_path: str,
7218 ee_config_descriptor: dict,
7219 vnfr_id: str,
7220 nsr_id: str,
7221 target_ip: str,
7222 element_type: str,
7223 vnf_member_index: str = "",
7224 vdu_id: str = "",
7225 vdu_index: int = None,
7226 kdu_name: str = "",
7227 kdu_index: int = None,
7228 ) -> dict:
7229 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7230 This method will wait until the corresponding VDU or KDU is fully instantiated
7231
7232 Args:
7233 ee_id (str): Execution Environment ID
7234 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7235 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7236 vnfr_id (str): VNFR ID where this EE applies
7237 nsr_id (str): NSR ID where this EE applies
7238 target_ip (str): VDU/KDU instance IP address
7239 element_type (str): NS or VNF or VDU or KDU
7240 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7241 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7242 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7243 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7244 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7245
7246 Raises:
7247 LcmException: When the VDU or KDU instance was not found in an hour
7248
7249 Returns:
7250 _type_: Prometheus jobs
7251 """
7252 # default the vdur and kdur names to an empty string, to avoid any later
7253 # problem with Prometheus when the element type is not VDU or KDU
7254 vdur_name = ""
7255 kdur_name = ""
7256
7257 # look if exist a file called 'prometheus*.j2' and
7258 artifact_content = self.fs.dir_ls(artifact_path)
7259 job_file = next(
7260 (
7261 f
7262 for f in artifact_content
7263 if f.startswith("prometheus") and f.endswith(".j2")
7264 ),
7265 None,
7266 )
7267 if not job_file:
7268 return
7269 with self.fs.file_open((artifact_path, job_file), "r") as f:
7270 job_data = f.read()
7271
7272 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7273 if element_type in ("VDU", "KDU"):
7274 for _ in range(360):
7275 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7276 if vdu_id and vdu_index is not None:
7277 vdur = next(
7278 (
7279 x
7280 for x in get_iterable(db_vnfr, "vdur")
7281 if (
7282 x.get("vdu-id-ref") == vdu_id
7283 and x.get("count-index") == vdu_index
7284 )
7285 ),
7286 {},
7287 )
7288 if vdur.get("name"):
7289 vdur_name = vdur.get("name")
7290 break
7291 if kdu_name and kdu_index is not None:
7292 kdur = next(
7293 (
7294 x
7295 for x in get_iterable(db_vnfr, "kdur")
7296 if (
7297 x.get("kdu-name") == kdu_name
7298 and x.get("count-index") == kdu_index
7299 )
7300 ),
7301 {},
7302 )
7303 if kdur.get("name"):
7304 kdur_name = kdur.get("name")
7305 break
7306
7307 await asyncio.sleep(10, loop=self.loop)
7308 else:
7309 if vdu_id and vdu_index is not None:
7310 raise LcmException(
7311 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7312 )
7313 if kdu_name and kdu_index is not None:
7314 raise LcmException(
7315 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7316 )
7317
7318 # TODO get_service
7319 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7320 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7321 host_port = "80"
7322 vnfr_id = vnfr_id.replace("-", "")
7323 variables = {
7324 "JOB_NAME": vnfr_id,
7325 "TARGET_IP": target_ip,
7326 "EXPORTER_POD_IP": host_name,
7327 "EXPORTER_POD_PORT": host_port,
7328 "NSR_ID": nsr_id,
7329 "VNF_MEMBER_INDEX": vnf_member_index,
7330 "VDUR_NAME": vdur_name,
7331 "KDUR_NAME": kdur_name,
7332 "ELEMENT_TYPE": element_type,
7333 }
7334 job_list = parse_job(job_data, variables)
7335 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7336 for job in job_list:
7337 if (
7338 not isinstance(job.get("job_name"), str)
7339 or vnfr_id not in job["job_name"]
7340 ):
7341 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7342 job["nsr_id"] = nsr_id
7343 job["vnfr_id"] = vnfr_id
7344 return job_list
7345
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild one VDU instance of a NS through NG-RO.

        :param nsr_id: NS record id
        :param nslcmop_id: id of the nslcmop operation record
        :param vnf_id: "_id" of the target VNF record
        :param additional_param: dict with "vdu_id" and "count-index"
            identifying the target VDU instance
        :param operation_type: operation name forwarded to RO.operate
            (e.g. start / stop / rebuild)
        :return: ("COMPLETED", "Done") on success, ("FAILED", reason) on error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # locate the target VDU instance inside the VNF record
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # take the first vim_info key as the target VIM identifier
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO finishes (or times out) the requested action
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached on error: every except branch above sets exc
        return "FAILED", "Error in operate VNF {}".format(exc)
7432
7433 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7434 """
7435 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7436
7437 :param: vim_account_id: VIM Account ID
7438
7439 :return: (cloud_name, cloud_credential)
7440 """
7441 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7442 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7443
7444 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7445 """
7446 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7447
7448 :param: vim_account_id: VIM Account ID
7449
7450 :return: (cloud_name, cloud_credential)
7451 """
7452 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7453 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7454
7455 async def migrate(self, nsr_id, nslcmop_id):
7456 """
7457 Migrate VNFs and VDUs instances in a NS
7458
7459 :param: nsr_id: NS Instance ID
7460 :param: nslcmop_id: nslcmop ID of migrate
7461
7462 """
7463 # Try to lock HA task here
7464 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7465 if not task_is_locked_by_me:
7466 return
7467 logging_text = "Task ns={} migrate ".format(nsr_id)
7468 self.logger.debug(logging_text + "Enter")
7469 # get all needed from database
7470 db_nslcmop = None
7471 db_nslcmop_update = {}
7472 nslcmop_operation_state = None
7473 db_nsr_update = {}
7474 target = {}
7475 exc = None
7476 # in case of error, indicates what part of scale was failed to put nsr at error status
7477 start_deploy = time()
7478
7479 try:
7480 # wait for any previous tasks in process
7481 step = "Waiting for previous operations to terminate"
7482 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7483
7484 self._write_ns_status(
7485 nsr_id=nsr_id,
7486 ns_state=None,
7487 current_operation="MIGRATING",
7488 current_operation_id=nslcmop_id,
7489 )
7490 step = "Getting nslcmop from database"
7491 self.logger.debug(
7492 step + " after having waited for previous tasks to be completed"
7493 )
7494 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7495 migrate_params = db_nslcmop.get("operationParams")
7496
7497 target = {}
7498 target.update(migrate_params)
7499 desc = await self.RO.migrate(nsr_id, target)
7500 self.logger.debug("RO return > {}".format(desc))
7501 action_id = desc["action_id"]
7502 await self._wait_ng_ro(
7503 nsr_id,
7504 action_id,
7505 nslcmop_id,
7506 start_deploy,
7507 self.timeout.migrate,
7508 operation="migrate",
7509 )
7510 except (ROclient.ROClientException, DbException, LcmException) as e:
7511 self.logger.error("Exit Exception {}".format(e))
7512 exc = e
7513 except asyncio.CancelledError:
7514 self.logger.error("Cancelled Exception while '{}'".format(step))
7515 exc = "Operation was cancelled"
7516 except Exception as e:
7517 exc = traceback.format_exc()
7518 self.logger.critical(
7519 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7520 )
7521 finally:
7522 self._write_ns_status(
7523 nsr_id=nsr_id,
7524 ns_state=None,
7525 current_operation="IDLE",
7526 current_operation_id=None,
7527 )
7528 if exc:
7529 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7530 nslcmop_operation_state = "FAILED"
7531 else:
7532 nslcmop_operation_state = "COMPLETED"
7533 db_nslcmop_update["detailed-status"] = "Done"
7534 db_nsr_update["detailed-status"] = "Done"
7535
7536 self._write_op_status(
7537 op_id=nslcmop_id,
7538 stage="",
7539 error_message="",
7540 operation_state=nslcmop_operation_state,
7541 other_update=db_nslcmop_update,
7542 )
7543 if nslcmop_operation_state:
7544 try:
7545 msg = {
7546 "nsr_id": nsr_id,
7547 "nslcmop_id": nslcmop_id,
7548 "operationState": nslcmop_operation_state,
7549 }
7550 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7551 except Exception as e:
7552 self.logger.error(
7553 logging_text + "kafka_write notification Exception {}".format(e)
7554 )
7555 self.logger.debug(logging_text + "Exit")
7556 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7557
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Sends the heal order to RO for the target VNFs/VDUs and then
        re-deploys the N2VC execution environments (charms) that apply to
        the healed machines.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # saved to restore the nsr status if the operation fails
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # No explicit VDU list given: heal every existing VDU
                        # of the VNF (translated from Spanish comment:
                        # "new code to create the dictionary")
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-index is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance may still be None
                            # when no vdur matches, which would raise an
                            # AttributeError below — confirm inputs guarantee
                            # a match
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the N2VC tasks launched above before closing the op
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the pre-operation nsr status values
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify the operation result through kafka
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7843
    async def heal_RO(
        self,
        logging_text,
        nsr_id,
        db_nslcmop,
        stage,
    ):
        """
        Heal at RO
        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param db_nslcmop: database content of ns operation, in this case, 'heal'
        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
        :return: None or exception
        """

        def get_vim_account(vim_account_id):
            # memoized lookup of a vim account record in db_vims
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        try:
            start_heal = time()
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured default
            if ns_params and ns_params.get("timeout_ns_heal"):
                timeout_ns_heal = ns_params["timeout_ns_heal"]
            else:
                timeout_ns_heal = self.timeout.ns_heal

            db_vims = {}

            nslcmop_id = db_nslcmop["_id"]
            target = {
                "action_id": nslcmop_id,
            }
            # NOTE(review): logs the whole nslcmop record at warning level;
            # looks like a debugging leftover — consider lowering/trimming
            self.logger.warning(
                "db_nslcmop={} and timeout_ns_heal={}".format(
                    db_nslcmop, timeout_ns_heal
                )
            )
            target.update(db_nslcmop.get("operationParams", {}))

            self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
            desc = await self.RO.recreate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_heal,
                timeout_ns_heal,
                stage,
                operation="healing",
            )

            # Updating NSR
            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "running",
                "detailed-status": " ".join(stage),
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
            self.logger.debug(
                logging_text + "ns healed at RO. RO_id={}".format(action_id)
            )

        except Exception as e:
            stage[2] = "ERROR healing at VIM"
            # self.set_vnfr_at_error(db_vnfrs, str(e))
            # full traceback only for unexpected exception types
            self.logger.error(
                "Error healing at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise
7931
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment of the
        given configuration descriptor and register each task in lcm_tasks.

        For every execution environment (juju charm or helm chart) found in
        descriptor_config, the matching entry in
        <nsrs>._admin.deployed.VCA is located (or created and persisted if
        missing) and a heal_N2VC coroutine is scheduled for it.

        :param logging_text: prefix for every log line of this operation
        :param db_nsr: nsrs DB record (mutated: new VCA entries appended)
        :param db_vnfr: vnfrs DB record of the VNF being healed (or None)
        :param nslcmop_id: id of the ongoing ns lcm operation
        :param nsr_id: NS instance id
        :param nsi_id: Network Slice instance id (may be None)
        :param vnfd_id: VNF descriptor id
        :param vdu_id: VDU id when healing a VDU charm, else None
        :param kdu_name: KDU name when healing a KDU, else None
        :param member_vnf_index: VNF index inside the NS ("" / None for NS charms)
        :param vdu_index: VDU count index
        :param vdu_name: VDU name
        :param deploy_params: parameters passed to the day-1 primitives
        :param descriptor_config: configuration section of the descriptor
        :param base_folder: artifact location ("folder" / "pkg-dir") of the package
        :param task_instantiation_info: dict task -> human-readable task name,
            filled here so the caller can wait on the launched tasks
        :param stage: 3-element list with the current operation stage strings
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Build the list of execution environments to process
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type / vca_name from the execution environment item
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # proxy charm (runs in LXC) unless no charm name is given
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                # helm-version "v2" selects the legacy helm client; default is v3
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an already-deployed VCA entry matching this element;
            # the for/else creates and persists a new entry when none matches.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # new entry goes right after the last existing index
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy in sync with what was just persisted
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            # record the task so the caller can await/track it
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach / re-configure one VCA execution environment after a heal.

        Steps (mirroring instantiate_N2VC): for native charms, wait for the VM
        and register a new execution environment; install the configuration
        software; for proxy charms / helm, obtain the EE ssh public key and
        wait for RO to finish healing so the key can be injected; finally run
        the day-1 (initial-config) primitives if "run-day1" was requested.
        Progress is written to the nsrs configurationStatus and to the
        nslcmop stage.

        :param logging_text: prefix for every log line of this operation
        :param vca_index: index of this VCA in <nsrs>._admin.deployed.VCA
        :param nsi_id: Network Slice instance id (may be None)
        :param db_nsr: nsrs DB record
        :param db_vnfr: vnfrs DB record (None for NS-level charms)
        :param vdu_id: VDU id when configuring a VDU, else None
        :param kdu_name: KDU name when configuring a KDU, else None
        :param vdu_index: VDU count index
        :param config_descriptor: descriptor section with the config primitives
        :param deploy_params: parameters for primitive substitution (mutated:
            "rw_mgmt_ip" is stored here for later replacement)
        :param base_folder: artifact location ("folder" / "pkg-dir") of the package
        :param nslcmop_id: id of the ongoing ns lcm operation
        :param stage: 3-element list with the current operation stage strings
        :param vca_type: one of native_charm / lxc_proxy_charm /
            k8s_proxy_charm / helm / helm-v3
        :param vca_name: charm or helm-chart name
        :param ee_config_descriptor: the execution-environment item being healed
        :raises LcmException: wrapping any failure, with the failing step in the
            message; configurationStatus is set to BROKEN first
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current sub-step for error reporting below
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Narrow element_type/namespace down to VNF, then VDU or KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            # charms live under "charms", helm charts under "helm-charts";
            # packages without a pkg-dir keep artifacts under "Scripts"
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive (if any) is applied at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    # NOTE: "get_ee_ssh_public__key" (double underscore) is the
                    # actual N2VC connector method name
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

                # store rw_mgmt_ip in deploy params for later replacement
                deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log the traceback for unexpected exception types; known
            # ones already carry a meaningful message
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8502
8503 async def _wait_heal_ro(
8504 self,
8505 nsr_id,
8506 timeout=600,
8507 ):
8508 start_time = time()
8509 while time() <= start_time + timeout:
8510 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8511 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8512 "operational-status"
8513 ]
8514 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8515 if operational_status_ro != "healing":
8516 break
8517 await asyncio.sleep(15, loop=self.loop)
8518 else: # timeout_ns_deploy
8519 raise NgRoException("Timeout waiting ns to deploy")
8520
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Delegates the scaling to RO (self.RO.vertical_scale), waits for the
        RO action to complete, then writes the operation outcome to the
        nslcmop record and publishes a "verticalscaled" message on kafka.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never passed to
        # update_db_2 / _write_ns_status, so "detailed-status" is apparently
        # not persisted on the nsr — confirm whether this is intentional
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # the RO request body is the operation parameters as-is
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected failure: keep the full traceback for diagnosis
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) about the operation result
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    # best-effort notification: do not fail the operation
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")