Fix Bug 1556 - redundant input param when calling _check_or_add_scale_suboperation
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import (
26 Environment,
27 TemplateError,
28 TemplateNotFound,
29 StrictUndefined,
30 UndefinedError,
31 )
32
33 from osm_lcm import ROclient
34 from osm_lcm.data_utils.nsr import get_deployed_kdu
35 from osm_lcm.ng_ro import NgRoClient, NgRoException
36 from osm_lcm.lcm_utils import (
37 LcmException,
38 LcmExceptionNoMgmtIP,
39 LcmBase,
40 deep_get,
41 get_iterable,
42 populate_dict,
43 )
44 from osm_lcm.data_utils.nsd import get_vnf_profiles
45 from osm_lcm.data_utils.vnfd import (
46 get_vdu_list,
47 get_vdu_profile,
48 get_ee_sorted_initial_config_primitive_list,
49 get_ee_sorted_terminate_config_primitive_list,
50 get_kdu_list,
51 get_virtual_link_profiles,
52 get_vdu,
53 get_configuration,
54 get_vdu_index,
55 get_scaling_aspect,
56 get_number_of_instances,
57 get_juju_ee_ref,
58 get_kdu_profile,
59 )
60 from osm_lcm.data_utils.list_utils import find_in_list
61 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
62 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
63 from osm_lcm.data_utils.database.vim_account import VimAccountDB
64 from n2vc.k8s_helm_conn import K8sHelmConnector
65 from n2vc.k8s_helm3_conn import K8sHelm3Connector
66 from n2vc.k8s_juju_conn import K8sJujuConnector
67
68 from osm_common.dbbase import DbException
69 from osm_common.fsbase import FsException
70
71 from osm_lcm.data_utils.database.database import Database
72 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
73
74 from n2vc.n2vc_juju_conn import N2VCJujuConnector
75 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
76
77 from osm_lcm.lcm_helm_conn import LCMHelmConn
78
79 from copy import copy, deepcopy
80 from time import time
81 from uuid import uuid4
82
83 from random import randint
84
85 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
86
87
class NsLcm(LcmBase):
    """NS (Network Service) lifecycle manager.

    Drives instantiation, scaling and termination of network services,
    coordinating the RO client, the N2VC/helm execution environments and
    the K8s (helm2/helm3/juju) cluster connectors created in __init__.
    """

    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution

    # sentinel values used when looking up / creating sub-operations
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
    def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus client, forwarded to LcmBase
        :param lcm_tasks: shared registry used to track async LCM tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared with all connectors
        :param prometheus: optional metrics client (may be None)
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so connectors can read/extend the VCA section without
        # mutating the shared config dictionary
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju charms); db updates are funneled
        # through _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # KDU deployment through helm v2
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # KDU deployment through helm v3
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # KDU deployment through juju bundles; status changes are pushed
        # to the nsr record by _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # maps a kdu deployment type (as stored in the descriptor/db) to
        # the connector that handles it
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # maps a VCA (config) type to the connector that handles it
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        self.prometheus = prometheus

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)
186
187 @staticmethod
188 def increment_ip_mac(ip_mac, vm_index=1):
189 if not isinstance(ip_mac, str):
190 return ip_mac
191 try:
192 # try with ipv4 look for last dot
193 i = ip_mac.rfind(".")
194 if i > 0:
195 i += 1
196 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
197 # try with ipv6 or mac look for last colon. Operate in hex
198 i = ip_mac.rfind(":")
199 if i > 0:
200 i += 1
201 # format in hex, len can be 2 for mac or 4 for ipv6
202 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
203 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
204 )
205 except Exception:
206 pass
207 return None
208
209 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
210
211 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
212
213 try:
214 # TODO filter RO descriptor fields...
215
216 # write to database
217 db_dict = dict()
218 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
219 db_dict["deploymentStatus"] = ro_descriptor
220 self.update_db_2("nsrs", nsrs_id, db_dict)
221
222 except Exception as e:
223 self.logger.warn(
224 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
225 )
226
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by N2VC when juju reports a model change.

        Recomputes the nsr "vcaStatus", the per-VCA configuration status and
        the READY <-> DEGRADED transition of "nsState", then persists them.

        :param table: name of the db table that triggered the update
        :param filter: db filter; its "_id" is taken as the nsr id
        :param path: dotted path whose last component is the VCA index
        :param updated_data: data changed at path (not used directly here)
        :param vca_id: optional VCA account id used for the status queries
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted component of the path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so these assignments raise KeyError and are swallowed
                # by the except below — verify the intended db update path
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
329
330 async def _on_update_k8s_db(
331 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
332 ):
333 """
334 Updating vca status in NSR record
335 :param cluster_uuid: UUID of a k8s cluster
336 :param kdu_instance: The unique name of the KDU instance
337 :param filter: To get nsr_id
338 :return: none
339 """
340
341 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
342 # .format(cluster_uuid, kdu_instance, filter))
343
344 try:
345 nsr_id = filter.get("_id")
346
347 # get vca status for NS
348 vca_status = await self.k8sclusterjuju.status_kdu(
349 cluster_uuid,
350 kdu_instance,
351 complete_status=True,
352 yaml_format=False,
353 vca_id=vca_id,
354 )
355 # vcaStatus
356 db_dict = dict()
357 db_dict["vcaStatus"] = {nsr_id: vca_status}
358
359 await self.k8sclusterjuju.update_vca_status(
360 db_dict["vcaStatus"],
361 kdu_instance,
362 vca_id=vca_id,
363 )
364
365 # write to database
366 self.update_db_2("nsrs", nsr_id, db_dict)
367
368 except (asyncio.CancelledError, asyncio.TimeoutError):
369 raise
370 except Exception as e:
371 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
372
373 @staticmethod
374 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
375 try:
376 env = Environment(undefined=StrictUndefined)
377 template = env.from_string(cloud_init_text)
378 return template.render(additional_params or {})
379 except UndefinedError as e:
380 raise LcmException(
381 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
382 "file, must be provided in the instantiation parameters inside the "
383 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
384 )
385 except (TemplateError, TemplateNotFound) as e:
386 raise LcmException(
387 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
388 vnfd_id, vdu_id, e
389 )
390 )
391
392 def _get_vdu_cloud_init_content(self, vdu, vnfd):
393 cloud_init_content = cloud_init_file = None
394 try:
395 if vdu.get("cloud-init-file"):
396 base_folder = vnfd["_admin"]["storage"]
397 cloud_init_file = "{}/{}/cloud_init/{}".format(
398 base_folder["folder"],
399 base_folder["pkg-dir"],
400 vdu["cloud-init-file"],
401 )
402 with self.fs.file_open(cloud_init_file, "r") as ci_file:
403 cloud_init_content = ci_file.read()
404 elif vdu.get("cloud-init"):
405 cloud_init_content = vdu["cloud-init"]
406
407 return cloud_init_content
408 except FsException as e:
409 raise LcmException(
410 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
411 vnfd["id"], vdu["id"], cloud_init_file, e
412 )
413 )
414
415 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
416 vdur = next(
417 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
418 )
419 additional_params = vdur.get("additionalParams")
420 return parse_yaml_strings(additional_params)
421
422 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
423 """
424 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
425 :param vnfd: input vnfd
426 :param new_id: overrides vnf id if provided
427 :param additionalParams: Instantiation params for VNFs provided
428 :param nsrId: Id of the NSR
429 :return: copy of vnfd
430 """
431 vnfd_RO = deepcopy(vnfd)
432 # remove unused by RO configuration, monitoring, scaling and internal keys
433 vnfd_RO.pop("_id", None)
434 vnfd_RO.pop("_admin", None)
435 vnfd_RO.pop("monitoring-param", None)
436 vnfd_RO.pop("scaling-group-descriptor", None)
437 vnfd_RO.pop("kdu", None)
438 vnfd_RO.pop("k8s-cluster", None)
439 if new_id:
440 vnfd_RO["id"] = new_id
441
442 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
443 for vdu in get_iterable(vnfd_RO, "vdu"):
444 vdu.pop("cloud-init-file", None)
445 vdu.pop("cloud-init", None)
446 return vnfd_RO
447
448 @staticmethod
449 def ip_profile_2_RO(ip_profile):
450 RO_ip_profile = deepcopy(ip_profile)
451 if "dns-server" in RO_ip_profile:
452 if isinstance(RO_ip_profile["dns-server"], list):
453 RO_ip_profile["dns-address"] = []
454 for ds in RO_ip_profile.pop("dns-server"):
455 RO_ip_profile["dns-address"].append(ds["address"])
456 else:
457 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
458 if RO_ip_profile.get("ip-version") == "ipv4":
459 RO_ip_profile["ip-version"] = "IPv4"
460 if RO_ip_profile.get("ip-version") == "ipv6":
461 RO_ip_profile["ip-version"] = "IPv6"
462 if "dhcp-params" in RO_ip_profile:
463 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
464 return RO_ip_profile
465
466 def _get_ro_vim_id_for_vim_account(self, vim_account):
467 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
468 if db_vim["_admin"]["operationalState"] != "ENABLED":
469 raise LcmException(
470 "VIM={} is not available. operationalState={}".format(
471 vim_account, db_vim["_admin"]["operationalState"]
472 )
473 )
474 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
475 return RO_vim_id
476
477 def get_ro_wim_id_for_wim_account(self, wim_account):
478 if isinstance(wim_account, str):
479 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
480 if db_wim["_admin"]["operationalState"] != "ENABLED":
481 raise LcmException(
482 "WIM={} is not available. operationalState={}".format(
483 wim_account, db_wim["_admin"]["operationalState"]
484 )
485 )
486 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
487 return RO_wim_id
488 else:
489 return wim_account
490
491 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
492
493 db_vdu_push_list = []
494 db_update = {"_admin.modified": time()}
495 if vdu_create:
496 for vdu_id, vdu_count in vdu_create.items():
497 vdur = next(
498 (
499 vdur
500 for vdur in reversed(db_vnfr["vdur"])
501 if vdur["vdu-id-ref"] == vdu_id
502 ),
503 None,
504 )
505 if not vdur:
506 raise LcmException(
507 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
508 vdu_id
509 )
510 )
511
512 for count in range(vdu_count):
513 vdur_copy = deepcopy(vdur)
514 vdur_copy["status"] = "BUILD"
515 vdur_copy["status-detailed"] = None
516 vdur_copy["ip-address"]: None
517 vdur_copy["_id"] = str(uuid4())
518 vdur_copy["count-index"] += count + 1
519 vdur_copy["id"] = "{}-{}".format(
520 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
521 )
522 vdur_copy.pop("vim_info", None)
523 for iface in vdur_copy["interfaces"]:
524 if iface.get("fixed-ip"):
525 iface["ip-address"] = self.increment_ip_mac(
526 iface["ip-address"], count + 1
527 )
528 else:
529 iface.pop("ip-address", None)
530 if iface.get("fixed-mac"):
531 iface["mac-address"] = self.increment_ip_mac(
532 iface["mac-address"], count + 1
533 )
534 else:
535 iface.pop("mac-address", None)
536 iface.pop(
537 "mgmt_vnf", None
538 ) # only first vdu can be managment of vnf
539 db_vdu_push_list.append(vdur_copy)
540 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
541 if vdu_delete:
542 for vdu_id, vdu_count in vdu_delete.items():
543 if mark_delete:
544 indexes_to_delete = [
545 iv[0]
546 for iv in enumerate(db_vnfr["vdur"])
547 if iv[1]["vdu-id-ref"] == vdu_id
548 ]
549 db_update.update(
550 {
551 "vdur.{}.status".format(i): "DELETING"
552 for i in indexes_to_delete[-vdu_count:]
553 }
554 )
555 else:
556 # it must be deleted one by one because common.db does not allow otherwise
557 vdus_to_delete = [
558 v
559 for v in reversed(db_vnfr["vdur"])
560 if v["vdu-id-ref"] == vdu_id
561 ]
562 for vdu in vdus_to_delete[:vdu_count]:
563 self.db.set_one(
564 "vnfrs",
565 {"_id": db_vnfr["_id"]},
566 None,
567 pull={"vdur": {"_id": vdu["_id"]}},
568 )
569 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
570 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
571 # modify passed dictionary db_vnfr
572 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
573 db_vnfr["vdur"] = db_vnfr_["vdur"]
574
575 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
576 """
577 Updates database nsr with the RO info for the created vld
578 :param ns_update_nsr: dictionary to be filled with the updated info
579 :param db_nsr: content of db_nsr. This is also modified
580 :param nsr_desc_RO: nsr descriptor from RO
581 :return: Nothing, LcmException is raised on errors
582 """
583
584 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
585 for net_RO in get_iterable(nsr_desc_RO, "nets"):
586 if vld["id"] != net_RO.get("ns_net_osm_id"):
587 continue
588 vld["vim-id"] = net_RO.get("vim_net_id")
589 vld["name"] = net_RO.get("vim_name")
590 vld["status"] = net_RO.get("status")
591 vld["status-detailed"] = net_RO.get("error_msg")
592 ns_update_nsr["vld.{}".format(vld_index)] = vld
593 break
594 else:
595 raise LcmException(
596 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
597 )
598
599 def set_vnfr_at_error(self, db_vnfrs, error_text):
600 try:
601 for db_vnfr in db_vnfrs.values():
602 vnfr_update = {"status": "ERROR"}
603 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
604 if "status" not in vdur:
605 vdur["status"] = "ERROR"
606 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
607 if error_text:
608 vdur["status-detailed"] = str(error_text)
609 vnfr_update[
610 "vdur.{}.status-detailed".format(vdu_index)
611 ] = "ERROR"
612 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
613 except DbException as e:
614 self.logger.error("Cannot update vnf. {}".format(e))
615
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ";": take the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        continue
                    # find the RO vm matching both vdu id and replica count-index
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but earlier replica: keep counting
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses from the matching RO interface
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update internal vlds with the RO network info
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
712
713 def _get_ns_config_info(self, nsr_id):
714 """
715 Generates a mapping between vnf,vdu elements and the N2VC id
716 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
717 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
718 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
719 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
720 """
721 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
722 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
723 mapping = {}
724 ns_config_info = {"osm-config-mapping": mapping}
725 for vca in vca_deployed_list:
726 if not vca["member-vnf-index"]:
727 continue
728 if not vca["vdu_id"]:
729 mapping[vca["member-vnf-index"]] = vca["application"]
730 else:
731 mapping[
732 "{}.{}.{}".format(
733 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
734 )
735 ] = vca["application"]
736 return ns_config_info
737
738 async def _instantiate_ng_ro(
739 self,
740 logging_text,
741 nsr_id,
742 nsd,
743 db_nsr,
744 db_nslcmop,
745 db_vnfrs,
746 db_vnfds,
747 n2vc_key_list,
748 stage,
749 start_deploy,
750 timeout_ns_deploy,
751 ):
752
753 db_vims = {}
754
755 def get_vim_account(vim_account_id):
756 nonlocal db_vims
757 if vim_account_id in db_vims:
758 return db_vims[vim_account_id]
759 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
760 db_vims[vim_account_id] = db_vim
761 return db_vim
762
763 # modify target_vld info with instantiation parameters
764 def parse_vld_instantiation_params(
765 target_vim, target_vld, vld_params, target_sdn
766 ):
767 if vld_params.get("ip-profile"):
768 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
769 "ip-profile"
770 ]
771 if vld_params.get("provider-network"):
772 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
773 "provider-network"
774 ]
775 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
776 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
777 "provider-network"
778 ]["sdn-ports"]
779 if vld_params.get("wimAccountId"):
780 target_wim = "wim:{}".format(vld_params["wimAccountId"])
781 target_vld["vim_info"][target_wim] = {}
782 for param in ("vim-network-name", "vim-network-id"):
783 if vld_params.get(param):
784 if isinstance(vld_params[param], dict):
785 for vim, vim_net in vld_params[param].items():
786 other_target_vim = "vim:" + vim
787 populate_dict(
788 target_vld["vim_info"],
789 (other_target_vim, param.replace("-", "_")),
790 vim_net,
791 )
792 else: # isinstance str
793 target_vld["vim_info"][target_vim][
794 param.replace("-", "_")
795 ] = vld_params[param]
796 if vld_params.get("common_id"):
797 target_vld["common_id"] = vld_params.get("common_id")
798
799 nslcmop_id = db_nslcmop["_id"]
800 target = {
801 "name": db_nsr["name"],
802 "ns": {"vld": []},
803 "vnf": [],
804 "image": deepcopy(db_nsr["image"]),
805 "flavor": deepcopy(db_nsr["flavor"]),
806 "action_id": nslcmop_id,
807 "cloud_init_content": {},
808 }
809 for image in target["image"]:
810 image["vim_info"] = {}
811 for flavor in target["flavor"]:
812 flavor["vim_info"] = {}
813
814 if db_nslcmop.get("lcmOperationType") != "instantiate":
815 # get parameters of instantiation:
816 db_nslcmop_instantiate = self.db.get_list(
817 "nslcmops",
818 {
819 "nsInstanceId": db_nslcmop["nsInstanceId"],
820 "lcmOperationType": "instantiate",
821 },
822 )[-1]
823 ns_params = db_nslcmop_instantiate.get("operationParams")
824 else:
825 ns_params = db_nslcmop.get("operationParams")
826 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
827 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
828
829 cp2target = {}
830 for vld_index, vld in enumerate(db_nsr.get("vld")):
831 target_vim = "vim:{}".format(ns_params["vimAccountId"])
832 target_vld = {
833 "id": vld["id"],
834 "name": vld["name"],
835 "mgmt-network": vld.get("mgmt-network", False),
836 "type": vld.get("type"),
837 "vim_info": {
838 target_vim: {
839 "vim_network_name": vld.get("vim-network-name"),
840 "vim_account_id": ns_params["vimAccountId"],
841 }
842 },
843 }
844 # check if this network needs SDN assist
845 if vld.get("pci-interfaces"):
846 db_vim = get_vim_account(ns_params["vimAccountId"])
847 sdnc_id = db_vim["config"].get("sdn-controller")
848 if sdnc_id:
849 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
850 target_sdn = "sdn:{}".format(sdnc_id)
851 target_vld["vim_info"][target_sdn] = {
852 "sdn": True,
853 "target_vim": target_vim,
854 "vlds": [sdn_vld],
855 "type": vld.get("type"),
856 }
857
858 nsd_vnf_profiles = get_vnf_profiles(nsd)
859 for nsd_vnf_profile in nsd_vnf_profiles:
860 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
861 if cp["virtual-link-profile-id"] == vld["id"]:
862 cp2target[
863 "member_vnf:{}.{}".format(
864 cp["constituent-cpd-id"][0][
865 "constituent-base-element-id"
866 ],
867 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
868 )
869 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
870
871 # check at nsd descriptor, if there is an ip-profile
872 vld_params = {}
873 nsd_vlp = find_in_list(
874 get_virtual_link_profiles(nsd),
875 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
876 == vld["id"],
877 )
878 if (
879 nsd_vlp
880 and nsd_vlp.get("virtual-link-protocol-data")
881 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
882 ):
883 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
884 "l3-protocol-data"
885 ]
886 ip_profile_dest_data = {}
887 if "ip-version" in ip_profile_source_data:
888 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
889 "ip-version"
890 ]
891 if "cidr" in ip_profile_source_data:
892 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
893 "cidr"
894 ]
895 if "gateway-ip" in ip_profile_source_data:
896 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
897 "gateway-ip"
898 ]
899 if "dhcp-enabled" in ip_profile_source_data:
900 ip_profile_dest_data["dhcp-params"] = {
901 "enabled": ip_profile_source_data["dhcp-enabled"]
902 }
903 vld_params["ip-profile"] = ip_profile_dest_data
904
905 # update vld_params with instantiation params
906 vld_instantiation_params = find_in_list(
907 get_iterable(ns_params, "vld"),
908 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
909 )
910 if vld_instantiation_params:
911 vld_params.update(vld_instantiation_params)
912 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
913 target["ns"]["vld"].append(target_vld)
914
915 for vnfr in db_vnfrs.values():
916 vnfd = find_in_list(
917 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
918 )
919 vnf_params = find_in_list(
920 get_iterable(ns_params, "vnf"),
921 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
922 )
923 target_vnf = deepcopy(vnfr)
924 target_vim = "vim:{}".format(vnfr["vim-account-id"])
925 for vld in target_vnf.get("vld", ()):
926 # check if connected to a ns.vld, to fill target'
927 vnf_cp = find_in_list(
928 vnfd.get("int-virtual-link-desc", ()),
929 lambda cpd: cpd.get("id") == vld["id"],
930 )
931 if vnf_cp:
932 ns_cp = "member_vnf:{}.{}".format(
933 vnfr["member-vnf-index-ref"], vnf_cp["id"]
934 )
935 if cp2target.get(ns_cp):
936 vld["target"] = cp2target[ns_cp]
937
938 vld["vim_info"] = {
939 target_vim: {"vim_network_name": vld.get("vim-network-name")}
940 }
941 # check if this network needs SDN assist
942 target_sdn = None
943 if vld.get("pci-interfaces"):
944 db_vim = get_vim_account(vnfr["vim-account-id"])
945 sdnc_id = db_vim["config"].get("sdn-controller")
946 if sdnc_id:
947 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
948 target_sdn = "sdn:{}".format(sdnc_id)
949 vld["vim_info"][target_sdn] = {
950 "sdn": True,
951 "target_vim": target_vim,
952 "vlds": [sdn_vld],
953 "type": vld.get("type"),
954 }
955
956 # check at vnfd descriptor, if there is an ip-profile
957 vld_params = {}
958 vnfd_vlp = find_in_list(
959 get_virtual_link_profiles(vnfd),
960 lambda a_link_profile: a_link_profile["id"] == vld["id"],
961 )
962 if (
963 vnfd_vlp
964 and vnfd_vlp.get("virtual-link-protocol-data")
965 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
966 ):
967 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
968 "l3-protocol-data"
969 ]
970 ip_profile_dest_data = {}
971 if "ip-version" in ip_profile_source_data:
972 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
973 "ip-version"
974 ]
975 if "cidr" in ip_profile_source_data:
976 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
977 "cidr"
978 ]
979 if "gateway-ip" in ip_profile_source_data:
980 ip_profile_dest_data[
981 "gateway-address"
982 ] = ip_profile_source_data["gateway-ip"]
983 if "dhcp-enabled" in ip_profile_source_data:
984 ip_profile_dest_data["dhcp-params"] = {
985 "enabled": ip_profile_source_data["dhcp-enabled"]
986 }
987
988 vld_params["ip-profile"] = ip_profile_dest_data
989 # update vld_params with instantiation params
990 if vnf_params:
991 vld_instantiation_params = find_in_list(
992 get_iterable(vnf_params, "internal-vld"),
993 lambda i_vld: i_vld["name"] == vld["id"],
994 )
995 if vld_instantiation_params:
996 vld_params.update(vld_instantiation_params)
997 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
998
999 vdur_list = []
1000 for vdur in target_vnf.get("vdur", ()):
1001 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1002 continue # This vdu must not be created
1003 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1004
1005 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1006
1007 if ssh_keys_all:
1008 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1009 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1010 if (
1011 vdu_configuration
1012 and vdu_configuration.get("config-access")
1013 and vdu_configuration.get("config-access").get("ssh-access")
1014 ):
1015 vdur["ssh-keys"] = ssh_keys_all
1016 vdur["ssh-access-required"] = vdu_configuration[
1017 "config-access"
1018 ]["ssh-access"]["required"]
1019 elif (
1020 vnf_configuration
1021 and vnf_configuration.get("config-access")
1022 and vnf_configuration.get("config-access").get("ssh-access")
1023 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1024 ):
1025 vdur["ssh-keys"] = ssh_keys_all
1026 vdur["ssh-access-required"] = vnf_configuration[
1027 "config-access"
1028 ]["ssh-access"]["required"]
1029 elif ssh_keys_instantiation and find_in_list(
1030 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1031 ):
1032 vdur["ssh-keys"] = ssh_keys_instantiation
1033
1034 self.logger.debug("NS > vdur > {}".format(vdur))
1035
1036 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1037 # cloud-init
1038 if vdud.get("cloud-init-file"):
1039 vdur["cloud-init"] = "{}:file:{}".format(
1040 vnfd["_id"], vdud.get("cloud-init-file")
1041 )
1042 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1043 if vdur["cloud-init"] not in target["cloud_init_content"]:
1044 base_folder = vnfd["_admin"]["storage"]
1045 cloud_init_file = "{}/{}/cloud_init/{}".format(
1046 base_folder["folder"],
1047 base_folder["pkg-dir"],
1048 vdud.get("cloud-init-file"),
1049 )
1050 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1051 target["cloud_init_content"][
1052 vdur["cloud-init"]
1053 ] = ci_file.read()
1054 elif vdud.get("cloud-init"):
1055 vdur["cloud-init"] = "{}:vdu:{}".format(
1056 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1057 )
1058 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1059 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1060 "cloud-init"
1061 ]
1062 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1063 deploy_params_vdu = self._format_additional_params(
1064 vdur.get("additionalParams") or {}
1065 )
1066 deploy_params_vdu["OSM"] = get_osm_params(
1067 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1068 )
1069 vdur["additionalParams"] = deploy_params_vdu
1070
1071 # flavor
1072 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1073 if target_vim not in ns_flavor["vim_info"]:
1074 ns_flavor["vim_info"][target_vim] = {}
1075
1076 # deal with images
1077 # in case alternative images are provided we must check if they should be applied
1078 # for the vim_type, modify the vim_type taking into account
1079 ns_image_id = int(vdur["ns-image-id"])
1080 if vdur.get("alt-image-ids"):
1081 db_vim = get_vim_account(vnfr["vim-account-id"])
1082 vim_type = db_vim["vim_type"]
1083 for alt_image_id in vdur.get("alt-image-ids"):
1084 ns_alt_image = target["image"][int(alt_image_id)]
1085 if vim_type == ns_alt_image.get("vim-type"):
1086 # must use alternative image
1087 self.logger.debug(
1088 "use alternative image id: {}".format(alt_image_id)
1089 )
1090 ns_image_id = alt_image_id
1091 vdur["ns-image-id"] = ns_image_id
1092 break
1093 ns_image = target["image"][int(ns_image_id)]
1094 if target_vim not in ns_image["vim_info"]:
1095 ns_image["vim_info"][target_vim] = {}
1096
1097 vdur["vim_info"] = {target_vim: {}}
1098 # instantiation parameters
1099 # if vnf_params:
1100 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1101 # vdud["id"]), None)
1102 vdur_list.append(vdur)
1103 target_vnf["vdur"] = vdur_list
1104 target["vnf"].append(target_vnf)
1105
1106 desc = await self.RO.deploy(nsr_id, target)
1107 self.logger.debug("RO return > {}".format(desc))
1108 action_id = desc["action_id"]
1109 await self._wait_ng_ro(
1110 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1111 )
1112
1113 # Updating NSR
1114 db_nsr_update = {
1115 "_admin.deployed.RO.operational-status": "running",
1116 "detailed-status": " ".join(stage),
1117 }
1118 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
1121 self.logger.debug(
1122 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1123 )
1124 return
1125
1126 async def _wait_ng_ro(
1127 self,
1128 nsr_id,
1129 action_id,
1130 nslcmop_id=None,
1131 start_time=None,
1132 timeout=600,
1133 stage=None,
1134 ):
1135 detailed_status_old = None
1136 db_nsr_update = {}
1137 start_time = start_time or time()
1138 while time() <= start_time + timeout:
1139 desc_status = await self.RO.status(nsr_id, action_id)
1140 self.logger.debug("Wait NG RO > {}".format(desc_status))
1141 if desc_status["status"] == "FAILED":
1142 raise NgRoException(desc_status["details"])
1143 elif desc_status["status"] == "BUILD":
1144 if stage:
1145 stage[2] = "VIM: ({})".format(desc_status["details"])
1146 elif desc_status["status"] == "DONE":
1147 if stage:
1148 stage[2] = "Deployed at VIM"
1149 break
1150 else:
1151 assert False, "ROclient.check_ns_status returns unknown {}".format(
1152 desc_status["status"]
1153 )
1154 if stage and nslcmop_id and stage[2] != detailed_status_old:
1155 detailed_status_old = stage[2]
1156 db_nsr_update["detailed-status"] = " ".join(stage)
1157 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1158 self._write_op_status(nslcmop_id, stage)
1159 await asyncio.sleep(15, loop=self.loop)
1160 else: # timeout_ns_deploy
1161 raise NgRoException("Timeout waiting ns to deploy")
1162
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Request NG-RO to undeploy the NS and wait until it finishes, then delete the
        NS record at RO. Writes deletion progress into the nsrs record and the
        operation record.
        :param logging_text: prefix text to use at logging
        :param nsr_deployed: deployed info from the NS record (currently unused here)
        :param nsr_id: NS record id
        :param nslcmop_id: operation id; also used as RO action_id for the terminate request
        :param stage: list with 3 items; stage[2] is overwritten with VIM progress detail
        :raises LcmException: if the deletion at RO failed (collected in failed_detail)
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target (no vld/vnf/image/flavor) tells NG-RO to undeploy everything
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            # classify the failure: 404 means already deleted (treated as success),
            # 409 is a conflict, anything else is a hard error
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # persist the final outcome even when deletion failed
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1229
1230 async def instantiate_RO(
1231 self,
1232 logging_text,
1233 nsr_id,
1234 nsd,
1235 db_nsr,
1236 db_nslcmop,
1237 db_vnfrs,
1238 db_vnfds,
1239 n2vc_key_list,
1240 stage,
1241 ):
1242 """
1243 Instantiate at RO
1244 :param logging_text: preffix text to use at logging
1245 :param nsr_id: nsr identity
1246 :param nsd: database content of ns descriptor
1247 :param db_nsr: database content of ns record
1248 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1249 :param db_vnfrs:
1250 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1251 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1252 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1253 :return: None or exception
1254 """
1255 try:
1256 start_deploy = time()
1257 ns_params = db_nslcmop.get("operationParams")
1258 if ns_params and ns_params.get("timeout_ns_deploy"):
1259 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1260 else:
1261 timeout_ns_deploy = self.timeout.get(
1262 "ns_deploy", self.timeout_ns_deploy
1263 )
1264
1265 # Check for and optionally request placement optimization. Database will be updated if placement activated
1266 stage[2] = "Waiting for Placement."
1267 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1268 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1269 for vnfr in db_vnfrs.values():
1270 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1271 break
1272 else:
1273 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1274
1275 return await self._instantiate_ng_ro(
1276 logging_text,
1277 nsr_id,
1278 nsd,
1279 db_nsr,
1280 db_nslcmop,
1281 db_vnfrs,
1282 db_vnfds,
1283 n2vc_key_list,
1284 stage,
1285 start_deploy,
1286 timeout_ns_deploy,
1287 )
1288 except Exception as e:
1289 stage[2] = "ERROR deploying at VIM"
1290 self.set_vnfr_at_error(db_vnfrs, str(e))
1291 self.logger.error(
1292 "Error deploying at VIM {}".format(e),
1293 exc_info=not isinstance(
1294 e,
1295 (
1296 ROclient.ROClientException,
1297 LcmException,
1298 DbException,
1299 NgRoException,
1300 ),
1301 ),
1302 )
1303 raise
1304
1305 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1306 """
1307 Wait for kdu to be up, get ip address
1308 :param logging_text: prefix use for logging
1309 :param nsr_id:
1310 :param vnfr_id:
1311 :param kdu_name:
1312 :return: IP address
1313 """
1314
1315 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1316 nb_tries = 0
1317
1318 while nb_tries < 360:
1319 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1320 kdur = next(
1321 (
1322 x
1323 for x in get_iterable(db_vnfr, "kdur")
1324 if x.get("kdu-name") == kdu_name
1325 ),
1326 None,
1327 )
1328 if not kdur:
1329 raise LcmException(
1330 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1331 )
1332 if kdur.get("status"):
1333 if kdur["status"] in ("READY", "ENABLED"):
1334 return kdur.get("ip-address")
1335 else:
1336 raise LcmException(
1337 "target KDU={} is in error state".format(kdu_name)
1338 )
1339
1340 await asyncio.sleep(10, loop=self.loop)
1341 nb_tries += 1
1342 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1343
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target VDU; when None, the management IP of the whole VNF is used
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on VNF/VM error state, target not found, or retries exhausted
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # counts key-injection retries (classic RO path only)
        target_vdu_id = None
        ro_retries = 0  # counts polling iterations; bounded below

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs have no VM to wait for; otherwise require ACTIVE from either
                # the classic status field or the NG-RO vim_status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: injection is modelled as a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO path: retry the injection up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the IP address is all that was needed
                break

        return ip_address
1520
1521 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1522 """
1523 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1524 """
1525 my_vca = vca_deployed_list[vca_index]
1526 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1527 # vdu or kdu: no dependencies
1528 return
1529 timeout = 300
1530 while timeout >= 0:
1531 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1532 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1533 configuration_status_list = db_nsr["configurationStatus"]
1534 for index, vca_deployed in enumerate(configuration_status_list):
1535 if index == vca_index:
1536 # myself
1537 continue
1538 if not my_vca.get("member-vnf-index") or (
1539 vca_deployed.get("member-vnf-index")
1540 == my_vca.get("member-vnf-index")
1541 ):
1542 internal_status = configuration_status_list[index].get("status")
1543 if internal_status == "READY":
1544 continue
1545 elif internal_status == "BROKEN":
1546 raise LcmException(
1547 "Configuration aborted because dependent charm/s has failed"
1548 )
1549 else:
1550 break
1551 else:
1552 # no dependencies, return
1553 return
1554 await asyncio.sleep(10)
1555 timeout -= 1
1556
1557 raise LcmException("Configuration aborted because dependent charm/s timeout")
1558
1559 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1560 return deep_get(db_vnfr, ("vca-id",)) or deep_get(
1561 db_nsr, ("instantiate_params", "vcaId")
1562 )
1563
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Deploy one VCA execution environment and run its Day-1 configuration.

        Creates or registers the execution environment (proxy charm, native charm,
        helm or helm-v3 depending on vca_type), installs the configuration
        software, optionally injects an ssh key into the target VM/KDU, executes
        the initial config primitives and, for helm types, configures prometheus
        metrics. Progress is written into the nsrs configurationStatus and the
        operation record.
        :param logging_text: prefix text to use at logging
        :param vca_index: index of this VCA inside _admin.deployed.VCA of db_nsr
        :param nsi_id: network slice instance id, may be None
        :param db_nsr: database content of ns record
        :param db_vnfr: database content of vnf record; None for NS-level configuration
        :param vdu_id: target vdu id; None when not a VDU-level VCA
        :param kdu_name: target kdu name; None when not a KDU-level VCA
        :param vdu_index: vdu count index
        :param config_descriptor: descriptor section holding the config primitives
        :param deploy_params: additional params for the primitives; rw_mgmt_ip is added here
        :param base_folder: package storage info ("folder", "pkg-dir") to locate artifacts
        :param nslcmop_id: operation id for progress reporting
        :param stage: 3-item stage list; stage[0] is overwritten with the Day-1 stage text
        :param vca_type: one of native_charm, lxc_proxy_charm, k8s_proxy_charm, helm, helm-v3
        :param vca_name: charm or helm-chart name inside the package
        :param ee_config_descriptor: execution environment config descriptor (provides "id")
        :raises LcmException: wrapping any failure, with the failing step in the message
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            # refine element type/namespace from NS down to VNF/VDU/KDU granularity
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms"
                if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                else "helm-charts",
                vca_name,
            )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                # native charm runs inside the VM itself: wait for the VM first
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            # number of charm units; only lxc proxy charms can have more than one
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_index=vca_index,
                vca_id=vca_id,
                vca_type=vca_type,
            )

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.add_prometheus_metrics(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
1972
1973 def _write_ns_status(
1974 self,
1975 nsr_id: str,
1976 ns_state: str,
1977 current_operation: str,
1978 current_operation_id: str,
1979 error_description: str = None,
1980 error_detail: str = None,
1981 other_update: dict = None,
1982 ):
1983 """
1984 Update db_nsr fields.
1985 :param nsr_id:
1986 :param ns_state:
1987 :param current_operation:
1988 :param current_operation_id:
1989 :param error_description:
1990 :param error_detail:
1991 :param other_update: Other required changes at database if provided, will be cleared
1992 :return:
1993 """
1994 try:
1995 db_dict = other_update or {}
1996 db_dict[
1997 "_admin.nslcmop"
1998 ] = current_operation_id # for backward compatibility
1999 db_dict["_admin.current-operation"] = current_operation_id
2000 db_dict["_admin.operation-type"] = (
2001 current_operation if current_operation != "IDLE" else None
2002 )
2003 db_dict["currentOperation"] = current_operation
2004 db_dict["currentOperationID"] = current_operation_id
2005 db_dict["errorDescription"] = error_description
2006 db_dict["errorDetail"] = error_detail
2007
2008 if ns_state:
2009 db_dict["nsState"] = ns_state
2010 self.update_db_2("nsrs", nsr_id, db_dict)
2011 except DbException as e:
2012 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2013
2014 def _write_op_status(
2015 self,
2016 op_id: str,
2017 stage: list = None,
2018 error_message: str = None,
2019 queuePosition: int = 0,
2020 operation_state: str = None,
2021 other_update: dict = None,
2022 ):
2023 try:
2024 db_dict = other_update or {}
2025 db_dict["queuePosition"] = queuePosition
2026 if isinstance(stage, list):
2027 db_dict["stage"] = stage[0]
2028 db_dict["detailed-status"] = " ".join(stage)
2029 elif stage is not None:
2030 db_dict["stage"] = str(stage)
2031
2032 if error_message is not None:
2033 db_dict["errorMessage"] = error_message
2034 if operation_state is not None:
2035 db_dict["operationState"] = operation_state
2036 db_dict["statusEnteredTime"] = time()
2037 self.update_db_2("nslcmops", op_id, db_dict)
2038 except DbException as e:
2039 self.logger.warn(
2040 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2041 )
2042
2043 def _write_all_config_status(self, db_nsr: dict, status: str):
2044 try:
2045 nsr_id = db_nsr["_id"]
2046 # configurationStatus
2047 config_status = db_nsr.get("configurationStatus")
2048 if config_status:
2049 db_nsr_update = {
2050 "configurationStatus.{}.status".format(index): status
2051 for index, v in enumerate(config_status)
2052 if v
2053 }
2054 # update status
2055 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2056
2057 except DbException as e:
2058 self.logger.warn(
2059 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2060 )
2061
2062 def _write_configuration_status(
2063 self,
2064 nsr_id: str,
2065 vca_index: int,
2066 status: str = None,
2067 element_under_configuration: str = None,
2068 element_type: str = None,
2069 other_update: dict = None,
2070 ):
2071
2072 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2073 # .format(vca_index, status))
2074
2075 try:
2076 db_path = "configurationStatus.{}.".format(vca_index)
2077 db_dict = other_update or {}
2078 if status:
2079 db_dict[db_path + "status"] = status
2080 if element_under_configuration:
2081 db_dict[
2082 db_path + "elementUnderConfiguration"
2083 ] = element_under_configuration
2084 if element_type:
2085 db_dict[db_path + "elementType"] = element_type
2086 self.update_db_2("nsrs", nsr_id, db_dict)
2087 except DbException as e:
2088 self.logger.warn(
2089 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2090 status, nsr_id, vca_index, e
2091 )
2092 )
2093
2094 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2095 """
2096 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2097 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2098 Database is used because the result can be obtained from a different LCM worker in case of HA.
2099 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2100 :param db_nslcmop: database content of nslcmop
2101 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2102 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2103 computed 'vim-account-id'
2104 """
2105 modified = False
2106 nslcmop_id = db_nslcmop["_id"]
2107 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2108 if placement_engine == "PLA":
2109 self.logger.debug(
2110 logging_text + "Invoke and wait for placement optimization"
2111 )
2112 await self.msg.aiowrite(
2113 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2114 )
2115 db_poll_interval = 5
2116 wait = db_poll_interval * 10
2117 pla_result = None
2118 while not pla_result and wait >= 0:
2119 await asyncio.sleep(db_poll_interval)
2120 wait -= db_poll_interval
2121 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2122 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2123
2124 if not pla_result:
2125 raise LcmException(
2126 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2127 )
2128
2129 for pla_vnf in pla_result["vnf"]:
2130 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2131 if not pla_vnf.get("vimAccountId") or not vnfr:
2132 continue
2133 modified = True
2134 self.db.set_one(
2135 "vnfrs",
2136 {"_id": vnfr["_id"]},
2137 {"vim-account-id": pla_vnf["vimAccountId"]},
2138 )
2139 # Modifies db_vnfrs
2140 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2141 return modified
2142
2143 def update_nsrs_with_pla_result(self, params):
2144 try:
2145 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2146 self.update_db_2(
2147 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2148 )
2149 except Exception as e:
2150 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2151
2152 async def instantiate(self, nsr_id, nslcmop_id):
2153 """
2154
2155 :param nsr_id: ns instance to deploy
2156 :param nslcmop_id: operation to run
2157 :return:
2158 """
2159
2160 # Try to lock HA task here
2161 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2162 if not task_is_locked_by_me:
2163 self.logger.debug(
2164 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2165 )
2166 return
2167
2168 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2169 self.logger.debug(logging_text + "Enter")
2170
2171 # get all needed from database
2172
2173 # database nsrs record
2174 db_nsr = None
2175
2176 # database nslcmops record
2177 db_nslcmop = None
2178
2179 # update operation on nsrs
2180 db_nsr_update = {}
2181 # update operation on nslcmops
2182 db_nslcmop_update = {}
2183
2184 nslcmop_operation_state = None
2185 db_vnfrs = {} # vnf's info indexed by member-index
2186 # n2vc_info = {}
2187 tasks_dict_info = {} # from task to info text
2188 exc = None
2189 error_list = []
2190 stage = [
2191 "Stage 1/5: preparation of the environment.",
2192 "Waiting for previous operations to terminate.",
2193 "",
2194 ]
2195 # ^ stage, step, VIM progress
2196 try:
2197 # wait for any previous tasks in process
2198 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2199
2200 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2201 stage[1] = "Reading from database."
2202 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2203 db_nsr_update["detailed-status"] = "creating"
2204 db_nsr_update["operational-status"] = "init"
2205 self._write_ns_status(
2206 nsr_id=nsr_id,
2207 ns_state="BUILDING",
2208 current_operation="INSTANTIATING",
2209 current_operation_id=nslcmop_id,
2210 other_update=db_nsr_update,
2211 )
2212 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2213
2214 # read from db: operation
2215 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2216 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2217 ns_params = db_nslcmop.get("operationParams")
2218 if ns_params and ns_params.get("timeout_ns_deploy"):
2219 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2220 else:
2221 timeout_ns_deploy = self.timeout.get(
2222 "ns_deploy", self.timeout_ns_deploy
2223 )
2224
2225 # read from db: ns
2226 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2227 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2228 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2229 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2230 self.fs.sync(db_nsr["nsd-id"])
2231 db_nsr["nsd"] = nsd
2232 # nsr_name = db_nsr["name"] # TODO short-name??
2233
2234 # read from db: vnf's of this ns
2235 stage[1] = "Getting vnfrs from db."
2236 self.logger.debug(logging_text + stage[1])
2237 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2238
2239 # read from db: vnfd's for every vnf
2240 db_vnfds = [] # every vnfd data
2241
2242 # for each vnf in ns, read vnfd
2243 for vnfr in db_vnfrs_list:
2244 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2245 vnfd_id = vnfr["vnfd-id"]
2246 vnfd_ref = vnfr["vnfd-ref"]
2247 self.fs.sync(vnfd_id)
2248
2249 # if we haven't this vnfd, read it from db
2250 if vnfd_id not in db_vnfds:
2251 # read from db
2252 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2253 vnfd_id, vnfd_ref
2254 )
2255 self.logger.debug(logging_text + stage[1])
2256 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2257
2258 # store vnfd
2259 db_vnfds.append(vnfd)
2260
2261 # Get or generates the _admin.deployed.VCA list
2262 vca_deployed_list = None
2263 if db_nsr["_admin"].get("deployed"):
2264 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2265 if vca_deployed_list is None:
2266 vca_deployed_list = []
2267 configuration_status_list = []
2268 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2269 db_nsr_update["configurationStatus"] = configuration_status_list
2270 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2271 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2272 elif isinstance(vca_deployed_list, dict):
2273 # maintain backward compatibility. Change a dict to list at database
2274 vca_deployed_list = list(vca_deployed_list.values())
2275 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2276 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2277
2278 if not isinstance(
2279 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2280 ):
2281 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2282 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2283
2284 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2285 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2286 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2287 self.db.set_list(
2288 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2289 )
2290
2291 # n2vc_redesign STEP 2 Deploy Network Scenario
2292 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2293 self._write_op_status(op_id=nslcmop_id, stage=stage)
2294
2295 stage[1] = "Deploying KDUs."
2296 # self.logger.debug(logging_text + "Before deploy_kdus")
2297 # Call to deploy_kdus in case exists the "vdu:kdu" param
2298 await self.deploy_kdus(
2299 logging_text=logging_text,
2300 nsr_id=nsr_id,
2301 nslcmop_id=nslcmop_id,
2302 db_vnfrs=db_vnfrs,
2303 db_vnfds=db_vnfds,
2304 task_instantiation_info=tasks_dict_info,
2305 )
2306
2307 stage[1] = "Getting VCA public key."
2308 # n2vc_redesign STEP 1 Get VCA public ssh-key
2309 # feature 1429. Add n2vc public key to needed VMs
2310 n2vc_key = self.n2vc.get_public_key()
2311 n2vc_key_list = [n2vc_key]
2312 if self.vca_config.get("public_key"):
2313 n2vc_key_list.append(self.vca_config["public_key"])
2314
2315 stage[1] = "Deploying NS at VIM."
2316 task_ro = asyncio.ensure_future(
2317 self.instantiate_RO(
2318 logging_text=logging_text,
2319 nsr_id=nsr_id,
2320 nsd=nsd,
2321 db_nsr=db_nsr,
2322 db_nslcmop=db_nslcmop,
2323 db_vnfrs=db_vnfrs,
2324 db_vnfds=db_vnfds,
2325 n2vc_key_list=n2vc_key_list,
2326 stage=stage,
2327 )
2328 )
2329 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2330 tasks_dict_info[task_ro] = "Deploying at VIM"
2331
2332 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2333 stage[1] = "Deploying Execution Environments."
2334 self.logger.debug(logging_text + stage[1])
2335
2336 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2337 for vnf_profile in get_vnf_profiles(nsd):
2338 vnfd_id = vnf_profile["vnfd-id"]
2339 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2340 member_vnf_index = str(vnf_profile["id"])
2341 db_vnfr = db_vnfrs[member_vnf_index]
2342 base_folder = vnfd["_admin"]["storage"]
2343 vdu_id = None
2344 vdu_index = 0
2345 vdu_name = None
2346 kdu_name = None
2347
2348 # Get additional parameters
2349 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2350 if db_vnfr.get("additionalParamsForVnf"):
2351 deploy_params.update(
2352 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2353 )
2354
2355 descriptor_config = get_configuration(vnfd, vnfd["id"])
2356 if descriptor_config:
2357 self._deploy_n2vc(
2358 logging_text=logging_text
2359 + "member_vnf_index={} ".format(member_vnf_index),
2360 db_nsr=db_nsr,
2361 db_vnfr=db_vnfr,
2362 nslcmop_id=nslcmop_id,
2363 nsr_id=nsr_id,
2364 nsi_id=nsi_id,
2365 vnfd_id=vnfd_id,
2366 vdu_id=vdu_id,
2367 kdu_name=kdu_name,
2368 member_vnf_index=member_vnf_index,
2369 vdu_index=vdu_index,
2370 vdu_name=vdu_name,
2371 deploy_params=deploy_params,
2372 descriptor_config=descriptor_config,
2373 base_folder=base_folder,
2374 task_instantiation_info=tasks_dict_info,
2375 stage=stage,
2376 )
2377
2378 # Deploy charms for each VDU that supports one.
2379 for vdud in get_vdu_list(vnfd):
2380 vdu_id = vdud["id"]
2381 descriptor_config = get_configuration(vnfd, vdu_id)
2382 vdur = find_in_list(
2383 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2384 )
2385
2386 if vdur.get("additionalParams"):
2387 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2388 else:
2389 deploy_params_vdu = deploy_params
2390 deploy_params_vdu["OSM"] = get_osm_params(
2391 db_vnfr, vdu_id, vdu_count_index=0
2392 )
2393 vdud_count = get_vdu_profile(vnfd, vdu_id).get(
2394 "max-number-of-instances", 1
2395 )
2396
2397 self.logger.debug("VDUD > {}".format(vdud))
2398 self.logger.debug(
2399 "Descriptor config > {}".format(descriptor_config)
2400 )
2401 if descriptor_config:
2402 vdu_name = None
2403 kdu_name = None
2404 for vdu_index in range(vdud_count):
2405 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2406 self._deploy_n2vc(
2407 logging_text=logging_text
2408 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2409 member_vnf_index, vdu_id, vdu_index
2410 ),
2411 db_nsr=db_nsr,
2412 db_vnfr=db_vnfr,
2413 nslcmop_id=nslcmop_id,
2414 nsr_id=nsr_id,
2415 nsi_id=nsi_id,
2416 vnfd_id=vnfd_id,
2417 vdu_id=vdu_id,
2418 kdu_name=kdu_name,
2419 member_vnf_index=member_vnf_index,
2420 vdu_index=vdu_index,
2421 vdu_name=vdu_name,
2422 deploy_params=deploy_params_vdu,
2423 descriptor_config=descriptor_config,
2424 base_folder=base_folder,
2425 task_instantiation_info=tasks_dict_info,
2426 stage=stage,
2427 )
2428 for kdud in get_kdu_list(vnfd):
2429 kdu_name = kdud["name"]
2430 descriptor_config = get_configuration(vnfd, kdu_name)
2431 if descriptor_config:
2432 vdu_id = None
2433 vdu_index = 0
2434 vdu_name = None
2435 kdur = next(
2436 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2437 )
2438 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2439 if kdur.get("additionalParams"):
2440 deploy_params_kdu = parse_yaml_strings(
2441 kdur["additionalParams"]
2442 )
2443
2444 self._deploy_n2vc(
2445 logging_text=logging_text,
2446 db_nsr=db_nsr,
2447 db_vnfr=db_vnfr,
2448 nslcmop_id=nslcmop_id,
2449 nsr_id=nsr_id,
2450 nsi_id=nsi_id,
2451 vnfd_id=vnfd_id,
2452 vdu_id=vdu_id,
2453 kdu_name=kdu_name,
2454 member_vnf_index=member_vnf_index,
2455 vdu_index=vdu_index,
2456 vdu_name=vdu_name,
2457 deploy_params=deploy_params_kdu,
2458 descriptor_config=descriptor_config,
2459 base_folder=base_folder,
2460 task_instantiation_info=tasks_dict_info,
2461 stage=stage,
2462 )
2463
2464 # Check if this NS has a charm configuration
2465 descriptor_config = nsd.get("ns-configuration")
2466 if descriptor_config and descriptor_config.get("juju"):
2467 vnfd_id = None
2468 db_vnfr = None
2469 member_vnf_index = None
2470 vdu_id = None
2471 kdu_name = None
2472 vdu_index = 0
2473 vdu_name = None
2474
2475 # Get additional parameters
2476 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2477 if db_nsr.get("additionalParamsForNs"):
2478 deploy_params.update(
2479 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2480 )
2481 base_folder = nsd["_admin"]["storage"]
2482 self._deploy_n2vc(
2483 logging_text=logging_text,
2484 db_nsr=db_nsr,
2485 db_vnfr=db_vnfr,
2486 nslcmop_id=nslcmop_id,
2487 nsr_id=nsr_id,
2488 nsi_id=nsi_id,
2489 vnfd_id=vnfd_id,
2490 vdu_id=vdu_id,
2491 kdu_name=kdu_name,
2492 member_vnf_index=member_vnf_index,
2493 vdu_index=vdu_index,
2494 vdu_name=vdu_name,
2495 deploy_params=deploy_params,
2496 descriptor_config=descriptor_config,
2497 base_folder=base_folder,
2498 task_instantiation_info=tasks_dict_info,
2499 stage=stage,
2500 )
2501
2502 # rest of staff will be done at finally
2503
2504 except (
2505 ROclient.ROClientException,
2506 DbException,
2507 LcmException,
2508 N2VCException,
2509 ) as e:
2510 self.logger.error(
2511 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2512 )
2513 exc = e
2514 except asyncio.CancelledError:
2515 self.logger.error(
2516 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2517 )
2518 exc = "Operation was cancelled"
2519 except Exception as e:
2520 exc = traceback.format_exc()
2521 self.logger.critical(
2522 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2523 exc_info=True,
2524 )
2525 finally:
2526 if exc:
2527 error_list.append(str(exc))
2528 try:
2529 # wait for pending tasks
2530 if tasks_dict_info:
2531 stage[1] = "Waiting for instantiate pending tasks."
2532 self.logger.debug(logging_text + stage[1])
2533 error_list += await self._wait_for_tasks(
2534 logging_text,
2535 tasks_dict_info,
2536 timeout_ns_deploy,
2537 stage,
2538 nslcmop_id,
2539 nsr_id=nsr_id,
2540 )
2541 stage[1] = stage[2] = ""
2542 except asyncio.CancelledError:
2543 error_list.append("Cancelled")
2544 # TODO cancel all tasks
2545 except Exception as exc:
2546 error_list.append(str(exc))
2547
2548 # update operation-status
2549 db_nsr_update["operational-status"] = "running"
2550 # let's begin with VCA 'configured' status (later we can change it)
2551 db_nsr_update["config-status"] = "configured"
2552 for task, task_name in tasks_dict_info.items():
2553 if not task.done() or task.cancelled() or task.exception():
2554 if task_name.startswith(self.task_name_deploy_vca):
2555 # A N2VC task is pending
2556 db_nsr_update["config-status"] = "failed"
2557 else:
2558 # RO or KDU task is pending
2559 db_nsr_update["operational-status"] = "failed"
2560
2561 # update status at database
2562 if error_list:
2563 error_detail = ". ".join(error_list)
2564 self.logger.error(logging_text + error_detail)
2565 error_description_nslcmop = "{} Detail: {}".format(
2566 stage[0], error_detail
2567 )
2568 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2569 nslcmop_id, stage[0]
2570 )
2571
2572 db_nsr_update["detailed-status"] = (
2573 error_description_nsr + " Detail: " + error_detail
2574 )
2575 db_nslcmop_update["detailed-status"] = error_detail
2576 nslcmop_operation_state = "FAILED"
2577 ns_state = "BROKEN"
2578 else:
2579 error_detail = None
2580 error_description_nsr = error_description_nslcmop = None
2581 ns_state = "READY"
2582 db_nsr_update["detailed-status"] = "Done"
2583 db_nslcmop_update["detailed-status"] = "Done"
2584 nslcmop_operation_state = "COMPLETED"
2585
2586 if db_nsr:
2587 self._write_ns_status(
2588 nsr_id=nsr_id,
2589 ns_state=ns_state,
2590 current_operation="IDLE",
2591 current_operation_id=None,
2592 error_description=error_description_nsr,
2593 error_detail=error_detail,
2594 other_update=db_nsr_update,
2595 )
2596 self._write_op_status(
2597 op_id=nslcmop_id,
2598 stage="",
2599 error_message=error_description_nslcmop,
2600 operation_state=nslcmop_operation_state,
2601 other_update=db_nslcmop_update,
2602 )
2603
2604 if nslcmop_operation_state:
2605 try:
2606 await self.msg.aiowrite(
2607 "ns",
2608 "instantiated",
2609 {
2610 "nsr_id": nsr_id,
2611 "nslcmop_id": nslcmop_id,
2612 "operationState": nslcmop_operation_state,
2613 },
2614 loop=self.loop,
2615 )
2616 except Exception as e:
2617 self.logger.error(
2618 logging_text + "kafka_write notification Exception {}".format(e)
2619 )
2620
2621 self.logger.debug(logging_text + "Exit")
2622 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2623
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_index: int,
        timeout: int = 3600,
        vca_type: str = None,
        vca_id: str = None,
    ) -> bool:
        """
        Add the juju relations defined for the VCA at vca_index, waiting (polling the
        database) until the peer VCAs are deployed or marked BROKEN.

        :param logging_text: prefix for logging
        :param nsr_id: id of the nsrs record
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param timeout: maximum seconds to wait for peers before giving up
        :param vca_type: VCA type key for vca_map; defaults to "lxc_proxy_charm"
        :param vca_id: VCA id, forwarded to the add_relation calls
        :return: True on success (or nothing to do), False on timeout or error
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            vca_type = vca_type or "lxc_proxy_charm"

            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # this VCA data
            my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]

            # read all ns-configuration relations
            ns_relations = list()
            db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
            if db_ns_relations:
                for r in db_ns_relations:
                    # check if this VCA is in the relation
                    if my_vca.get("member-vnf-index") in (
                        r.get("entities")[0].get("id"),
                        r.get("entities")[1].get("id"),
                    ):
                        ns_relations.append(r)

            # read all vnf-configuration relations
            vnf_relations = list()
            db_vnfd_list = db_nsr.get("vnfd-id")
            if db_vnfd_list:
                for vnfd in db_vnfd_list:
                    db_vnf_relations = None
                    db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                    db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
                    if db_vnf_configuration:
                        db_vnf_relations = db_vnf_configuration.get("relation", [])
                    if db_vnf_relations:
                        for r in db_vnf_relations:
                            # check if this VCA is in the relation
                            if my_vca.get("vdu_id") in (
                                r.get("entities")[0].get("id"),
                                r.get("entities")[1].get("id"),
                            ):
                                vnf_relations.append(r)

            # if no relations, terminate
            if not ns_relations and not vnf_relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(
                logging_text
                + " adding relations\n {}\n {}".format(
                    ns_relations, vnf_relations
                )
            )

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need the fresh _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each defined NS relation, find the VCA's related
                # (iterate over a copy: entries are removed from ns_relations below)
                for r in ns_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        if vca.get("member-vnf-index") == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get("member-vnf-index") == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        ns_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        0
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        1
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                        except Exception:
                            # ignore: best-effort check, keep waiting for the peer
                            pass

                # for each defined VNF relation, find the VCA's related
                # (iterate over a copy: entries are removed from vnf_relations below)
                for r in vnf_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        # match on vdu_id for vdu-level charms, vnfd_id otherwise
                        key_to_check = "vdu_id"
                        if vca.get("vdu_id") is None:
                            key_to_check = "vnfd_id"
                        if vca.get(key_to_check) == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get(key_to_check) == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        vnf_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    # NOTE(review): this broken-peer check matches on
                                    # "vdu_id" only, not on key_to_check as above —
                                    # vnf-level charms may never be detected as broken
                                    # here; confirm whether this asymmetry is intended
                                    if vca.get("vdu_id") == r.get("entities")[0].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                                    if vca.get("vdu_id") == r.get("entities")[1].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                        except Exception:
                            # ignore: best-effort check, keep waiting for the peer
                            pass

                # wait for next try
                await asyncio.sleep(5.0)

                if not ns_relations and not vnf_relations:
                    self.logger.debug("Relations added")
                    break

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
2827
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """
        Install one KDU on its target k8s cluster, record its services/mgmt ip in the
        vnfr, and run the initial config primitives (when not juju-based).

        :param nsr_id: id of the nsrs record
        :param nsr_db_path: path inside the nsrs record where kdu status is written
        :param vnfr_data: database content of the vnfr owning this kdu
        :param kdu_index: index of this kdu inside vnfr "kdur" list
        :param kdud: kdu descriptor (from the vnfd)
        :param vnfd: database content of the vnfd
        :param k8s_instance_info: dict with k8scluster-type/-uuid, kdu-model,
            kdu-name, namespace and optional kdu-deployment-name
        :param k8params: parameters passed to the k8s install
        :param timeout: seconds for the install and for each config primitive
        :param vca_id: VCA id forwarded to the k8s connector calls
        :return: the kdu instance name
        :raises: re-raises any failure after marking the kdu as ERROR in the db
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honor an explicit deployment name when given; otherwise let the
            # connector generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )
            # persist the instance name before installing so it can be tracked
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )
            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for-else: no deployed service matched this mgmt service name
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial config primitives directly on k8s only when the kdu has no
            # juju execution environment (otherwise the VCA handles them)
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives must run in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
2992
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one _install_kdu task per KDU record found in the VNF records.

        For each "kdur" entry of each vnfr: resolves the kdu model (helm chart,
        helm-v3 chart or juju bundle), initializes/reuses the target k8s cluster,
        synchronizes helm repos once per cluster, records the deployment under
        nsr._admin.deployed.K8s.<index>, and registers an asyncio task that
        performs the actual installation.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record _id (used for DB updates and task registration)
        :param nslcmop_id: current NS LCM operation id
        :param db_vnfrs: dict of vnfr documents (values iterated)
        :param db_vnfds: list of vnfd documents (looked up by _id)
        :param task_instantiation_info: dict filled with task -> description entries
        :raises LcmException: on invalid kdu type or any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # Cache of cluster-id -> initialized cluster uuid, one map per cluster type;
        # filled lazily by _get_cluster_id below
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Return (and cache) the initialized cluster uuid for cluster_id/cluster_type.

            Waits for any in-flight k8scluster tasks, reads the cluster record from DB
            and, for helm-chart-v3 only, initializes the environment on demand for
            backward compatibility. Raises LcmException if the cluster is missing or
            not initialized for the requested type.
            """
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            # Reset the deployed-K8s list in the NS record before (re)deploying
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage and storage.get(
                            "pkg-dir"
                        ):  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            filename = "{}/{}/{}s/{}".format(
                                storage["folder"],
                                storage["pkg-dir"],
                                k8sclustertype,
                                kdumodel,
                            )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per helm cluster, tracked per type)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # launch the installation as a background task and register it
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=600,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3259
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Deploy every execution environment of a descriptor's configuration.

        For each execution-environment item (juju charm or helm chart) found in
        descriptor_config: reuse the matching entry in
        db_nsr._admin.deployed.VCA or create a new one, then launch an
        instantiate_N2VC asyncio task and register it.

        :param descriptor_config: configuration section of the NSD/VNFD/vdu/kdu
        :param task_instantiation_info: dict filled with task -> description entries
        :param stage: stage list forwarded to instantiate_N2VC for progress reporting
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the ee item: juju (proxy/native/k8s-proxy)
            # or helm (v2 "helm" / default "helm-v3"); anything else is skipped
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an already-deployed VCA entry matching this target;
            # the for/else creates a fresh entry when none matches
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3412
3413 @staticmethod
3414 def _create_nslcmop(nsr_id, operation, params):
3415 """
3416 Creates a ns-lcm-opp content to be stored at database.
3417 :param nsr_id: internal id of the instance
3418 :param operation: instantiate, terminate, scale, action, ...
3419 :param params: user parameters for the operation
3420 :return: dictionary following SOL005 format
3421 """
3422 # Raise exception if invalid arguments
3423 if not (nsr_id and operation and params):
3424 raise LcmException(
3425 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3426 )
3427 now = time()
3428 _id = str(uuid4())
3429 nslcmop = {
3430 "id": _id,
3431 "_id": _id,
3432 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3433 "operationState": "PROCESSING",
3434 "statusEnteredTime": now,
3435 "nsInstanceId": nsr_id,
3436 "lcmOperationType": operation,
3437 "startTime": now,
3438 "isAutomaticInvocation": False,
3439 "operationParams": params,
3440 "isCancelPending": False,
3441 "links": {
3442 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3443 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3444 },
3445 }
3446 return nslcmop
3447
3448 def _format_additional_params(self, params):
3449 params = params or {}
3450 for key, value in params.items():
3451 if str(value).startswith("!!yaml "):
3452 params[key] = yaml.safe_load(value[7:])
3453 return params
3454
3455 def _get_terminate_primitive_params(self, seq, vnf_index):
3456 primitive = seq.get("name")
3457 primitive_params = {}
3458 params = {
3459 "member_vnf_index": vnf_index,
3460 "primitive": primitive,
3461 "primitive_params": primitive_params,
3462 }
3463 desc_params = {}
3464 return self._map_primitive_params(seq, params, desc_params)
3465
3466 # sub-operations
3467
3468 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3469 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3470 if op.get("operationState") == "COMPLETED":
3471 # b. Skip sub-operation
3472 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3473 return self.SUBOPERATION_STATUS_SKIP
3474 else:
3475 # c. retry executing sub-operation
3476 # The sub-operation exists, and operationState != 'COMPLETED'
3477 # Update operationState = 'PROCESSING' to indicate a retry.
3478 operationState = "PROCESSING"
3479 detailed_status = "In progress"
3480 self._update_suboperation_status(
3481 db_nslcmop, op_index, operationState, detailed_status
3482 )
3483 # Return the sub-operation index
3484 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3485 # with arguments extracted from the sub-operation
3486 return op_index
3487
3488 # Find a sub-operation where all keys in a matching dictionary must match
3489 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3490 def _find_suboperation(self, db_nslcmop, match):
3491 if db_nslcmop and match:
3492 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3493 for i, op in enumerate(op_list):
3494 if all(op.get(k) == match[k] for k in match):
3495 return i
3496 return self.SUBOPERATION_STATUS_NOT_FOUND
3497
3498 # Update status for a sub-operation given its index
3499 def _update_suboperation_status(
3500 self, db_nslcmop, op_index, operationState, detailed_status
3501 ):
3502 # Update DB for HA tasks
3503 q_filter = {"_id": db_nslcmop["_id"]}
3504 update_dict = {
3505 "_admin.operations.{}.operationState".format(op_index): operationState,
3506 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3507 }
3508 self.db.set_one(
3509 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3510 )
3511
3512 # Add sub-operation, return the index of the added sub-operation
3513 # Optionally, set operationState, detailed-status, and operationType
3514 # Status and type are currently set for 'scale' sub-operations:
3515 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3516 # 'detailed-status' : status message
3517 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3518 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3519 def _add_suboperation(
3520 self,
3521 db_nslcmop,
3522 vnf_index,
3523 vdu_id,
3524 vdu_count_index,
3525 vdu_name,
3526 primitive,
3527 mapped_primitive_params,
3528 operationState=None,
3529 detailed_status=None,
3530 operationType=None,
3531 RO_nsr_id=None,
3532 RO_scaling_info=None,
3533 ):
3534 if not db_nslcmop:
3535 return self.SUBOPERATION_STATUS_NOT_FOUND
3536 # Get the "_admin.operations" list, if it exists
3537 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3538 op_list = db_nslcmop_admin.get("operations")
3539 # Create or append to the "_admin.operations" list
3540 new_op = {
3541 "member_vnf_index": vnf_index,
3542 "vdu_id": vdu_id,
3543 "vdu_count_index": vdu_count_index,
3544 "primitive": primitive,
3545 "primitive_params": mapped_primitive_params,
3546 }
3547 if operationState:
3548 new_op["operationState"] = operationState
3549 if detailed_status:
3550 new_op["detailed-status"] = detailed_status
3551 if operationType:
3552 new_op["lcmOperationType"] = operationType
3553 if RO_nsr_id:
3554 new_op["RO_nsr_id"] = RO_nsr_id
3555 if RO_scaling_info:
3556 new_op["RO_scaling_info"] = RO_scaling_info
3557 if not op_list:
3558 # No existing operations, create key 'operations' with current operation as first list element
3559 db_nslcmop_admin.update({"operations": [new_op]})
3560 op_list = db_nslcmop_admin.get("operations")
3561 else:
3562 # Existing operations, append operation to list
3563 op_list.append(new_op)
3564
3565 db_nslcmop_update = {"_admin.operations": op_list}
3566 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3567 op_index = len(op_list) - 1
3568 return op_index
3569
3570 # Helper methods for scale() sub-operations
3571
3572 # pre-scale/post-scale:
3573 # Check for 3 different cases:
3574 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3575 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3576 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3577 def _check_or_add_scale_suboperation(
3578 self,
3579 db_nslcmop,
3580 vnf_index,
3581 vnf_config_primitive,
3582 primitive_params,
3583 operationType,
3584 RO_nsr_id=None,
3585 RO_scaling_info=None,
3586 ):
3587 # Find this sub-operation
3588 if RO_nsr_id and RO_scaling_info:
3589 operationType = "SCALE-RO"
3590 match = {
3591 "member_vnf_index": vnf_index,
3592 "RO_nsr_id": RO_nsr_id,
3593 "RO_scaling_info": RO_scaling_info,
3594 }
3595 else:
3596 match = {
3597 "member_vnf_index": vnf_index,
3598 "primitive": vnf_config_primitive,
3599 "primitive_params": primitive_params,
3600 "lcmOperationType": operationType,
3601 }
3602 op_index = self._find_suboperation(db_nslcmop, match)
3603 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3604 # a. New sub-operation
3605 # The sub-operation does not exist, add it.
3606 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3607 # The following parameters are set to None for all kind of scaling:
3608 vdu_id = None
3609 vdu_count_index = None
3610 vdu_name = None
3611 if RO_nsr_id and RO_scaling_info:
3612 vnf_config_primitive = None
3613 primitive_params = None
3614 else:
3615 RO_nsr_id = None
3616 RO_scaling_info = None
3617 # Initial status for sub-operation
3618 operationState = "PROCESSING"
3619 detailed_status = "In progress"
3620 # Add sub-operation for pre/post-scaling (zero or more operations)
3621 self._add_suboperation(
3622 db_nslcmop,
3623 vnf_index,
3624 vdu_id,
3625 vdu_count_index,
3626 vdu_name,
3627 vnf_config_primitive,
3628 primitive_params,
3629 operationState,
3630 detailed_status,
3631 operationType,
3632 RO_nsr_id,
3633 RO_scaling_info,
3634 )
3635 return self.SUBOPERATION_STATUS_NEW
3636 else:
3637 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3638 # or op_index (operationState != 'COMPLETED')
3639 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3640
3641 # Function to return execution_environment id
3642
3643 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3644 # TODO vdu_index_count
3645 for vca in vca_deployed_list:
3646 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3647 return vca["ee_id"]
3648
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (skipped if destroy_ee=False)
        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop document; sub-operations are recorded on it
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier forwarded to the VCA connector calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # "needed_terminate" flags that at least one config primitive ran,
            # so terminate primitives must be executed before destroying
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # remove the prometheus scrape jobs registered for this VCA, if any
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_id=vca_id,
            )
3752
3753 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3754 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3755 namespace = "." + db_nsr["_id"]
3756 try:
3757 await self.n2vc.delete_namespace(
3758 namespace=namespace,
3759 total_timeout=self.timeout_charm_delete,
3760 vca_id=vca_id,
3761 )
3762 except N2VCNotFound: # already deleted. Skip
3763 pass
3764 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3765
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text:
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id:
        :param nslcmop_id:
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return:
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                # request RO to delete the ns; keep the returned action id so a
                # retried terminate can resume waiting on it
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                # poll RO every 5 seconds until ACTIVE/ERROR or timeout;
                # the while/else raises when the 20 minutes are exhausted
                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # persist progress only when the status text changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    # NOTE(review): asyncio.sleep's "loop" argument was removed
                    # in Python 3.10 — confirm the target runtime version
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns deletion above fully succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete each vnfd registered in RO (only when everything above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
3965
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a Network Service in three stages: (1) prepare and load records,
        (2) run per-VCA terminating primitives, (3) delete all execution
        environments, KDU instances and the RO/VIM deployment.
        Final status is persisted in the nsrs/nslcmops records and notified via
        the "ns"/"terminated" kafka topic.
        :param nsr_id: id of the nsr record (nsrs collection)
        :param nslcmop_id: id of the terminate operation (nslcmops collection)
        :return: None (all results are written to the database)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy: nsr_deployed is mutated below without touching db_nsr
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching the VCA scope (ns/vdu/kdu/vnf)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify result so NBI can autoremove the record if requested
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4293
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of asyncio tasks, collecting errors and reporting progress.

        Progress ("done/total") is written into stage[1] and persisted through
        _write_op_status after every batch of completions. Task failures never
        raise from here; they are accumulated and returned to the caller.
        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping asyncio task -> description text
        :param timeout: overall timeout in seconds for the whole task set
        :param stage: 3-item list [stage, step, VIM-status]; stage[1] is updated
        :param nslcmop_id: operation id used to persist progress and errors
        :param nsr_id: when provided, the error summary is also written to the nsr
        :return: list of detailed error strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            _timeout = timeout + time_start - time()  # remaining overall time
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a one-line log; anything else
                    # is unexpected, so the full traceback is logged
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4370
4371 @staticmethod
4372 def _map_primitive_params(primitive_desc, params, instantiation_params):
4373 """
4374 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4375 The default-value is used. If it is between < > it look for a value at instantiation_params
4376 :param primitive_desc: portion of VNFD/NSD that describes primitive
4377 :param params: Params provided by user
4378 :param instantiation_params: Instantiation params provided by user
4379 :return: a dictionary with the calculated params
4380 """
4381 calculated_params = {}
4382 for parameter in primitive_desc.get("parameter", ()):
4383 param_name = parameter["name"]
4384 if param_name in params:
4385 calculated_params[param_name] = params[param_name]
4386 elif "default-value" in parameter or "value" in parameter:
4387 if "value" in parameter:
4388 calculated_params[param_name] = parameter["value"]
4389 else:
4390 calculated_params[param_name] = parameter["default-value"]
4391 if (
4392 isinstance(calculated_params[param_name], str)
4393 and calculated_params[param_name].startswith("<")
4394 and calculated_params[param_name].endswith(">")
4395 ):
4396 if calculated_params[param_name][1:-1] in instantiation_params:
4397 calculated_params[param_name] = instantiation_params[
4398 calculated_params[param_name][1:-1]
4399 ]
4400 else:
4401 raise LcmException(
4402 "Parameter {} needed to execute primitive {} not provided".format(
4403 calculated_params[param_name], primitive_desc["name"]
4404 )
4405 )
4406 else:
4407 raise LcmException(
4408 "Parameter {} needed to execute primitive {} not provided".format(
4409 param_name, primitive_desc["name"]
4410 )
4411 )
4412
4413 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4414 calculated_params[param_name] = yaml.safe_dump(
4415 calculated_params[param_name], default_flow_style=True, width=256
4416 )
4417 elif isinstance(calculated_params[param_name], str) and calculated_params[
4418 param_name
4419 ].startswith("!!yaml "):
4420 calculated_params[param_name] = calculated_params[param_name][7:]
4421 if parameter.get("data-type") == "INTEGER":
4422 try:
4423 calculated_params[param_name] = int(calculated_params[param_name])
4424 except ValueError: # error converting string to int
4425 raise LcmException(
4426 "Parameter {} of primitive {} must be integer".format(
4427 param_name, primitive_desc["name"]
4428 )
4429 )
4430 elif parameter.get("data-type") == "BOOLEAN":
4431 calculated_params[param_name] = not (
4432 (str(calculated_params[param_name])).lower() == "false"
4433 )
4434
4435 # add always ns_config_info if primitive name is config
4436 if primitive_desc["name"] == "config":
4437 if "ns_config_info" in instantiation_params:
4438 calculated_params["ns_config_info"] = instantiation_params[
4439 "ns_config_info"
4440 ]
4441 return calculated_params
4442
4443 def _look_for_deployed_vca(
4444 self,
4445 deployed_vca,
4446 member_vnf_index,
4447 vdu_id,
4448 vdu_count_index,
4449 kdu_name=None,
4450 ee_descriptor_id=None,
4451 ):
4452 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4453 for vca in deployed_vca:
4454 if not vca:
4455 continue
4456 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4457 continue
4458 if (
4459 vdu_count_index is not None
4460 and vdu_count_index != vca["vdu_count_index"]
4461 ):
4462 continue
4463 if kdu_name and kdu_name != vca["kdu_name"]:
4464 continue
4465 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4466 continue
4467 break
4468 else:
4469 # vca_deployed not found
4470 raise LcmException(
4471 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4472 " is not deployed".format(
4473 member_vnf_index,
4474 vdu_id,
4475 vdu_count_index,
4476 kdu_name,
4477 ee_descriptor_id,
4478 )
4479 )
4480 # get ee_id
4481 ee_id = vca.get("ee_id")
4482 vca_type = vca.get(
4483 "type", "lxc_proxy_charm"
4484 ) # default value for backward compatibility - proxy charm
4485 if not ee_id:
4486 raise LcmException(
4487 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4488 "execution environment".format(
4489 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4490 )
4491 )
4492 return ee_id, vca_type
4493
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """
        Execute a primitive on an already-deployed execution environment,
        optionally retrying on failure.
        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; "config" gets its params wrapped
        :param primitive_params: dict of parameters for the primitive
        :param retries: number of additional attempts after a failure
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: connector key in self.vca_map (default lxc_proxy_charm)
        :param db_dict: where the connector writes status updates
        :param vca_id: id of the VCA to use
        :return: tuple (state, detail) where state is "COMPLETED", "FAILED"
            (retries exhausted) or "FAIL" (unexpected error)
        """
        try:
            if primitive == "config":
                # the config primitive expects its parameters nested under "params"
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4551
    async def vca_status_refresh(self, nsr_id, nslcmop_id):
        """
        Updating the vca_status with latest juju information in nsrs record
        :param: nsr_id: Id of the nsr
        :param: nslcmop_id: Id of the nslcmop
        :return: None
        """

        self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        vca_id = self.get_vca_id({}, db_nsr)
        if db_nsr["_admin"]["deployed"]["K8s"]:
            # KDU-based deployment: refresh each kdu through the k8s update callback
            for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
                cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
                await self._on_update_k8s_db(
                    cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
                )
        else:
            # charm-based deployment: refresh each VCA entry via the n2vc callback
            for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
                table, filter = "nsrs", {"_id": nsr_id}
                path = "_admin.deployed.VCA.{}.".format(vca_index)
                await self._on_update_n2vc_db(table, filter, path, {})

        self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4577
4578 async def action(self, nsr_id, nslcmop_id):
4579 # Try to lock HA task here
4580 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4581 if not task_is_locked_by_me:
4582 return
4583
4584 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4585 self.logger.debug(logging_text + "Enter")
4586 # get all needed from database
4587 db_nsr = None
4588 db_nslcmop = None
4589 db_nsr_update = {}
4590 db_nslcmop_update = {}
4591 nslcmop_operation_state = None
4592 error_description_nslcmop = None
4593 exc = None
4594 try:
4595 # wait for any previous tasks in process
4596 step = "Waiting for previous operations to terminate"
4597 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4598
4599 self._write_ns_status(
4600 nsr_id=nsr_id,
4601 ns_state=None,
4602 current_operation="RUNNING ACTION",
4603 current_operation_id=nslcmop_id,
4604 )
4605
4606 step = "Getting information from database"
4607 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4608 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4609
4610 nsr_deployed = db_nsr["_admin"].get("deployed")
4611 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4612 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4613 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4614 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4615 primitive = db_nslcmop["operationParams"]["primitive"]
4616 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4617 timeout_ns_action = db_nslcmop["operationParams"].get(
4618 "timeout_ns_action", self.timeout_primitive
4619 )
4620
4621 if vnf_index:
4622 step = "Getting vnfr from database"
4623 db_vnfr = self.db.get_one(
4624 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4625 )
4626 step = "Getting vnfd from database"
4627 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4628 else:
4629 step = "Getting nsd from database"
4630 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4631
4632 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4633 # for backward compatibility
4634 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4635 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4636 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4637 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4638
4639 # look for primitive
4640 config_primitive_desc = descriptor_configuration = None
4641 if vdu_id:
4642 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4643 elif kdu_name:
4644 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4645 elif vnf_index:
4646 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4647 else:
4648 descriptor_configuration = db_nsd.get("ns-configuration")
4649
4650 if descriptor_configuration and descriptor_configuration.get(
4651 "config-primitive"
4652 ):
4653 for config_primitive in descriptor_configuration["config-primitive"]:
4654 if config_primitive["name"] == primitive:
4655 config_primitive_desc = config_primitive
4656 break
4657
4658 if not config_primitive_desc:
4659 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4660 raise LcmException(
4661 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4662 primitive
4663 )
4664 )
4665 primitive_name = primitive
4666 ee_descriptor_id = None
4667 else:
4668 primitive_name = config_primitive_desc.get(
4669 "execution-environment-primitive", primitive
4670 )
4671 ee_descriptor_id = config_primitive_desc.get(
4672 "execution-environment-ref"
4673 )
4674
4675 if vnf_index:
4676 if vdu_id:
4677 vdur = next(
4678 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4679 )
4680 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4681 elif kdu_name:
4682 kdur = next(
4683 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4684 )
4685 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4686 else:
4687 desc_params = parse_yaml_strings(
4688 db_vnfr.get("additionalParamsForVnf")
4689 )
4690 else:
4691 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4692 if kdu_name and get_configuration(db_vnfd, kdu_name):
4693 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4694 actions = set()
4695 for primitive in kdu_configuration.get("initial-config-primitive", []):
4696 actions.add(primitive["name"])
4697 for primitive in kdu_configuration.get("config-primitive", []):
4698 actions.add(primitive["name"])
4699 kdu_action = True if primitive_name in actions else False
4700
4701 # TODO check if ns is in a proper status
4702 if kdu_name and (
4703 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4704 ):
4705 # kdur and desc_params already set from before
4706 if primitive_params:
4707 desc_params.update(primitive_params)
4708 # TODO Check if we will need something at vnf level
4709 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
4710 if (
4711 kdu_name == kdu["kdu-name"]
4712 and kdu["member-vnf-index"] == vnf_index
4713 ):
4714 break
4715 else:
4716 raise LcmException(
4717 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
4718 )
4719
4720 if kdu.get("k8scluster-type") not in self.k8scluster_map:
4721 msg = "unknown k8scluster-type '{}'".format(
4722 kdu.get("k8scluster-type")
4723 )
4724 raise LcmException(msg)
4725
4726 db_dict = {
4727 "collection": "nsrs",
4728 "filter": {"_id": nsr_id},
4729 "path": "_admin.deployed.K8s.{}".format(index),
4730 }
4731 self.logger.debug(
4732 logging_text
4733 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
4734 )
4735 step = "Executing kdu {}".format(primitive_name)
4736 if primitive_name == "upgrade":
4737 if desc_params.get("kdu_model"):
4738 kdu_model = desc_params.get("kdu_model")
4739 del desc_params["kdu_model"]
4740 else:
4741 kdu_model = kdu.get("kdu-model")
4742 parts = kdu_model.split(sep=":")
4743 if len(parts) == 2:
4744 kdu_model = parts[0]
4745
4746 detailed_status = await asyncio.wait_for(
4747 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
4748 cluster_uuid=kdu.get("k8scluster-uuid"),
4749 kdu_instance=kdu.get("kdu-instance"),
4750 atomic=True,
4751 kdu_model=kdu_model,
4752 params=desc_params,
4753 db_dict=db_dict,
4754 timeout=timeout_ns_action,
4755 ),
4756 timeout=timeout_ns_action + 10,
4757 )
4758 self.logger.debug(
4759 logging_text + " Upgrade of kdu {} done".format(detailed_status)
4760 )
4761 elif primitive_name == "rollback":
4762 detailed_status = await asyncio.wait_for(
4763 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
4764 cluster_uuid=kdu.get("k8scluster-uuid"),
4765 kdu_instance=kdu.get("kdu-instance"),
4766 db_dict=db_dict,
4767 ),
4768 timeout=timeout_ns_action,
4769 )
4770 elif primitive_name == "status":
4771 detailed_status = await asyncio.wait_for(
4772 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
4773 cluster_uuid=kdu.get("k8scluster-uuid"),
4774 kdu_instance=kdu.get("kdu-instance"),
4775 vca_id=vca_id,
4776 ),
4777 timeout=timeout_ns_action,
4778 )
4779 else:
4780 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
4781 kdu["kdu-name"], nsr_id
4782 )
4783 params = self._map_primitive_params(
4784 config_primitive_desc, primitive_params, desc_params
4785 )
4786
4787 detailed_status = await asyncio.wait_for(
4788 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
4789 cluster_uuid=kdu.get("k8scluster-uuid"),
4790 kdu_instance=kdu_instance,
4791 primitive_name=primitive_name,
4792 params=params,
4793 db_dict=db_dict,
4794 timeout=timeout_ns_action,
4795 vca_id=vca_id,
4796 ),
4797 timeout=timeout_ns_action,
4798 )
4799
4800 if detailed_status:
4801 nslcmop_operation_state = "COMPLETED"
4802 else:
4803 detailed_status = ""
4804 nslcmop_operation_state = "FAILED"
4805 else:
4806 ee_id, vca_type = self._look_for_deployed_vca(
4807 nsr_deployed["VCA"],
4808 member_vnf_index=vnf_index,
4809 vdu_id=vdu_id,
4810 vdu_count_index=vdu_count_index,
4811 ee_descriptor_id=ee_descriptor_id,
4812 )
4813 for vca_index, vca_deployed in enumerate(
4814 db_nsr["_admin"]["deployed"]["VCA"]
4815 ):
4816 if vca_deployed.get("member-vnf-index") == vnf_index:
4817 db_dict = {
4818 "collection": "nsrs",
4819 "filter": {"_id": nsr_id},
4820 "path": "_admin.deployed.VCA.{}.".format(vca_index),
4821 }
4822 break
4823 (
4824 nslcmop_operation_state,
4825 detailed_status,
4826 ) = await self._ns_execute_primitive(
4827 ee_id,
4828 primitive=primitive_name,
4829 primitive_params=self._map_primitive_params(
4830 config_primitive_desc, primitive_params, desc_params
4831 ),
4832 timeout=timeout_ns_action,
4833 vca_type=vca_type,
4834 db_dict=db_dict,
4835 vca_id=vca_id,
4836 )
4837
4838 db_nslcmop_update["detailed-status"] = detailed_status
4839 error_description_nslcmop = (
4840 detailed_status if nslcmop_operation_state == "FAILED" else ""
4841 )
4842 self.logger.debug(
4843 logging_text
4844 + " task Done with result {} {}".format(
4845 nslcmop_operation_state, detailed_status
4846 )
4847 )
4848 return # database update is called inside finally
4849
4850 except (DbException, LcmException, N2VCException, K8sException) as e:
4851 self.logger.error(logging_text + "Exit Exception {}".format(e))
4852 exc = e
4853 except asyncio.CancelledError:
4854 self.logger.error(
4855 logging_text + "Cancelled Exception while '{}'".format(step)
4856 )
4857 exc = "Operation was cancelled"
4858 except asyncio.TimeoutError:
4859 self.logger.error(logging_text + "Timeout while '{}'".format(step))
4860 exc = "Timeout"
4861 except Exception as e:
4862 exc = traceback.format_exc()
4863 self.logger.critical(
4864 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
4865 exc_info=True,
4866 )
4867 finally:
4868 if exc:
4869 db_nslcmop_update[
4870 "detailed-status"
4871 ] = (
4872 detailed_status
4873 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4874 nslcmop_operation_state = "FAILED"
4875 if db_nsr:
4876 self._write_ns_status(
4877 nsr_id=nsr_id,
4878 ns_state=db_nsr[
4879 "nsState"
4880 ], # TODO check if degraded. For the moment use previous status
4881 current_operation="IDLE",
4882 current_operation_id=None,
4883 # error_description=error_description_nsr,
4884 # error_detail=error_detail,
4885 other_update=db_nsr_update,
4886 )
4887
4888 self._write_op_status(
4889 op_id=nslcmop_id,
4890 stage="",
4891 error_message=error_description_nslcmop,
4892 operation_state=nslcmop_operation_state,
4893 other_update=db_nslcmop_update,
4894 )
4895
4896 if nslcmop_operation_state:
4897 try:
4898 await self.msg.aiowrite(
4899 "ns",
4900 "actioned",
4901 {
4902 "nsr_id": nsr_id,
4903 "nslcmop_id": nslcmop_id,
4904 "operationState": nslcmop_operation_state,
4905 },
4906 loop=self.loop,
4907 )
4908 except Exception as e:
4909 self.logger.error(
4910 logging_text + "kafka_write notification Exception {}".format(e)
4911 )
4912 self.logger.debug(logging_text + "Exit")
4913 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
4914 return nslcmop_operation_state, detailed_status
4915
    async def scale(self, nsr_id, nslcmop_id):
        """Scale an NS instance according to an ns-scale operation request.

        Orchestrates the whole scale workflow: pre-scale config primitives,
        VCA (charm) scale-in/out via N2VC, VDU scale via RO, KDU scale via the
        k8s connectors, and post-scale config primitives. Progress and result
        are persisted in the "nsrs" and "nslcmops" collections and a "scaled"
        message is published on the message bus on exit.

        :param nsr_id: _id of the NS record ("nsrs" collection) to scale
        :param nslcmop_id: _id of the driving operation ("nslcmops" collection)
        :return: None; outcome is reported only through the database and kafka
        """
        # Try to lock HA task here: only one LCM instance may run this nslcmop
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        scale_process = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="SCALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # keep previous statuses to restore them once scaling finishes/fails
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            step = "Parsing scaling parameters"
            db_nsr_update["operational-status"] = "scaling"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["member-vnf-index"]
            scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["scaling-group-descriptor"]
            scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
            # for backward compatibility: old records stored VCA as a dict, normalize to a list
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
            )

            vca_id = self.get_vca_id(db_vnfr, db_nsr)

            step = "Getting vnfd from database"
            db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

            base_folder = db_vnfd["_admin"]["storage"]

            step = "Getting scaling-group-descriptor"
            # find the scaling aspect of the vnfd matching the requested group name
            scaling_descriptor = find_in_list(
                get_scaling_aspect(db_vnfd),
                lambda scale_desc: scale_desc["name"] == scaling_group,
            )
            if not scaling_descriptor:
                raise LcmException(
                    "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
                    "at vnfd:scaling-group-descriptor".format(scaling_group)
                )

            step = "Sending scale order to VIM"
            # TODO check if ns is in a proper status
            # nb_scale_op accumulates the net number of scale operations for this group
            nb_scale_op = 0
            if not db_nsr["_admin"].get("scaling-group"):
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {
                        "_admin.scaling-group": [
                            {"name": scaling_group, "nb-scale-op": 0}
                        ]
                    },
                )
                admin_scale_index = 0
            else:
                for admin_scale_index, admin_scale_info in enumerate(
                    db_nsr["_admin"]["scaling-group"]
                ):
                    if admin_scale_info["name"] == scaling_group:
                        nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                        break
                else:  # not found, set index one plus last element and add new entry with the name
                    admin_scale_index += 1
                    db_nsr_update[
                        "_admin.scaling-group.{}.name".format(admin_scale_index)
                    ] = scaling_group

            # vca_scaling_info: per-instance create/delete orders for the VCA (charm) side
            # scaling_info: aggregated orders consumed by RO (vdu) and k8s (kdu) scaling
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
            if scaling_type == "SCALE_OUT":
                if "aspect-delta-details" not in scaling_descriptor:
                    raise LcmException(
                        "Aspect delta details not fount in scaling descriptor {}".format(
                            scaling_descriptor["name"]
                        )
                    )
                # count if max-instance-count is reached
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "OUT"
                scaling_info["vdu-create"] = {}
                scaling_info["kdu-create"] = {}
                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        vdud = get_vdu(db_vnfd, vdu_delta["id"])
                        # vdu_index also provides the number of instance of the targeted vdu
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        cloud_init_text = self._get_vdu_cloud_init_content(
                            vdud, db_vnfd
                        )
                        if cloud_init_text:
                            additional_params = (
                                self._get_vdu_additional_params(db_vnfr, vdud["id"])
                                or {}
                            )
                        cloud_init_list = []

                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        # default ceiling when the profile does not constrain instances
                        max_instance_count = 10
                        if vdu_profile and "max-number-of-instances" in vdu_profile:
                            max_instance_count = vdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdud["id"]
                        )
                        instances_number = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op += instances_number

                        new_instance_count = nb_scale_op + default_instance_num
                        # Control if new count is over max and vdu count is less than max.
                        # Then assign new instance count
                        # NOTE(review): when new_instance_count > max_instance_count the
                        # assignment below is immediately superseded by the raise further
                        # down, and the else branch is a no-op — confirm intent.
                        if new_instance_count > max_instance_count > vdu_count:
                            instances_number = new_instance_count - max_instance_count
                        else:
                            instances_number = instances_number

                        if new_instance_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            if cloud_init_text:
                                # TODO Information of its own ip is not available because db_vnfr is not updated.
                                additional_params["OSM"] = get_osm_params(
                                    db_vnfr, vdu_delta["id"], vdu_index + x
                                )
                                cloud_init_list.append(
                                    self._parse_cloud_init(
                                        cloud_init_text,
                                        additional_params,
                                        db_vnfd["id"],
                                        vdud["id"],
                                    )
                                )
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "vdu_index": vdu_index + x,
                                }
                            )
                        scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        # Might have different kdus in the same delta
                        # Should have list for each kdu
                        if not scaling_info["kdu-create"].get(kdu_name, None):
                            scaling_info["kdu-create"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            # helm v2 scaling is explicitly not supported
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                k8s_cluster_type = "helm-chart"
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdu_name
                                )
                            )

                        max_instance_count = 10
                        if kdu_profile and "max-number-of-instances" in kdu_profile:
                            max_instance_count = kdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        nb_scale_op += kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count is queried live from the k8s cluster
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num + kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # Control if new count is over max and instance_num is less than max.
                        # Then assign max instance number to kdu replica count
                        if kdu_replica_count > max_instance_count > instance_num:
                            kdu_replica_count = max_instance_count
                        if kdu_replica_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "kdu_index": instance_num + x - 1,
                                }
                            )
                        scaling_info["kdu-create"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "create",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )
            elif scaling_type == "SCALE_IN":
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "IN"
                scaling_info["vdu-delete"] = {}
                scaling_info["kdu-delete"] = {}

                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        # vdu_count/vdu_index: current number of instances of this vdu
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        min_instance_count = 0
                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        if vdu_profile and "min-number-of-instances" in vdu_profile:
                            min_instance_count = vdu_profile["min-number-of-instances"]

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdu_delta["id"]
                        )
                        instance_num = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op -= instance_num

                        new_instance_count = nb_scale_op + default_instance_num

                        # clamp the deletion so the floor (min-instance-count) is honored
                        if new_instance_count < min_instance_count < vdu_count:
                            instances_number = min_instance_count - new_instance_count
                        else:
                            instances_number = instance_num

                        if new_instance_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    # delete the highest-indexed instances first
                                    "vdu_index": vdu_index - 1 - x,
                                }
                            )
                        scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        if not scaling_info["kdu-delete"].get(kdu_name, None):
                            scaling_info["kdu-delete"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            # helm v2 scaling is explicitly not supported
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                k8s_cluster_type = "helm-chart"
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
                                )
                            )

                        min_instance_count = 0
                        if kdu_profile and "min-number-of-instances" in kdu_profile:
                            min_instance_count = kdu_profile["min-number-of-instances"]

                        nb_scale_op -= kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count is queried live from the k8s cluster
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num - kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # clamp the target replica count to the declared floor
                        if kdu_replica_count < min_instance_count < instance_num:
                            kdu_replica_count = min_instance_count
                        if kdu_replica_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    "kdu_index": instance_num - x - 1,
                                }
                            )
                        scaling_info["kdu-delete"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "delete",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )

            # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
            vdu_delete = copy(scaling_info.get("vdu-delete"))
            if scaling_info["scaling_direction"] == "IN":
                # walk vdur records from the end so the highest-indexed instances
                # (the ones being removed) are matched first
                for vdur in reversed(db_vnfr["vdur"]):
                    if vdu_delete.get(vdur["vdu-id-ref"]):
                        vdu_delete[vdur["vdu-id-ref"]] -= 1
                        scaling_info["vdu"].append(
                            {
                                "name": vdur.get("name") or vdur.get("vdu-name"),
                                "vdu_id": vdur["vdu-id-ref"],
                                "interface": [],
                            }
                        )
                        for interface in vdur["interfaces"]:
                            scaling_info["vdu"][-1]["interface"].append(
                                {
                                    "name": interface["name"],
                                    "ip_address": interface["ip-address"],
                                    "mac_address": interface.get("mac-address"),
                                }
                            )
            # vdu_delete = vdu_scaling_info.pop("vdu-delete")

            # PRE-SCALE BEGIN
            step = "Executing pre-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "pre-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "pre-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing pre-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
                                "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
                                "primitive".format(scaling_group, vnf_config_primitive)
                            )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring pre-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Pre-scale retry check: Check if this sub-operation has been executed before
                        # (db_nslcmop already carries the operation id, so no separate
                        # nslcmop_id argument is passed — Bug 1556)
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "PRE-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
                        db_nsr_update["config-status"] = old_config_status
                        scale_process = None
            # PRE-SCALE END

            db_nsr_update[
                "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
            ] = nb_scale_op
            db_nsr_update[
                "_admin.scaling-group.{}.time".format(admin_scale_index)
            ] = time()

            # SCALE-IN VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Deleting the execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "delete":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        if vca_info.get("osm_vdu_id"):
                            vdu_id = vca_info["osm_vdu_id"]
                            vdu_index = int(vca_info["vdu_index"])
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index
                            )
                        else:
                            vdu_index = 0
                            # NOTE(review): kdu_id is assigned but only used in the
                            # stage message below — confirm no other use was intended
                            kdu_id = vca_info["osm_kdu_id"]
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
                                member_vnf_index, kdu_id, vdu_index
                            )
                        stage[2] = step = "Scaling in VCA"
                        self._write_op_status(op_id=nslcmop_id, stage=stage)
                        vca_update = db_nsr["_admin"]["deployed"]["VCA"]
                        config_update = db_nsr["configurationStatus"]
                        for vca_index, vca in enumerate(vca_update):
                            # NOTE(review): "(vca or vca.get('ee_id'))" would raise if
                            # vca were falsy; presumably "vca and vca.get('ee_id')" was
                            # meant — confirm before changing
                            if (
                                (vca or vca.get("ee_id"))
                                and vca["member-vnf-index"] == member_vnf_index
                                and vca["vdu_count_index"] == vdu_index
                            ):
                                if vca.get("vdu_id"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("vdu_id")
                                    )
                                elif vca.get("kdu_name"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("kdu_name")
                                    )
                                else:
                                    config_descriptor = get_configuration(
                                        db_vnfd, db_vnfd["id"]
                                    )
                                operation_params = (
                                    db_nslcmop.get("operationParams") or {}
                                )
                                exec_terminate_primitives = not operation_params.get(
                                    "skip_terminate_primitives"
                                ) and vca.get("needed_terminate")
                                task = asyncio.ensure_future(
                                    asyncio.wait_for(
                                        self.destroy_N2VC(
                                            logging_text,
                                            db_nslcmop,
                                            vca,
                                            config_descriptor,
                                            vca_index,
                                            destroy_ee=True,
                                            exec_primitives=exec_terminate_primitives,
                                            scaling_in=True,
                                            vca_id=vca_id,
                                        ),
                                        timeout=self.timeout_charm_delete,
                                    )
                                )
                                tasks_dict_info[task] = "Terminating VCA {}".format(
                                    vca.get("ee_id")
                                )
                                del vca_update[vca_index]
                                del config_update[vca_index]
                        # wait for pending tasks of terminate primitives
                        if tasks_dict_info:
                            self.logger.debug(
                                logging_text
                                + "Waiting for tasks {}".format(
                                    list(tasks_dict_info.keys())
                                )
                            )
                            error_list = await self._wait_for_tasks(
                                logging_text,
                                tasks_dict_info,
                                min(
                                    self.timeout_charm_delete, self.timeout_ns_terminate
                                ),
                                stage,
                                nslcmop_id,
                            )
                            tasks_dict_info.clear()
                            if error_list:
                                raise LcmException("; ".join(error_list))

                        db_vca_and_config_update = {
                            "_admin.deployed.VCA": vca_update,
                            "configurationStatus": config_update,
                        }
                        self.update_db_2(
                            "nsrs", db_nsr["_id"], db_vca_and_config_update
                        )
            scale_process = None
            # SCALE-IN VCA - END

            # SCALE RO - BEGIN
            if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                scale_process = "RO"
                if self.ro_config.get("ng"):
                    await self._scale_ng_ro(
                        logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                    )
                # the vdu orders were consumed by RO; remove them so later
                # consumers (VDU_SCALE_INFO) do not re-process them
                scaling_info.pop("vdu-create", None)
                scaling_info.pop("vdu-delete", None)

                scale_process = None
            # SCALE RO - END

            # SCALE KDU - BEGIN
            if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
                scale_process = "KDU"
                await self._scale_kdu(
                    logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
                )
                scaling_info.pop("kdu-create", None)
                scaling_info.pop("kdu-delete", None)

                scale_process = None
            # SCALE KDU - END

            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # SCALE-UP VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Creating new execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "create":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        vnfd_id = db_vnfr["vnfd-ref"]
                        if vca_info.get("osm_vdu_id"):
                            vdu_index = int(vca_info["vdu_index"])
                            deploy_params = {"OSM": get_osm_params(db_vnfr)}
                            if db_vnfr.get("additionalParamsForVnf"):
                                deploy_params.update(
                                    parse_yaml_strings(
                                        db_vnfr["additionalParamsForVnf"].copy()
                                    )
                                )
                            # vnf-level configuration (if any) is deployed first
                            descriptor_config = get_configuration(
                                db_vnfd, db_vnfd["id"]
                            )
                            if descriptor_config:
                                vdu_id = None
                                vdu_name = None
                                kdu_name = None
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={} ".format(member_vnf_index),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                            # then the vdu-level configuration for the new instance
                            vdu_id = vca_info["osm_vdu_id"]
                            vdur = find_in_list(
                                db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                            )
                            descriptor_config = get_configuration(db_vnfd, vdu_id)
                            if vdur.get("additionalParams"):
                                deploy_params_vdu = parse_yaml_strings(
                                    vdur["additionalParams"]
                                )
                            else:
                                deploy_params_vdu = deploy_params
                            deploy_params_vdu["OSM"] = get_osm_params(
                                db_vnfr, vdu_id, vdu_count_index=vdu_index
                            )
                            if descriptor_config:
                                vdu_name = None
                                kdu_name = None
                                stage[
                                    1
                                ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                )
                                stage[2] = step = "Scaling out VCA"
                                self._write_op_status(op_id=nslcmop_id, stage=stage)
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_id, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                        else:
                            # kdu-backed scale-out: deploy the kdu configuration charm
                            kdu_name = vca_info["osm_kdu_id"]
                            descriptor_config = get_configuration(db_vnfd, kdu_name)
                            if descriptor_config:
                                vdu_id = None
                                kdu_index = int(vca_info["kdu_index"])
                                vdu_name = None
                                kdur = next(
                                    x
                                    for x in db_vnfr["kdur"]
                                    if x["kdu-name"] == kdu_name
                                )
                                deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                                if kdur.get("additionalParams"):
                                    deploy_params_kdu = parse_yaml_strings(
                                        kdur["additionalParams"]
                                    )

                                self._deploy_n2vc(
                                    logging_text=logging_text,
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=kdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_kdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
            # SCALE-UP VCA - END
            scale_process = None

            # POST-SCALE BEGIN
            # execute primitive service POST-SCALING
            step = "Executing post-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "post-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "post-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing post-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
                                "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
                                "config-primitive".format(
                                    scaling_group, vnf_config_primitive
                                )
                            )
                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring post-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Post-scale retry check: Check if this sub-operation has been executed before
                        # (db_nslcmop already carries the operation id, so no separate
                        # nslcmop_id argument is passed — Bug 1556)
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "POST-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
                        db_nsr_update["config-status"] = old_config_status
                        scale_process = None
            # POST-SCALE END

            # success: restore previous statuses (recover from "failed" if applicable)
            db_nsr_update[
                "detailed-status"
            ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
            db_nsr_update["operational-status"] = (
                "running"
                if old_operational_status == "failed"
                else old_operational_status
            )
            db_nsr_update["config-status"] = old_config_status
            return
        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # always clear the "SCALING" flag on the nsr, even on failure
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update["detailed-status"] = ""
                    # scale_process tells which stage failed so the matching status
                    # field can be flagged as failed
                    if scale_process:
                        if "VCA" in scale_process:
                            db_nsr_update["config-status"] = "failed"
                        if "RO" in scale_process:
                            db_nsr_update["operational-status"] = "failed"
                        db_nsr_update[
                            "detailed-status"
                        ] = "FAILED scaling nslcmop={} {}: {}".format(
                            nslcmop_id, step, exc
                        )
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify the result on the message bus (best effort)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
5978
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale the KDUs of an NS instance through the matching K8s connector.

        For each kdu scaling entry: optionally run terminate-config-primitives
        (on "delete"), perform the scale operation, then optionally run
        initial-config-primitives (on "create").

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id, used to address the nsr document updated by
            the K8s connector
        :param nsr_deployed: nsr "_admin.deployed" content, used to locate each
            deployed kdu and its index
        :param db_vnfd: VNF descriptor holding the kdu configuration
        :param vca_id: VCA id forwarded to every K8s connector call
        :param scaling_info: dict with "kdu-create" and/or "kdu-delete" keys,
            each mapping kdu_name -> list of scaling operation dicts
        """
        # NOTE: only one of the two keys is processed per call; "kdu-create"
        # takes precedence if both are present
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed kdu record and its index within
                # "_admin.deployed.K8s"
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db location handed to the K8s connector (presumably for
                # writing operation status back into the nsr document)
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run terminate-config-primitives directly only when no
                    # juju execution environment is responsible for them
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must execute in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # the scale operation itself runs for both "create" and
                # "delete" entries
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run initial-config-primitives directly only when no
                    # juju execution environment is responsible for them
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must execute in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
6079
6080 async def _scale_ng_ro(
6081 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6082 ):
6083 nsr_id = db_nslcmop["nsInstanceId"]
6084 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6085 db_vnfrs = {}
6086
6087 # read from db: vnfd's for every vnf
6088 db_vnfds = []
6089
6090 # for each vnf in ns, read vnfd
6091 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6092 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6093 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6094 # if we haven't this vnfd, read it from db
6095 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6096 # read from db
6097 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6098 db_vnfds.append(vnfd)
6099 n2vc_key = self.n2vc.get_public_key()
6100 n2vc_key_list = [n2vc_key]
6101 self.scale_vnfr(
6102 db_vnfr,
6103 vdu_scaling_info.get("vdu-create"),
6104 vdu_scaling_info.get("vdu-delete"),
6105 mark_delete=True,
6106 )
6107 # db_vnfr has been updated, update db_vnfrs to use it
6108 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6109 await self._instantiate_ng_ro(
6110 logging_text,
6111 nsr_id,
6112 db_nsd,
6113 db_nsr,
6114 db_nslcmop,
6115 db_vnfrs,
6116 db_vnfds,
6117 n2vc_key_list,
6118 stage=stage,
6119 start_deploy=time(),
6120 timeout_ns_deploy=self.timeout_ns_deploy,
6121 )
6122 if vdu_scaling_info.get("vdu-delete"):
6123 self.scale_vnfr(
6124 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6125 )
6126
6127 async def add_prometheus_metrics(
6128 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6129 ):
6130 if not self.prometheus:
6131 return
6132 # look if exist a file called 'prometheus*.j2' and
6133 artifact_content = self.fs.dir_ls(artifact_path)
6134 job_file = next(
6135 (
6136 f
6137 for f in artifact_content
6138 if f.startswith("prometheus") and f.endswith(".j2")
6139 ),
6140 None,
6141 )
6142 if not job_file:
6143 return
6144 with self.fs.file_open((artifact_path, job_file), "r") as f:
6145 job_data = f.read()
6146
6147 # TODO get_service
6148 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6149 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6150 host_port = "80"
6151 vnfr_id = vnfr_id.replace("-", "")
6152 variables = {
6153 "JOB_NAME": vnfr_id,
6154 "TARGET_IP": target_ip,
6155 "EXPORTER_POD_IP": host_name,
6156 "EXPORTER_POD_PORT": host_port,
6157 }
6158 job_list = self.prometheus.parse_job(job_data, variables)
6159 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
6160 for job in job_list:
6161 if (
6162 not isinstance(job.get("job_name"), str)
6163 or vnfr_id not in job["job_name"]
6164 ):
6165 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6166 job["nsr_id"] = nsr_id
6167 job_dict = {jl["job_name"]: jl for jl in job_list}
6168 if await self.prometheus.update(job_dict):
6169 return list(job_dict.keys())
6170
6171 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6172 """
6173 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6174
6175 :param: vim_account_id: VIM Account ID
6176
6177 :return: (cloud_name, cloud_credential)
6178 """
6179 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6180 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6181
6182 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6183 """
6184 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6185
6186 :param: vim_account_id: VIM Account ID
6187
6188 :return: (cloud_name, cloud_credential)
6189 """
6190 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6191 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")