Bugfix 1550: Setting a custom release name for Helm based kdus
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import (
26 Environment,
27 TemplateError,
28 TemplateNotFound,
29 StrictUndefined,
30 UndefinedError,
31 )
32
33 from osm_lcm import ROclient
34 from osm_lcm.data_utils.nsr import get_deployed_kdu
35 from osm_lcm.ng_ro import NgRoClient, NgRoException
36 from osm_lcm.lcm_utils import (
37 LcmException,
38 LcmExceptionNoMgmtIP,
39 LcmBase,
40 deep_get,
41 get_iterable,
42 populate_dict,
43 )
44 from osm_lcm.data_utils.nsd import get_vnf_profiles
45 from osm_lcm.data_utils.vnfd import (
46 get_vdu_list,
47 get_vdu_profile,
48 get_ee_sorted_initial_config_primitive_list,
49 get_ee_sorted_terminate_config_primitive_list,
50 get_kdu_list,
51 get_virtual_link_profiles,
52 get_vdu,
53 get_configuration,
54 get_vdu_index,
55 get_scaling_aspect,
56 get_number_of_instances,
57 get_juju_ee_ref,
58 get_kdu_profile,
59 )
60 from osm_lcm.data_utils.list_utils import find_in_list
61 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
62 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
63 from osm_lcm.data_utils.database.vim_account import VimAccountDB
64 from n2vc.k8s_helm_conn import K8sHelmConnector
65 from n2vc.k8s_helm3_conn import K8sHelm3Connector
66 from n2vc.k8s_juju_conn import K8sJujuConnector
67
68 from osm_common.dbbase import DbException
69 from osm_common.fsbase import FsException
70
71 from osm_lcm.data_utils.database.database import Database
72 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
73
74 from n2vc.n2vc_juju_conn import N2VCJujuConnector
75 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
76
77 from osm_lcm.lcm_helm_conn import LCMHelmConn
78
79 from copy import copy, deepcopy
80 from time import time
81 from uuid import uuid4
82
83 from random import randint
84
85 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
86
87
class NsLcm(LcmBase):
    # NS lifecycle manager: drives instantiation, scaling and termination of
    # Network Services, coordinating RO, N2VC/Juju and K8s (helm/juju) connectors.

    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution

    # NOTE(review): sentinel codes, presumably returned by sub-operation lookup
    # helpers defined elsewhere in this class — confirm against callers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
104
    def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later mutations do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connector for helm v2 charts
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # K8s connector for helm v3 charts
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # K8s connector for juju bundles; status changes are written back to nsrs
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # kdu deployment type -> k8s connector (note: "chart" maps to helm v3)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # vca deployment type -> connector that manages the execution environment
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        self.prometheus = prometheus

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)
186
187 @staticmethod
188 def increment_ip_mac(ip_mac, vm_index=1):
189 if not isinstance(ip_mac, str):
190 return ip_mac
191 try:
192 # try with ipv4 look for last dot
193 i = ip_mac.rfind(".")
194 if i > 0:
195 i += 1
196 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
197 # try with ipv6 or mac look for last colon. Operate in hex
198 i = ip_mac.rfind(":")
199 if i > 0:
200 i += 1
201 # format in hex, len can be 2 for mac or 4 for ipv6
202 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
203 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
204 )
205 except Exception:
206 pass
207 return None
208
209 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
210
211 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
212
213 try:
214 # TODO filter RO descriptor fields...
215
216 # write to database
217 db_dict = dict()
218 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
219 db_dict["deploymentStatus"] = ro_descriptor
220 self.update_db_2("nsrs", nsrs_id, db_dict)
221
222 except Exception as e:
223 self.logger.warn(
224 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
225 )
226
227 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
228
229 # remove last dot from path (if exists)
230 if path.endswith("."):
231 path = path[:-1]
232
233 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
234 # .format(table, filter, path, updated_data))
235 try:
236
237 nsr_id = filter.get("_id")
238
239 # read ns record from database
240 nsr = self.db.get_one(table="nsrs", q_filter=filter)
241 current_ns_status = nsr.get("nsState")
242
243 # get vca status for NS
244 status_dict = await self.n2vc.get_status(
245 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
246 )
247
248 # vcaStatus
249 db_dict = dict()
250 db_dict["vcaStatus"] = status_dict
251 await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
252
253 # update configurationStatus for this VCA
254 try:
255 vca_index = int(path[path.rfind(".") + 1 :])
256
257 vca_list = deep_get(
258 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
259 )
260 vca_status = vca_list[vca_index].get("status")
261
262 configuration_status_list = nsr.get("configurationStatus")
263 config_status = configuration_status_list[vca_index].get("status")
264
265 if config_status == "BROKEN" and vca_status != "failed":
266 db_dict["configurationStatus"][vca_index] = "READY"
267 elif config_status != "BROKEN" and vca_status == "failed":
268 db_dict["configurationStatus"][vca_index] = "BROKEN"
269 except Exception as e:
270 # not update configurationStatus
271 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
272
273 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
274 # if nsState = 'DEGRADED' check if all is OK
275 is_degraded = False
276 if current_ns_status in ("READY", "DEGRADED"):
277 error_description = ""
278 # check machines
279 if status_dict.get("machines"):
280 for machine_id in status_dict.get("machines"):
281 machine = status_dict.get("machines").get(machine_id)
282 # check machine agent-status
283 if machine.get("agent-status"):
284 s = machine.get("agent-status").get("status")
285 if s != "started":
286 is_degraded = True
287 error_description += (
288 "machine {} agent-status={} ; ".format(
289 machine_id, s
290 )
291 )
292 # check machine instance status
293 if machine.get("instance-status"):
294 s = machine.get("instance-status").get("status")
295 if s != "running":
296 is_degraded = True
297 error_description += (
298 "machine {} instance-status={} ; ".format(
299 machine_id, s
300 )
301 )
302 # check applications
303 if status_dict.get("applications"):
304 for app_id in status_dict.get("applications"):
305 app = status_dict.get("applications").get(app_id)
306 # check application status
307 if app.get("status"):
308 s = app.get("status").get("status")
309 if s != "active":
310 is_degraded = True
311 error_description += (
312 "application {} status={} ; ".format(app_id, s)
313 )
314
315 if error_description:
316 db_dict["errorDescription"] = error_description
317 if current_ns_status == "READY" and is_degraded:
318 db_dict["nsState"] = "DEGRADED"
319 if current_ns_status == "DEGRADED" and not is_degraded:
320 db_dict["nsState"] = "READY"
321
322 # write to database
323 self.update_db_2("nsrs", nsr_id, db_dict)
324
325 except (asyncio.CancelledError, asyncio.TimeoutError):
326 raise
327 except Exception as e:
328 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
329
330 async def _on_update_k8s_db(
331 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
332 ):
333 """
334 Updating vca status in NSR record
335 :param cluster_uuid: UUID of a k8s cluster
336 :param kdu_instance: The unique name of the KDU instance
337 :param filter: To get nsr_id
338 :return: none
339 """
340
341 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
342 # .format(cluster_uuid, kdu_instance, filter))
343
344 try:
345 nsr_id = filter.get("_id")
346
347 # get vca status for NS
348 vca_status = await self.k8sclusterjuju.status_kdu(
349 cluster_uuid,
350 kdu_instance,
351 complete_status=True,
352 yaml_format=False,
353 vca_id=vca_id,
354 )
355 # vcaStatus
356 db_dict = dict()
357 db_dict["vcaStatus"] = {nsr_id: vca_status}
358
359 await self.k8sclusterjuju.update_vca_status(
360 db_dict["vcaStatus"],
361 kdu_instance,
362 vca_id=vca_id,
363 )
364
365 # write to database
366 self.update_db_2("nsrs", nsr_id, db_dict)
367
368 except (asyncio.CancelledError, asyncio.TimeoutError):
369 raise
370 except Exception as e:
371 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
372
373 @staticmethod
374 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
375 try:
376 env = Environment(undefined=StrictUndefined)
377 template = env.from_string(cloud_init_text)
378 return template.render(additional_params or {})
379 except UndefinedError as e:
380 raise LcmException(
381 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
382 "file, must be provided in the instantiation parameters inside the "
383 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
384 )
385 except (TemplateError, TemplateNotFound) as e:
386 raise LcmException(
387 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
388 vnfd_id, vdu_id, e
389 )
390 )
391
392 def _get_vdu_cloud_init_content(self, vdu, vnfd):
393 cloud_init_content = cloud_init_file = None
394 try:
395 if vdu.get("cloud-init-file"):
396 base_folder = vnfd["_admin"]["storage"]
397 cloud_init_file = "{}/{}/cloud_init/{}".format(
398 base_folder["folder"],
399 base_folder["pkg-dir"],
400 vdu["cloud-init-file"],
401 )
402 with self.fs.file_open(cloud_init_file, "r") as ci_file:
403 cloud_init_content = ci_file.read()
404 elif vdu.get("cloud-init"):
405 cloud_init_content = vdu["cloud-init"]
406
407 return cloud_init_content
408 except FsException as e:
409 raise LcmException(
410 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
411 vnfd["id"], vdu["id"], cloud_init_file, e
412 )
413 )
414
415 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
416 vdur = next(
417 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
418 )
419 additional_params = vdur.get("additionalParams")
420 return parse_yaml_strings(additional_params)
421
422 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
423 """
424 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
425 :param vnfd: input vnfd
426 :param new_id: overrides vnf id if provided
427 :param additionalParams: Instantiation params for VNFs provided
428 :param nsrId: Id of the NSR
429 :return: copy of vnfd
430 """
431 vnfd_RO = deepcopy(vnfd)
432 # remove unused by RO configuration, monitoring, scaling and internal keys
433 vnfd_RO.pop("_id", None)
434 vnfd_RO.pop("_admin", None)
435 vnfd_RO.pop("monitoring-param", None)
436 vnfd_RO.pop("scaling-group-descriptor", None)
437 vnfd_RO.pop("kdu", None)
438 vnfd_RO.pop("k8s-cluster", None)
439 if new_id:
440 vnfd_RO["id"] = new_id
441
442 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
443 for vdu in get_iterable(vnfd_RO, "vdu"):
444 vdu.pop("cloud-init-file", None)
445 vdu.pop("cloud-init", None)
446 return vnfd_RO
447
448 @staticmethod
449 def ip_profile_2_RO(ip_profile):
450 RO_ip_profile = deepcopy(ip_profile)
451 if "dns-server" in RO_ip_profile:
452 if isinstance(RO_ip_profile["dns-server"], list):
453 RO_ip_profile["dns-address"] = []
454 for ds in RO_ip_profile.pop("dns-server"):
455 RO_ip_profile["dns-address"].append(ds["address"])
456 else:
457 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
458 if RO_ip_profile.get("ip-version") == "ipv4":
459 RO_ip_profile["ip-version"] = "IPv4"
460 if RO_ip_profile.get("ip-version") == "ipv6":
461 RO_ip_profile["ip-version"] = "IPv6"
462 if "dhcp-params" in RO_ip_profile:
463 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
464 return RO_ip_profile
465
466 def _get_ro_vim_id_for_vim_account(self, vim_account):
467 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
468 if db_vim["_admin"]["operationalState"] != "ENABLED":
469 raise LcmException(
470 "VIM={} is not available. operationalState={}".format(
471 vim_account, db_vim["_admin"]["operationalState"]
472 )
473 )
474 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
475 return RO_vim_id
476
477 def get_ro_wim_id_for_wim_account(self, wim_account):
478 if isinstance(wim_account, str):
479 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
480 if db_wim["_admin"]["operationalState"] != "ENABLED":
481 raise LcmException(
482 "WIM={} is not available. operationalState={}".format(
483 wim_account, db_wim["_admin"]["operationalState"]
484 )
485 )
486 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
487 return RO_wim_id
488 else:
489 return wim_account
490
491 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
492
493 db_vdu_push_list = []
494 db_update = {"_admin.modified": time()}
495 if vdu_create:
496 for vdu_id, vdu_count in vdu_create.items():
497 vdur = next(
498 (
499 vdur
500 for vdur in reversed(db_vnfr["vdur"])
501 if vdur["vdu-id-ref"] == vdu_id
502 ),
503 None,
504 )
505 if not vdur:
506 raise LcmException(
507 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
508 vdu_id
509 )
510 )
511
512 for count in range(vdu_count):
513 vdur_copy = deepcopy(vdur)
514 vdur_copy["status"] = "BUILD"
515 vdur_copy["status-detailed"] = None
516 vdur_copy["ip-address"]: None
517 vdur_copy["_id"] = str(uuid4())
518 vdur_copy["count-index"] += count + 1
519 vdur_copy["id"] = "{}-{}".format(
520 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
521 )
522 vdur_copy.pop("vim_info", None)
523 for iface in vdur_copy["interfaces"]:
524 if iface.get("fixed-ip"):
525 iface["ip-address"] = self.increment_ip_mac(
526 iface["ip-address"], count + 1
527 )
528 else:
529 iface.pop("ip-address", None)
530 if iface.get("fixed-mac"):
531 iface["mac-address"] = self.increment_ip_mac(
532 iface["mac-address"], count + 1
533 )
534 else:
535 iface.pop("mac-address", None)
536 iface.pop(
537 "mgmt_vnf", None
538 ) # only first vdu can be managment of vnf
539 db_vdu_push_list.append(vdur_copy)
540 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
541 if vdu_delete:
542 for vdu_id, vdu_count in vdu_delete.items():
543 if mark_delete:
544 indexes_to_delete = [
545 iv[0]
546 for iv in enumerate(db_vnfr["vdur"])
547 if iv[1]["vdu-id-ref"] == vdu_id
548 ]
549 db_update.update(
550 {
551 "vdur.{}.status".format(i): "DELETING"
552 for i in indexes_to_delete[-vdu_count:]
553 }
554 )
555 else:
556 # it must be deleted one by one because common.db does not allow otherwise
557 vdus_to_delete = [
558 v
559 for v in reversed(db_vnfr["vdur"])
560 if v["vdu-id-ref"] == vdu_id
561 ]
562 for vdu in vdus_to_delete[:vdu_count]:
563 self.db.set_one(
564 "vnfrs",
565 {"_id": db_vnfr["_id"]},
566 None,
567 pull={"vdur": {"_id": vdu["_id"]}},
568 )
569 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
570 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
571 # modify passed dictionary db_vnfr
572 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
573 db_vnfr["vdur"] = db_vnfr_["vdur"]
574
575 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
576 """
577 Updates database nsr with the RO info for the created vld
578 :param ns_update_nsr: dictionary to be filled with the updated info
579 :param db_nsr: content of db_nsr. This is also modified
580 :param nsr_desc_RO: nsr descriptor from RO
581 :return: Nothing, LcmException is raised on errors
582 """
583
584 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
585 for net_RO in get_iterable(nsr_desc_RO, "nets"):
586 if vld["id"] != net_RO.get("ns_net_osm_id"):
587 continue
588 vld["vim-id"] = net_RO.get("vim_net_id")
589 vld["name"] = net_RO.get("vim_name")
590 vld["status"] = net_RO.get("status")
591 vld["status-detailed"] = net_RO.get("error_msg")
592 ns_update_nsr["vld.{}".format(vld_index)] = vld
593 break
594 else:
595 raise LcmException(
596 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
597 )
598
599 def set_vnfr_at_error(self, db_vnfrs, error_text):
600 try:
601 for db_vnfr in db_vnfrs.values():
602 vnfr_update = {"status": "ERROR"}
603 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
604 if "status" not in vdur:
605 vdur["status"] = "ERROR"
606 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
607 if error_text:
608 vdur["status-detailed"] = str(error_text)
609 vnfr_update[
610 "vdur.{}.status-detailed".format(vdu_index)
611 ] = "ERROR"
612 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
613 except DbException as e:
614 self.logger.error("Cannot update vnf. {}".format(e))
615
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf matching this member index; the for/else raises if none matches
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO, skip
                        continue
                    # find the RO vm with the same vdu id and replica index
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but another replica: keep counting
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy interface addresses, matching by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # copy internal vld info from RO, matching by vnf_net_osm_id
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
712
713 def _get_ns_config_info(self, nsr_id):
714 """
715 Generates a mapping between vnf,vdu elements and the N2VC id
716 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
717 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
718 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
719 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
720 """
721 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
722 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
723 mapping = {}
724 ns_config_info = {"osm-config-mapping": mapping}
725 for vca in vca_deployed_list:
726 if not vca["member-vnf-index"]:
727 continue
728 if not vca["vdu_id"]:
729 mapping[vca["member-vnf-index"]] = vca["application"]
730 else:
731 mapping[
732 "{}.{}.{}".format(
733 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
734 )
735 ] = vca["application"]
736 return ns_config_info
737
738 async def _instantiate_ng_ro(
739 self,
740 logging_text,
741 nsr_id,
742 nsd,
743 db_nsr,
744 db_nslcmop,
745 db_vnfrs,
746 db_vnfds,
747 n2vc_key_list,
748 stage,
749 start_deploy,
750 timeout_ns_deploy,
751 ):
752
753 db_vims = {}
754
755 def get_vim_account(vim_account_id):
756 nonlocal db_vims
757 if vim_account_id in db_vims:
758 return db_vims[vim_account_id]
759 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
760 db_vims[vim_account_id] = db_vim
761 return db_vim
762
763 # modify target_vld info with instantiation parameters
764 def parse_vld_instantiation_params(
765 target_vim, target_vld, vld_params, target_sdn
766 ):
767 if vld_params.get("ip-profile"):
768 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
769 "ip-profile"
770 ]
771 if vld_params.get("provider-network"):
772 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
773 "provider-network"
774 ]
775 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
776 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
777 "provider-network"
778 ]["sdn-ports"]
779 if vld_params.get("wimAccountId"):
780 target_wim = "wim:{}".format(vld_params["wimAccountId"])
781 target_vld["vim_info"][target_wim] = {}
782 for param in ("vim-network-name", "vim-network-id"):
783 if vld_params.get(param):
784 if isinstance(vld_params[param], dict):
785 for vim, vim_net in vld_params[param].items():
786 other_target_vim = "vim:" + vim
787 populate_dict(
788 target_vld["vim_info"],
789 (other_target_vim, param.replace("-", "_")),
790 vim_net,
791 )
792 else: # isinstance str
793 target_vld["vim_info"][target_vim][
794 param.replace("-", "_")
795 ] = vld_params[param]
796 if vld_params.get("common_id"):
797 target_vld["common_id"] = vld_params.get("common_id")
798
799 nslcmop_id = db_nslcmop["_id"]
800 target = {
801 "name": db_nsr["name"],
802 "ns": {"vld": []},
803 "vnf": [],
804 "image": deepcopy(db_nsr["image"]),
805 "flavor": deepcopy(db_nsr["flavor"]),
806 "action_id": nslcmop_id,
807 "cloud_init_content": {},
808 }
809 for image in target["image"]:
810 image["vim_info"] = {}
811 for flavor in target["flavor"]:
812 flavor["vim_info"] = {}
813
814 if db_nslcmop.get("lcmOperationType") != "instantiate":
815 # get parameters of instantiation:
816 db_nslcmop_instantiate = self.db.get_list(
817 "nslcmops",
818 {
819 "nsInstanceId": db_nslcmop["nsInstanceId"],
820 "lcmOperationType": "instantiate",
821 },
822 )[-1]
823 ns_params = db_nslcmop_instantiate.get("operationParams")
824 else:
825 ns_params = db_nslcmop.get("operationParams")
826 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
827 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
828
829 cp2target = {}
830 for vld_index, vld in enumerate(db_nsr.get("vld")):
831 target_vim = "vim:{}".format(ns_params["vimAccountId"])
832 target_vld = {
833 "id": vld["id"],
834 "name": vld["name"],
835 "mgmt-network": vld.get("mgmt-network", False),
836 "type": vld.get("type"),
837 "vim_info": {
838 target_vim: {
839 "vim_network_name": vld.get("vim-network-name"),
840 "vim_account_id": ns_params["vimAccountId"],
841 }
842 },
843 }
844 # check if this network needs SDN assist
845 if vld.get("pci-interfaces"):
846 db_vim = get_vim_account(ns_params["vimAccountId"])
847 sdnc_id = db_vim["config"].get("sdn-controller")
848 if sdnc_id:
849 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
850 target_sdn = "sdn:{}".format(sdnc_id)
851 target_vld["vim_info"][target_sdn] = {
852 "sdn": True,
853 "target_vim": target_vim,
854 "vlds": [sdn_vld],
855 "type": vld.get("type"),
856 }
857
858 nsd_vnf_profiles = get_vnf_profiles(nsd)
859 for nsd_vnf_profile in nsd_vnf_profiles:
860 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
861 if cp["virtual-link-profile-id"] == vld["id"]:
862 cp2target[
863 "member_vnf:{}.{}".format(
864 cp["constituent-cpd-id"][0][
865 "constituent-base-element-id"
866 ],
867 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
868 )
869 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
870
871 # check at nsd descriptor, if there is an ip-profile
872 vld_params = {}
873 nsd_vlp = find_in_list(
874 get_virtual_link_profiles(nsd),
875 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
876 == vld["id"],
877 )
878 if (
879 nsd_vlp
880 and nsd_vlp.get("virtual-link-protocol-data")
881 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
882 ):
883 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
884 "l3-protocol-data"
885 ]
886 ip_profile_dest_data = {}
887 if "ip-version" in ip_profile_source_data:
888 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
889 "ip-version"
890 ]
891 if "cidr" in ip_profile_source_data:
892 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
893 "cidr"
894 ]
895 if "gateway-ip" in ip_profile_source_data:
896 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
897 "gateway-ip"
898 ]
899 if "dhcp-enabled" in ip_profile_source_data:
900 ip_profile_dest_data["dhcp-params"] = {
901 "enabled": ip_profile_source_data["dhcp-enabled"]
902 }
903 vld_params["ip-profile"] = ip_profile_dest_data
904
905 # update vld_params with instantiation params
906 vld_instantiation_params = find_in_list(
907 get_iterable(ns_params, "vld"),
908 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
909 )
910 if vld_instantiation_params:
911 vld_params.update(vld_instantiation_params)
912 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
913 target["ns"]["vld"].append(target_vld)
914
915 for vnfr in db_vnfrs.values():
916 vnfd = find_in_list(
917 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
918 )
919 vnf_params = find_in_list(
920 get_iterable(ns_params, "vnf"),
921 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
922 )
923 target_vnf = deepcopy(vnfr)
924 target_vim = "vim:{}".format(vnfr["vim-account-id"])
925 for vld in target_vnf.get("vld", ()):
926 # check if connected to a ns.vld, to fill target'
927 vnf_cp = find_in_list(
928 vnfd.get("int-virtual-link-desc", ()),
929 lambda cpd: cpd.get("id") == vld["id"],
930 )
931 if vnf_cp:
932 ns_cp = "member_vnf:{}.{}".format(
933 vnfr["member-vnf-index-ref"], vnf_cp["id"]
934 )
935 if cp2target.get(ns_cp):
936 vld["target"] = cp2target[ns_cp]
937
938 vld["vim_info"] = {
939 target_vim: {"vim_network_name": vld.get("vim-network-name")}
940 }
941 # check if this network needs SDN assist
942 target_sdn = None
943 if vld.get("pci-interfaces"):
944 db_vim = get_vim_account(vnfr["vim-account-id"])
945 sdnc_id = db_vim["config"].get("sdn-controller")
946 if sdnc_id:
947 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
948 target_sdn = "sdn:{}".format(sdnc_id)
949 vld["vim_info"][target_sdn] = {
950 "sdn": True,
951 "target_vim": target_vim,
952 "vlds": [sdn_vld],
953 "type": vld.get("type"),
954 }
955
956 # check at vnfd descriptor, if there is an ip-profile
957 vld_params = {}
958 vnfd_vlp = find_in_list(
959 get_virtual_link_profiles(vnfd),
960 lambda a_link_profile: a_link_profile["id"] == vld["id"],
961 )
962 if (
963 vnfd_vlp
964 and vnfd_vlp.get("virtual-link-protocol-data")
965 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
966 ):
967 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
968 "l3-protocol-data"
969 ]
970 ip_profile_dest_data = {}
971 if "ip-version" in ip_profile_source_data:
972 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
973 "ip-version"
974 ]
975 if "cidr" in ip_profile_source_data:
976 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
977 "cidr"
978 ]
979 if "gateway-ip" in ip_profile_source_data:
980 ip_profile_dest_data[
981 "gateway-address"
982 ] = ip_profile_source_data["gateway-ip"]
983 if "dhcp-enabled" in ip_profile_source_data:
984 ip_profile_dest_data["dhcp-params"] = {
985 "enabled": ip_profile_source_data["dhcp-enabled"]
986 }
987
988 vld_params["ip-profile"] = ip_profile_dest_data
989 # update vld_params with instantiation params
990 if vnf_params:
991 vld_instantiation_params = find_in_list(
992 get_iterable(vnf_params, "internal-vld"),
993 lambda i_vld: i_vld["name"] == vld["id"],
994 )
995 if vld_instantiation_params:
996 vld_params.update(vld_instantiation_params)
997 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
998
999 vdur_list = []
1000 for vdur in target_vnf.get("vdur", ()):
1001 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1002 continue # This vdu must not be created
1003 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1004
1005 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1006
1007 if ssh_keys_all:
1008 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1009 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1010 if (
1011 vdu_configuration
1012 and vdu_configuration.get("config-access")
1013 and vdu_configuration.get("config-access").get("ssh-access")
1014 ):
1015 vdur["ssh-keys"] = ssh_keys_all
1016 vdur["ssh-access-required"] = vdu_configuration[
1017 "config-access"
1018 ]["ssh-access"]["required"]
1019 elif (
1020 vnf_configuration
1021 and vnf_configuration.get("config-access")
1022 and vnf_configuration.get("config-access").get("ssh-access")
1023 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1024 ):
1025 vdur["ssh-keys"] = ssh_keys_all
1026 vdur["ssh-access-required"] = vnf_configuration[
1027 "config-access"
1028 ]["ssh-access"]["required"]
1029 elif ssh_keys_instantiation and find_in_list(
1030 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1031 ):
1032 vdur["ssh-keys"] = ssh_keys_instantiation
1033
1034 self.logger.debug("NS > vdur > {}".format(vdur))
1035
1036 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1037 # cloud-init
1038 if vdud.get("cloud-init-file"):
1039 vdur["cloud-init"] = "{}:file:{}".format(
1040 vnfd["_id"], vdud.get("cloud-init-file")
1041 )
1042 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1043 if vdur["cloud-init"] not in target["cloud_init_content"]:
1044 base_folder = vnfd["_admin"]["storage"]
1045 cloud_init_file = "{}/{}/cloud_init/{}".format(
1046 base_folder["folder"],
1047 base_folder["pkg-dir"],
1048 vdud.get("cloud-init-file"),
1049 )
1050 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1051 target["cloud_init_content"][
1052 vdur["cloud-init"]
1053 ] = ci_file.read()
1054 elif vdud.get("cloud-init"):
1055 vdur["cloud-init"] = "{}:vdu:{}".format(
1056 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1057 )
1058 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1059 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1060 "cloud-init"
1061 ]
1062 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1063 deploy_params_vdu = self._format_additional_params(
1064 vdur.get("additionalParams") or {}
1065 )
1066 deploy_params_vdu["OSM"] = get_osm_params(
1067 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1068 )
1069 vdur["additionalParams"] = deploy_params_vdu
1070
1071 # flavor
1072 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1073 if target_vim not in ns_flavor["vim_info"]:
1074 ns_flavor["vim_info"][target_vim] = {}
1075
1076 # deal with images
1077 # in case alternative images are provided we must check if they should be applied
1078 # for the vim_type, modify the vim_type taking into account
1079 ns_image_id = int(vdur["ns-image-id"])
1080 if vdur.get("alt-image-ids"):
1081 db_vim = get_vim_account(vnfr["vim-account-id"])
1082 vim_type = db_vim["vim_type"]
1083 for alt_image_id in vdur.get("alt-image-ids"):
1084 ns_alt_image = target["image"][int(alt_image_id)]
1085 if vim_type == ns_alt_image.get("vim-type"):
1086 # must use alternative image
1087 self.logger.debug(
1088 "use alternative image id: {}".format(alt_image_id)
1089 )
1090 ns_image_id = alt_image_id
1091 vdur["ns-image-id"] = ns_image_id
1092 break
1093 ns_image = target["image"][int(ns_image_id)]
1094 if target_vim not in ns_image["vim_info"]:
1095 ns_image["vim_info"][target_vim] = {}
1096
1097 vdur["vim_info"] = {target_vim: {}}
1098 # instantiation parameters
1099 # if vnf_params:
1100 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1101 # vdud["id"]), None)
1102 vdur_list.append(vdur)
1103 target_vnf["vdur"] = vdur_list
1104 target["vnf"].append(target_vnf)
1105
1106 desc = await self.RO.deploy(nsr_id, target)
1107 self.logger.debug("RO return > {}".format(desc))
1108 action_id = desc["action_id"]
1109 await self._wait_ng_ro(
1110 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1111 )
1112
1113 # Updating NSR
1114 db_nsr_update = {
1115 "_admin.deployed.RO.operational-status": "running",
1116 "detailed-status": " ".join(stage),
1117 }
1118 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
1121 self.logger.debug(
1122 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1123 )
1124 return
1125
1126 async def _wait_ng_ro(
1127 self,
1128 nsr_id,
1129 action_id,
1130 nslcmop_id=None,
1131 start_time=None,
1132 timeout=600,
1133 stage=None,
1134 ):
1135 detailed_status_old = None
1136 db_nsr_update = {}
1137 start_time = start_time or time()
1138 while time() <= start_time + timeout:
1139 desc_status = await self.RO.status(nsr_id, action_id)
1140 self.logger.debug("Wait NG RO > {}".format(desc_status))
1141 if desc_status["status"] == "FAILED":
1142 raise NgRoException(desc_status["details"])
1143 elif desc_status["status"] == "BUILD":
1144 if stage:
1145 stage[2] = "VIM: ({})".format(desc_status["details"])
1146 elif desc_status["status"] == "DONE":
1147 if stage:
1148 stage[2] = "Deployed at VIM"
1149 break
1150 else:
1151 assert False, "ROclient.check_ns_status returns unknown {}".format(
1152 desc_status["status"]
1153 )
1154 if stage and nslcmop_id and stage[2] != detailed_status_old:
1155 detailed_status_old = stage[2]
1156 db_nsr_update["detailed-status"] = " ".join(stage)
1157 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1158 self._write_op_status(nslcmop_id, stage)
1159 await asyncio.sleep(15, loop=self.loop)
1160 else: # timeout_ns_deploy
1161 raise NgRoException("Timeout waiting ns to deploy")
1162
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate the NS at NG-RO: request an (empty-target) deploy that removes
        all resources, wait for completion, then delete the nsr at RO.

        :param logging_text: prefix used for logging
        :param nsr_deployed: deployed info from the nsr record
            (NOTE(review): not referenced in this body — presumably kept for
            signature compatibility with the classic-RO variant; confirm)
        :param nsr_id: ns record id
        :param nslcmop_id: current operation id, also used as RO action_id target
        :param stage: 3-item stage list; item 2 is overwritten with the result
        :raises LcmException: when RO reports a conflict or an unexpected error
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an all-empty target tells NG-RO to remove every deployed item
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            # 404 means RO no longer knows the nsr: treat as already deleted
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # always record the outcome in the nsr and the operation record
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1229
1230 async def instantiate_RO(
1231 self,
1232 logging_text,
1233 nsr_id,
1234 nsd,
1235 db_nsr,
1236 db_nslcmop,
1237 db_vnfrs,
1238 db_vnfds,
1239 n2vc_key_list,
1240 stage,
1241 ):
1242 """
1243 Instantiate at RO
1244 :param logging_text: preffix text to use at logging
1245 :param nsr_id: nsr identity
1246 :param nsd: database content of ns descriptor
1247 :param db_nsr: database content of ns record
1248 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1249 :param db_vnfrs:
1250 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1251 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1252 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1253 :return: None or exception
1254 """
1255 try:
1256 start_deploy = time()
1257 ns_params = db_nslcmop.get("operationParams")
1258 if ns_params and ns_params.get("timeout_ns_deploy"):
1259 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1260 else:
1261 timeout_ns_deploy = self.timeout.get(
1262 "ns_deploy", self.timeout_ns_deploy
1263 )
1264
1265 # Check for and optionally request placement optimization. Database will be updated if placement activated
1266 stage[2] = "Waiting for Placement."
1267 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1268 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1269 for vnfr in db_vnfrs.values():
1270 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1271 break
1272 else:
1273 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1274
1275 return await self._instantiate_ng_ro(
1276 logging_text,
1277 nsr_id,
1278 nsd,
1279 db_nsr,
1280 db_nslcmop,
1281 db_vnfrs,
1282 db_vnfds,
1283 n2vc_key_list,
1284 stage,
1285 start_deploy,
1286 timeout_ns_deploy,
1287 )
1288 except Exception as e:
1289 stage[2] = "ERROR deploying at VIM"
1290 self.set_vnfr_at_error(db_vnfrs, str(e))
1291 self.logger.error(
1292 "Error deploying at VIM {}".format(e),
1293 exc_info=not isinstance(
1294 e,
1295 (
1296 ROclient.ROClientException,
1297 LcmException,
1298 DbException,
1299 NgRoException,
1300 ),
1301 ),
1302 )
1303 raise
1304
1305 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1306 """
1307 Wait for kdu to be up, get ip address
1308 :param logging_text: prefix use for logging
1309 :param nsr_id:
1310 :param vnfr_id:
1311 :param kdu_name:
1312 :return: IP address
1313 """
1314
1315 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1316 nb_tries = 0
1317
1318 while nb_tries < 360:
1319 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1320 kdur = next(
1321 (
1322 x
1323 for x in get_iterable(db_vnfr, "kdur")
1324 if x.get("kdu-name") == kdu_name
1325 ),
1326 None,
1327 )
1328 if not kdur:
1329 raise LcmException(
1330 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1331 )
1332 if kdur.get("status"):
1333 if kdur["status"] in ("READY", "ENABLED"):
1334 return kdur.get("ip-address")
1335 else:
1336 raise LcmException(
1337 "target KDU={} is in error state".format(kdu_name)
1338 )
1339
1340 await asyncio.sleep(10, loop=self.loop)
1341 nb_tries += 1
1342 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1343
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target a specific VDU; None means "the VNF management VDU"
        :param vdu_index: count-index of the VDU (used together with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on VNF/VM error state, missing vdur, or retry exhaustion
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0          # counts key-injection retries (classic RO path)
        target_vdu_id = None  # set once the target VM is ACTIVE with an IP
        ro_retries = 0        # counts polling iterations (10s each)

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # the management VDU is the one holding the VNF ip-address
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up; otherwise require ACTIVE from either
                # the classic status field or the NG-RO vim_status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: injection is an asynchronous RO action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # classic RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                            else:
                                raise ROclient.ROClientException(
                                    "error injecting key: {}".format(
                                        result.get("description")
                                    )
                                )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO may transiently fail: retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # nothing to inject: the IP address is all the caller needs
                break

        return ip_address
1520
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: ns record id (re-read from db on every iteration)
        :param vca_deployed_list: deployed VCA list; only my own entry is used
        :param vca_index: index of the VCA whose dependencies must be ready
        :raises LcmException: when a dependency is BROKEN or on timeout
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): "timeout" counts 10-second iterations, not seconds,
        # so the effective wait is ~300 x 10s — confirm whether intended.
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level VCA (no member-vnf-index) depends on everything;
                # a VNF-level VCA depends only on entries of the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still in progress: break to sleep and re-check
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1558
1559 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1560 return deep_get(db_vnfr, ("vca-id",)) or deep_get(
1561 db_nsr, ("instantiate_params", "vcaId")
1562 )
1563
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Create/register one VCA execution environment (charm or helm based),
        install its configuration software, wait for the target element
        (NS/VNF/VDU/KDU) to be reachable, and execute its Day-1 initial
        config primitives.

        :param logging_text: prefix used for logging
        :param vca_index: index of this VCA inside db_nsr["_admin"]["deployed"]["VCA"]
        :param nsi_id: network slice instance id, or None
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnf record, or None for NS-level config
        :param vdu_id: vdu id when configuring a VDU, else None
        :param kdu_name: kdu name when configuring a KDU, else None
        :param vdu_index: count-index of the vdu/kdu (0 when not applicable)
        :param config_descriptor: descriptor section with config-access and primitives
        :param deploy_params: parameters used to render primitive arguments; gets
            "rw_mgmt_ip" (and possibly "ns_config_info") added as side effect
        :param base_folder: package storage info ("folder", "pkg-dir") to locate artifacts
        :param nslcmop_id: current operation id (for status reporting)
        :param stage: 3-item stage list, updated in place
        :param vca_type: native_charm | lxc_proxy_charm | k8s_proxy_charm | helm | helm-v3
        :param vca_name: charm/chart name inside the package
        :param ee_config_descriptor: execution environment descriptor (id, metrics, ...)
        :raises LcmException: wrapping any failure; the failing step is in the message
        """
        nsr_id = db_nsr["_id"]
        # dotted db path prefix for this VCA entry, e.g. "_admin.deployed.VCA.3."
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current action; it is included in the raised
        # LcmException message on failure
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # juju model namespace: "<nsi>.<ns>[.<vnf>-<idx>[.<vdu>-<idx> | .<kdu>.<idx>]]"
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms"
                if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                else "helm-charts",
                vca_name,
            )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists, so the execution environment can be reused
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        # last path component of artifact_path is the charm name
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                # native charms run inside the VM itself, so the VM must be up first
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # native charms may take an initial "config" primitive as charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                # number of charm units may be overridden per NS/VNF/VDU record
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_index=vca_index,
                vca_id=vca_id,
                vca_type=vca_type,
            )

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

                # store rw_mgmt_ip in deploy params for later replacement
                deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.add_prometheus_metrics(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log a full traceback for unexpected exception types
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
1972
1973 def _write_ns_status(
1974 self,
1975 nsr_id: str,
1976 ns_state: str,
1977 current_operation: str,
1978 current_operation_id: str,
1979 error_description: str = None,
1980 error_detail: str = None,
1981 other_update: dict = None,
1982 ):
1983 """
1984 Update db_nsr fields.
1985 :param nsr_id:
1986 :param ns_state:
1987 :param current_operation:
1988 :param current_operation_id:
1989 :param error_description:
1990 :param error_detail:
1991 :param other_update: Other required changes at database if provided, will be cleared
1992 :return:
1993 """
1994 try:
1995 db_dict = other_update or {}
1996 db_dict[
1997 "_admin.nslcmop"
1998 ] = current_operation_id # for backward compatibility
1999 db_dict["_admin.current-operation"] = current_operation_id
2000 db_dict["_admin.operation-type"] = (
2001 current_operation if current_operation != "IDLE" else None
2002 )
2003 db_dict["currentOperation"] = current_operation
2004 db_dict["currentOperationID"] = current_operation_id
2005 db_dict["errorDescription"] = error_description
2006 db_dict["errorDetail"] = error_detail
2007
2008 if ns_state:
2009 db_dict["nsState"] = ns_state
2010 self.update_db_2("nsrs", nsr_id, db_dict)
2011 except DbException as e:
2012 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2013
2014 def _write_op_status(
2015 self,
2016 op_id: str,
2017 stage: list = None,
2018 error_message: str = None,
2019 queuePosition: int = 0,
2020 operation_state: str = None,
2021 other_update: dict = None,
2022 ):
2023 try:
2024 db_dict = other_update or {}
2025 db_dict["queuePosition"] = queuePosition
2026 if isinstance(stage, list):
2027 db_dict["stage"] = stage[0]
2028 db_dict["detailed-status"] = " ".join(stage)
2029 elif stage is not None:
2030 db_dict["stage"] = str(stage)
2031
2032 if error_message is not None:
2033 db_dict["errorMessage"] = error_message
2034 if operation_state is not None:
2035 db_dict["operationState"] = operation_state
2036 db_dict["statusEnteredTime"] = time()
2037 self.update_db_2("nslcmops", op_id, db_dict)
2038 except DbException as e:
2039 self.logger.warn(
2040 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2041 )
2042
2043 def _write_all_config_status(self, db_nsr: dict, status: str):
2044 try:
2045 nsr_id = db_nsr["_id"]
2046 # configurationStatus
2047 config_status = db_nsr.get("configurationStatus")
2048 if config_status:
2049 db_nsr_update = {
2050 "configurationStatus.{}.status".format(index): status
2051 for index, v in enumerate(config_status)
2052 if v
2053 }
2054 # update status
2055 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2056
2057 except DbException as e:
2058 self.logger.warn(
2059 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2060 )
2061
2062 def _write_configuration_status(
2063 self,
2064 nsr_id: str,
2065 vca_index: int,
2066 status: str = None,
2067 element_under_configuration: str = None,
2068 element_type: str = None,
2069 other_update: dict = None,
2070 ):
2071
2072 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2073 # .format(vca_index, status))
2074
2075 try:
2076 db_path = "configurationStatus.{}.".format(vca_index)
2077 db_dict = other_update or {}
2078 if status:
2079 db_dict[db_path + "status"] = status
2080 if element_under_configuration:
2081 db_dict[
2082 db_path + "elementUnderConfiguration"
2083 ] = element_under_configuration
2084 if element_type:
2085 db_dict[db_path + "elementType"] = element_type
2086 self.update_db_2("nsrs", nsr_id, db_dict)
2087 except DbException as e:
2088 self.logger.warn(
2089 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2090 status, nsr_id, vca_index, e
2091 )
2092 )
2093
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        :raises LcmException: when PLA does not answer within the polling budget
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # fire the request on the "pla" kafka topic; the answer does NOT come
            # back on kafka but is written by PLA (possibly handled by another LCM
            # worker in HA setups) into the nslcmop record at _admin.pla
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database every 5s, up to 10 polls (~50s total budget)
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                # re-read the whole record; the result may have been written by
                # a different worker
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # apply the chosen vim account to every vnfr mentioned in the answer
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # ignore entries without a decision or without a matching vnfr
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs (in-memory copy kept consistent with the DB)
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2142
2143 def update_nsrs_with_pla_result(self, params):
2144 try:
2145 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2146 self.update_db_2(
2147 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2148 )
2149 except Exception as e:
2150 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2151
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: deploy its KDUs, its VMs at the VIM
        (through RO) and its execution environments/charms (through N2VC),
        while keeping the "nsrs" and "nslcmops" records updated with progress.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. The outcome is written to the database and notified on
            the kafka "ns" topic as an "instantiated" message.
        """

        # Try to lock HA task here: only one LCM worker must run this operation
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): this membership test compares an id string
                # against a list of vnfd dicts, so it never matches and the
                # vnfd is re-read for every vnfr — verify intended behavior
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # VIM deployment runs as a background task; it is awaited in the
            # finally block together with the other registered tasks
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm/execution environment (if any)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_vdu_profile(vnfd, vdu_id).get(
                        "max-number-of-instances", 1
                    )

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per potential VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level execution environments
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu = parse_yaml_strings(
                                kdur["additionalParams"]
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of stuff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify result on kafka so NBI/subscribers can react
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2623
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_index: int,
        timeout: int = 3600,
        vca_type: str = None,
        vca_id: str = None,
    ) -> bool:
        """
        Add the juju relations in which the VCA at vca_index participates,
        retrying until all peers are deployed or the timeout expires.

        :param logging_text: prefix for log messages
        :param nsr_id: id of the "nsrs" record
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param timeout: maximum seconds to wait for the peer VCAs
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param vca_id: VCA account id forwarded to add_relation
        :return: True when all relations were added (or there were none),
            False on timeout or error.
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            vca_type = vca_type or "lxc_proxy_charm"

            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # this VCA data
            my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]

            # read all ns-configuration relations
            ns_relations = list()
            db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
            if db_ns_relations:
                for r in db_ns_relations:
                    # check if this VCA is in the relation
                    if my_vca.get("member-vnf-index") in (
                        r.get("entities")[0].get("id"),
                        r.get("entities")[1].get("id"),
                    ):
                        ns_relations.append(r)

            # read all vnf-configuration relations
            vnf_relations = list()
            db_vnfd_list = db_nsr.get("vnfd-id")
            if db_vnfd_list:
                for vnfd in db_vnfd_list:
                    db_vnf_relations = None
                    db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                    db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
                    if db_vnf_configuration:
                        db_vnf_relations = db_vnf_configuration.get("relation", [])
                    if db_vnf_relations:
                        for r in db_vnf_relations:
                            # check if this VCA is in the relation
                            if my_vca.get("vdu_id") in (
                                r.get("entities")[0].get("id"),
                                r.get("entities")[1].get("id"),
                            ):
                                vnf_relations.append(r)

            # if no relations, terminate
            if not ns_relations and not vnf_relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(
                logging_text
                + " adding relations\n {}\n {}".format(
                    ns_relations, vnf_relations
                )
            )

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deloyed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each defined NS relation, find the VCA's related
                # (iterate over a copy because entries are removed while looping)
                for r in ns_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        if vca.get("member-vnf-index") == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get("member-vnf-index") == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        ns_relations.remove(r)
                    else:
                        # check failed peers: a BROKEN peer will never come up,
                        # so drop the relation instead of waiting forever
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        0
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        1
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                        except Exception:
                            # ignore
                            pass

                # for each defined VNF relation, find the VCA's related
                for r in vnf_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        # VNF-level VCAs have no vdu_id; match on vnfd_id instead
                        key_to_check = "vdu_id"
                        if vca.get("vdu_id") is None:
                            key_to_check = "vnfd_id"
                        if vca.get(key_to_check) == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get(key_to_check) == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        vnf_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("vdu_id") == r.get("entities")[0].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                                    if vca.get("vdu_id") == r.get("entities")[1].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                        except Exception:
                            # ignore
                            pass

                # wait for next try
                await asyncio.sleep(5.0)

                if not ns_relations and not vnf_relations:
                    self.logger.debug("Relations added")
                    break

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
2827
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """
        Install one KDU in its k8s cluster, record the resulting services/IPs
        in the vnfr and run its juju-less initial-config-primitives.

        :param nsr_id: id of the "nsrs" record
        :param nsr_db_path: dot path inside the nsr where this KDU status lives
        :param vnfr_data: database content of the owning vnfr
        :param kdu_index: index of this KDU inside vnfr "kdur"
        :param kdud: KDU descriptor (from the vnfd)
        :param vnfd: database content of the vnfd
        :param k8s_instance_info: cluster type/uuid, kdu model/name, namespace
            and optional "kdu-deployment-name" (custom helm release name)
        :param k8params: parameters for the helm/juju install
        :param timeout: seconds for install and for each primitive
        :param vca_id: VCA account id forwarded to the k8s connector
        :return: the kdu_instance (release) name used for the deployment
        :raises: re-raises any installation error after marking the kdur ERROR
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Bugfix 1550: honor a user-provided release name; otherwise let the
            # k8s connector generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )
            # persist the instance name before installing so it can be found
            # even if the install fails midway
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )
            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial primitives only when they are not handled by a juju
            # execution environment (get_juju_ee_ref is None)
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives must run in their declared "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
2992
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """
        Launch the deployment of every KDU (helm-chart or juju-bundle) found in
        the vnf records, one asyncio task per KDU.

        :param logging_text: prefix for log messages
        :param nsr_id: id of the ns record being instantiated
        :param nslcmop_id: id of the ns lcm operation
        :param db_vnfrs: dictionary of vnf records (only its values are iterated here)
        :param db_vnfds: list of vnf descriptors, looked up by "_id"
        :param task_instantiation_info: dict annotated with each created task
            (task -> human-readable description)
        :return: None. Tasks are registered in self.lcm_tasks and progress is
            written to the "nsrs"/"vnfrs" collections.
        :raises LcmException: on unknown kdu type or any unexpected error
        """
        # Launch kdus if present in the descriptor

        # cache of k8s-cluster internal ids, indexed by cluster type then cluster id,
        # shared with the nested _get_cluster_id helper below
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the internal id of a k8s cluster for the given
            # type; initializes helm-v3 on demand for backward compatibility.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            # reset the deployed K8s list before adding one entry per kdu below
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            # clusters whose helm repos were already synchronized in this call
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage and storage.get(
                            "pkg-dir"
                        ):  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            filename = "{}/{}/{}s/{}".format(
                                storage["folder"],
                                storage["pkg-dir"],
                                k8sclustertype,
                                kdumodel,
                            )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (once per cluster and helm flavour)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=600,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever K8s entries were filled in before the failure/success
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3259
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Launch one instantiate_N2VC asyncio task per execution environment
        declared in descriptor_config and register each task.

        For every ee item, the matching entry of db_nsr._admin.deployed.VCA is
        looked up; when missing, a new entry is created and persisted before
        the task is launched.

        :param descriptor_config: configuration descriptor carrying the
            execution-environment-list
        :param task_instantiation_info: dict annotated with the created tasks
        :return: None
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive vca_type / vca_name from the kind of execution environment
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an already-deployed VCA entry matching this ee item;
            # the for/else leaves vca_index pointing at the match (break) or
            # creates a brand new entry (else branch)
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3410
3411 @staticmethod
3412 def _create_nslcmop(nsr_id, operation, params):
3413 """
3414 Creates a ns-lcm-opp content to be stored at database.
3415 :param nsr_id: internal id of the instance
3416 :param operation: instantiate, terminate, scale, action, ...
3417 :param params: user parameters for the operation
3418 :return: dictionary following SOL005 format
3419 """
3420 # Raise exception if invalid arguments
3421 if not (nsr_id and operation and params):
3422 raise LcmException(
3423 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3424 )
3425 now = time()
3426 _id = str(uuid4())
3427 nslcmop = {
3428 "id": _id,
3429 "_id": _id,
3430 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3431 "operationState": "PROCESSING",
3432 "statusEnteredTime": now,
3433 "nsInstanceId": nsr_id,
3434 "lcmOperationType": operation,
3435 "startTime": now,
3436 "isAutomaticInvocation": False,
3437 "operationParams": params,
3438 "isCancelPending": False,
3439 "links": {
3440 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3441 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3442 },
3443 }
3444 return nslcmop
3445
3446 def _format_additional_params(self, params):
3447 params = params or {}
3448 for key, value in params.items():
3449 if str(value).startswith("!!yaml "):
3450 params[key] = yaml.safe_load(value[7:])
3451 return params
3452
3453 def _get_terminate_primitive_params(self, seq, vnf_index):
3454 primitive = seq.get("name")
3455 primitive_params = {}
3456 params = {
3457 "member_vnf_index": vnf_index,
3458 "primitive": primitive,
3459 "primitive_params": primitive_params,
3460 }
3461 desc_params = {}
3462 return self._map_primitive_params(seq, params, desc_params)
3463
3464 # sub-operations
3465
3466 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3467 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3468 if op.get("operationState") == "COMPLETED":
3469 # b. Skip sub-operation
3470 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3471 return self.SUBOPERATION_STATUS_SKIP
3472 else:
3473 # c. retry executing sub-operation
3474 # The sub-operation exists, and operationState != 'COMPLETED'
3475 # Update operationState = 'PROCESSING' to indicate a retry.
3476 operationState = "PROCESSING"
3477 detailed_status = "In progress"
3478 self._update_suboperation_status(
3479 db_nslcmop, op_index, operationState, detailed_status
3480 )
3481 # Return the sub-operation index
3482 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3483 # with arguments extracted from the sub-operation
3484 return op_index
3485
3486 # Find a sub-operation where all keys in a matching dictionary must match
3487 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3488 def _find_suboperation(self, db_nslcmop, match):
3489 if db_nslcmop and match:
3490 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3491 for i, op in enumerate(op_list):
3492 if all(op.get(k) == match[k] for k in match):
3493 return i
3494 return self.SUBOPERATION_STATUS_NOT_FOUND
3495
3496 # Update status for a sub-operation given its index
3497 def _update_suboperation_status(
3498 self, db_nslcmop, op_index, operationState, detailed_status
3499 ):
3500 # Update DB for HA tasks
3501 q_filter = {"_id": db_nslcmop["_id"]}
3502 update_dict = {
3503 "_admin.operations.{}.operationState".format(op_index): operationState,
3504 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3505 }
3506 self.db.set_one(
3507 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3508 )
3509
3510 # Add sub-operation, return the index of the added sub-operation
3511 # Optionally, set operationState, detailed-status, and operationType
3512 # Status and type are currently set for 'scale' sub-operations:
3513 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3514 # 'detailed-status' : status message
3515 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3516 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3517 def _add_suboperation(
3518 self,
3519 db_nslcmop,
3520 vnf_index,
3521 vdu_id,
3522 vdu_count_index,
3523 vdu_name,
3524 primitive,
3525 mapped_primitive_params,
3526 operationState=None,
3527 detailed_status=None,
3528 operationType=None,
3529 RO_nsr_id=None,
3530 RO_scaling_info=None,
3531 ):
3532 if not db_nslcmop:
3533 return self.SUBOPERATION_STATUS_NOT_FOUND
3534 # Get the "_admin.operations" list, if it exists
3535 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3536 op_list = db_nslcmop_admin.get("operations")
3537 # Create or append to the "_admin.operations" list
3538 new_op = {
3539 "member_vnf_index": vnf_index,
3540 "vdu_id": vdu_id,
3541 "vdu_count_index": vdu_count_index,
3542 "primitive": primitive,
3543 "primitive_params": mapped_primitive_params,
3544 }
3545 if operationState:
3546 new_op["operationState"] = operationState
3547 if detailed_status:
3548 new_op["detailed-status"] = detailed_status
3549 if operationType:
3550 new_op["lcmOperationType"] = operationType
3551 if RO_nsr_id:
3552 new_op["RO_nsr_id"] = RO_nsr_id
3553 if RO_scaling_info:
3554 new_op["RO_scaling_info"] = RO_scaling_info
3555 if not op_list:
3556 # No existing operations, create key 'operations' with current operation as first list element
3557 db_nslcmop_admin.update({"operations": [new_op]})
3558 op_list = db_nslcmop_admin.get("operations")
3559 else:
3560 # Existing operations, append operation to list
3561 op_list.append(new_op)
3562
3563 db_nslcmop_update = {"_admin.operations": op_list}
3564 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3565 op_index = len(op_list) - 1
3566 return op_index
3567
3568 # Helper methods for scale() sub-operations
3569
3570 # pre-scale/post-scale:
3571 # Check for 3 different cases:
3572 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3573 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3574 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3575 def _check_or_add_scale_suboperation(
3576 self,
3577 db_nslcmop,
3578 vnf_index,
3579 vnf_config_primitive,
3580 primitive_params,
3581 operationType,
3582 RO_nsr_id=None,
3583 RO_scaling_info=None,
3584 ):
3585 # Find this sub-operation
3586 if RO_nsr_id and RO_scaling_info:
3587 operationType = "SCALE-RO"
3588 match = {
3589 "member_vnf_index": vnf_index,
3590 "RO_nsr_id": RO_nsr_id,
3591 "RO_scaling_info": RO_scaling_info,
3592 }
3593 else:
3594 match = {
3595 "member_vnf_index": vnf_index,
3596 "primitive": vnf_config_primitive,
3597 "primitive_params": primitive_params,
3598 "lcmOperationType": operationType,
3599 }
3600 op_index = self._find_suboperation(db_nslcmop, match)
3601 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3602 # a. New sub-operation
3603 # The sub-operation does not exist, add it.
3604 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3605 # The following parameters are set to None for all kind of scaling:
3606 vdu_id = None
3607 vdu_count_index = None
3608 vdu_name = None
3609 if RO_nsr_id and RO_scaling_info:
3610 vnf_config_primitive = None
3611 primitive_params = None
3612 else:
3613 RO_nsr_id = None
3614 RO_scaling_info = None
3615 # Initial status for sub-operation
3616 operationState = "PROCESSING"
3617 detailed_status = "In progress"
3618 # Add sub-operation for pre/post-scaling (zero or more operations)
3619 self._add_suboperation(
3620 db_nslcmop,
3621 vnf_index,
3622 vdu_id,
3623 vdu_count_index,
3624 vdu_name,
3625 vnf_config_primitive,
3626 primitive_params,
3627 operationState,
3628 detailed_status,
3629 operationType,
3630 RO_nsr_id,
3631 RO_scaling_info,
3632 )
3633 return self.SUBOPERATION_STATUS_NEW
3634 else:
3635 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3636 # or op_index (operationState != 'COMPLETED')
3637 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3638
3639 # Function to return execution_environment id
3640
3641 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3642 # TODO vdu_index_count
3643 for vca in vca_deployed_list:
3644 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3645 return vca["ee_id"]
3646
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment.

        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database document
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: id of the VCA (juju controller) to use, if any
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # primitives only run when this VCA was flagged as needing termination
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # remove the prometheus jobs associated to this VCA, if any
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_id=vca_id,
            )
3750
3751 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3752 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3753 namespace = "." + db_nsr["_id"]
3754 try:
3755 await self.n2vc.delete_namespace(
3756 namespace=namespace,
3757 total_timeout=self.timeout_charm_delete,
3758 vca_id=vca_id,
3759 )
3760 except N2VCNotFound: # already deleted. Skip
3761 pass
3762 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3763
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO: deletes the ns from the VIM, waits for
        the deletion to finish, then removes the nsd and every vnfd from RO.

        :param logging_text: prefix for log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: id of the ns record
        :param nslcmop_id: id of the ns lcm operation
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: when any deletion step failed (accumulated details)
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates error messages; empty means success
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                # poll RO every 5 seconds until ACTIVE (deleted), ERROR or timeout
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # persist the status only when it actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    # NOTE(review): the 'loop' parameter of asyncio.sleep is
                    # deprecated since Python 3.8 — confirm target runtime
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete every vnfd from RO
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
3963
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a network service: run per-VCA terminating primitives, then
        delete all execution environments, KDU instances and the VIM deployment.

        :param nsr_id: nsr id (database _id) of the network service to terminate
        :param nslcmop_id: id of the nslcmop record driving this operation; its
            operationParams may carry "timeout_ns_terminate", "autoremove" and
            "skip_terminate_primitives"
        :return: None. Results are written to the nsrs/nslcmops records and
            notified on the "ns"/"terminated" kafka topic.
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human-readable description, consumed by _wait_for_tasks
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy so that deletions recorded below do not mutate the db_nsr snapshot
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; status cleanup still happens in finally
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None

                vca_id = self.get_vca_id(db_vnfrs_dict[vca["member-vnf-index"]], db_nsr)
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching this VCA's scope
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                # NOTE(review): "as exc" shadows the outer exc and Python deletes the
                # name when this handler exits; harmless today because exc is only read
                # above, but renaming the local would be safer.
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    # propagate the final state to all VNFRs of this NS
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4286
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of asyncio tasks, collecting errors and publishing progress.

        :param logging_text: prefix for every log line
        :param created_tasks_info: dict mapping asyncio task -> human-readable
            description of what the task does
        :param timeout: global timeout in seconds for the whole set of tasks
        :param stage: 3-item list [stage, step, VIM-status]; stage[1] is rewritten
            in place with "done/total" progress and any accumulated errors
        :param nslcmop_id: operation record that receives the progress updates
        :param nsr_id: if provided, errorDescription/errorDetail are also written
            to the nsr record whenever a new error appears
        :return: list of error-detail strings (empty if every task succeeded)
        """
        time_start = time()
        error_detail_list = []  # one entry per failure, with the exception text
        error_list = []  # one entry per failure, description only
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining share of the global timeout for this wait round
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is recorded as timed out; loop exits
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled exception types are logged briefly;
                    # anything else gets a full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4363
4364 @staticmethod
4365 def _map_primitive_params(primitive_desc, params, instantiation_params):
4366 """
4367 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4368 The default-value is used. If it is between < > it look for a value at instantiation_params
4369 :param primitive_desc: portion of VNFD/NSD that describes primitive
4370 :param params: Params provided by user
4371 :param instantiation_params: Instantiation params provided by user
4372 :return: a dictionary with the calculated params
4373 """
4374 calculated_params = {}
4375 for parameter in primitive_desc.get("parameter", ()):
4376 param_name = parameter["name"]
4377 if param_name in params:
4378 calculated_params[param_name] = params[param_name]
4379 elif "default-value" in parameter or "value" in parameter:
4380 if "value" in parameter:
4381 calculated_params[param_name] = parameter["value"]
4382 else:
4383 calculated_params[param_name] = parameter["default-value"]
4384 if (
4385 isinstance(calculated_params[param_name], str)
4386 and calculated_params[param_name].startswith("<")
4387 and calculated_params[param_name].endswith(">")
4388 ):
4389 if calculated_params[param_name][1:-1] in instantiation_params:
4390 calculated_params[param_name] = instantiation_params[
4391 calculated_params[param_name][1:-1]
4392 ]
4393 else:
4394 raise LcmException(
4395 "Parameter {} needed to execute primitive {} not provided".format(
4396 calculated_params[param_name], primitive_desc["name"]
4397 )
4398 )
4399 else:
4400 raise LcmException(
4401 "Parameter {} needed to execute primitive {} not provided".format(
4402 param_name, primitive_desc["name"]
4403 )
4404 )
4405
4406 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4407 calculated_params[param_name] = yaml.safe_dump(
4408 calculated_params[param_name], default_flow_style=True, width=256
4409 )
4410 elif isinstance(calculated_params[param_name], str) and calculated_params[
4411 param_name
4412 ].startswith("!!yaml "):
4413 calculated_params[param_name] = calculated_params[param_name][7:]
4414 if parameter.get("data-type") == "INTEGER":
4415 try:
4416 calculated_params[param_name] = int(calculated_params[param_name])
4417 except ValueError: # error converting string to int
4418 raise LcmException(
4419 "Parameter {} of primitive {} must be integer".format(
4420 param_name, primitive_desc["name"]
4421 )
4422 )
4423 elif parameter.get("data-type") == "BOOLEAN":
4424 calculated_params[param_name] = not (
4425 (str(calculated_params[param_name])).lower() == "false"
4426 )
4427
4428 # add always ns_config_info if primitive name is config
4429 if primitive_desc["name"] == "config":
4430 if "ns_config_info" in instantiation_params:
4431 calculated_params["ns_config_info"] = instantiation_params[
4432 "ns_config_info"
4433 ]
4434 return calculated_params
4435
4436 def _look_for_deployed_vca(
4437 self,
4438 deployed_vca,
4439 member_vnf_index,
4440 vdu_id,
4441 vdu_count_index,
4442 kdu_name=None,
4443 ee_descriptor_id=None,
4444 ):
4445 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4446 for vca in deployed_vca:
4447 if not vca:
4448 continue
4449 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4450 continue
4451 if (
4452 vdu_count_index is not None
4453 and vdu_count_index != vca["vdu_count_index"]
4454 ):
4455 continue
4456 if kdu_name and kdu_name != vca["kdu_name"]:
4457 continue
4458 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4459 continue
4460 break
4461 else:
4462 # vca_deployed not found
4463 raise LcmException(
4464 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4465 " is not deployed".format(
4466 member_vnf_index,
4467 vdu_id,
4468 vdu_count_index,
4469 kdu_name,
4470 ee_descriptor_id,
4471 )
4472 )
4473 # get ee_id
4474 ee_id = vca.get("ee_id")
4475 vca_type = vca.get(
4476 "type", "lxc_proxy_charm"
4477 ) # default value for backward compatibility - proxy charm
4478 if not ee_id:
4479 raise LcmException(
4480 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4481 "execution environment".format(
4482 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4483 )
4484 )
4485 return ee_id, vca_type
4486
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """
        Execute a primitive (action) on an execution environment, retrying on failure.

        :param ee_id: id of the execution environment to run the primitive on
        :param primitive: primitive name; for "config" the params are wrapped
            under a "params" key before being passed to the connector
        :param primitive_params: dict of parameters for the primitive
        :param retries: number of extra attempts after a failed execution
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt overall timeout; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map selecting the connector;
            defaults to "lxc_proxy_charm"
        :param db_dict: database location info forwarded to the connector so it can
            report execution status
        :param vca_id: id of the VCA credentials to use
        :return: tuple (state, detail): ("COMPLETED", output) on success,
            ("FAILED", error text) when retries are exhausted, or
            ("FAIL", error text) on unexpected errors
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        # NOTE(review): the loop= kwarg of asyncio.sleep is removed in
                        # Python 3.10 — confirm target runtime before upgrading.
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4544
4545 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4546 """
4547 Updating the vca_status with latest juju information in nsrs record
4548 :param: nsr_id: Id of the nsr
4549 :param: nslcmop_id: Id of the nslcmop
4550 :return: None
4551 """
4552
4553 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4554 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4555 vca_id = self.get_vca_id({}, db_nsr)
4556 if db_nsr["_admin"]["deployed"]["K8s"]:
4557 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4558 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4559 await self._on_update_k8s_db(
4560 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4561 )
4562 else:
4563 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4564 table, filter = "nsrs", {"_id": nsr_id}
4565 path = "_admin.deployed.VCA.{}.".format(vca_index)
4566 await self._on_update_n2vc_db(table, filter, path, {})
4567
4568 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4569 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4570
    async def action(self, nsr_id, nslcmop_id):
        """
        Run a primitive (action) against an NS / VNF / VDU / KDU and record the
        result in the nslcmop record.

        :param nsr_id: nsr id (database _id)
        :param nslcmop_id: operation id; its operationParams select the target
            (member_vnf_index, vdu_id, kdu_name, vdu_count_index) plus the
            primitive name and primitive_params
        :return: tuple (operation state, detailed status) — returned from the
            finally block, which supersedes the early bare "return" in the try
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch above, so
            # this call raises NameError for NS-level actions (no member_vnf_index) —
            # confirm whether NS-level actions reach here; a default (e.g. db_vnfr = {})
            # in the else branch would guard it.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only the built-in KDU operations may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect additionalParams of the addressed scope (vdu / kdu / vnf / ns)
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE: "primitive" is reused as the loop variable here, shadowing the
                # requested primitive name; later code relies on primitive_name instead.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu_action = True if primitive_name in actions else False

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound inside the block above; if kdu_name
            # is set, the KDU has no configuration, and primitive_name is not one of the
            # built-ins, this condition raises NameError — confirm.
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        # strip an optional ":version" suffix from the model reference
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # custom KDU primitive; fall back to the "<kdu-name>-<nsr-id>"
                    # release name when the record has no kdu-instance
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm-based primitive: find the execution environment and run it
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
4908
    async def scale(self, nsr_id, nslcmop_id):
        """Execute an NS scale operation (scale a VNF's VDUs/KDUs in or out).

        High-level phases, in order:
          1. Lock the operation for HA and wait for related previous operations.
          2. Load nslcmop/nsr/vnfr/vnfd records and the scaling-group descriptor.
          3. Compute vdu/kdu scaling deltas (SCALE_OUT or SCALE_IN) honoring
             max-/min-number-of-instances from the vdu/kdu profiles.
          4. Run optional "pre-scale" vnf-config-primitives (with sub-operation
             retry bookkeeping so a restarted LCM can resume).
          5. Scale in VCA execution environments, then RO (VMs), then KDUs
             (K8s replicas), then scale out new VCA execution environments.
          6. Run optional "post-scale" vnf-config-primitives.
          7. In `finally`: persist statuses and notify kafka topic "ns"/"scaled".

        :param nsr_id: _id of the "nsrs" record being scaled
        :param nslcmop_id: _id of the "nslcmops" record describing the request
        :return: None; results are written to the database and kafka.
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        scale_process = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="SCALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # keep previous statuses so they can be restored in `finally`
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            step = "Parsing scaling parameters"
            db_nsr_update["operational-status"] = "scaling"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["member-vnf-index"]
            scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["scaling-group-descriptor"]
            scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
            )

            vca_id = self.get_vca_id(db_vnfr, db_nsr)

            step = "Getting vnfd from database"
            db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

            base_folder = db_vnfd["_admin"]["storage"]

            step = "Getting scaling-group-descriptor"
            scaling_descriptor = find_in_list(
                get_scaling_aspect(db_vnfd),
                lambda scale_desc: scale_desc["name"] == scaling_group,
            )
            if not scaling_descriptor:
                raise LcmException(
                    "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
                    "at vnfd:scaling-group-descriptor".format(scaling_group)
                )

            step = "Sending scale order to VIM"
            # TODO check if ns is in a proper status
            # nb_scale_op accumulates the net number of scale operations done
            # so far for this scaling group (persisted in _admin.scaling-group)
            nb_scale_op = 0
            if not db_nsr["_admin"].get("scaling-group"):
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {
                        "_admin.scaling-group": [
                            {"name": scaling_group, "nb-scale-op": 0}
                        ]
                    },
                )
                admin_scale_index = 0
            else:
                for admin_scale_index, admin_scale_info in enumerate(
                    db_nsr["_admin"]["scaling-group"]
                ):
                    if admin_scale_info["name"] == scaling_group:
                        nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                        break
                else:  # not found, set index one plus last element and add new entry with the name
                    admin_scale_index += 1
                    db_nsr_update[
                        "_admin.scaling-group.{}.name".format(admin_scale_index)
                    ] = scaling_group

            # vca_scaling_info: per-instance create/delete entries for VCA
            # scaling_info: aggregated info passed to RO and _scale_kdu
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
            if scaling_type == "SCALE_OUT":
                if "aspect-delta-details" not in scaling_descriptor:
                    raise LcmException(
                        "Aspect delta details not fount in scaling descriptor {}".format(
                            scaling_descriptor["name"]
                        )
                    )
                # count if max-instance-count is reached
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "OUT"
                scaling_info["vdu-create"] = {}
                scaling_info["kdu-create"] = {}
                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        vdud = get_vdu(db_vnfd, vdu_delta["id"])
                        # vdu_index also provides the number of instance of the targeted vdu
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        cloud_init_text = self._get_vdu_cloud_init_content(
                            vdud, db_vnfd
                        )
                        if cloud_init_text:
                            additional_params = (
                                self._get_vdu_additional_params(db_vnfr, vdud["id"])
                                or {}
                            )
                        cloud_init_list = []

                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        # default cap when the vdu profile declares no limit
                        max_instance_count = 10
                        if vdu_profile and "max-number-of-instances" in vdu_profile:
                            max_instance_count = vdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdud["id"]
                        )
                        instances_number = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op += instances_number

                        new_instance_count = nb_scale_op + default_instance_num
                        # Control if new count is over max and vdu count is less than max.
                        # Then assign new instance count
                        # NOTE(review): when this branch is taken,
                        # new_instance_count > max_instance_count also holds, so
                        # the check just below raises and the assigned value is
                        # never used; the `else` is a no-op — confirm intended.
                        if new_instance_count > max_instance_count > vdu_count:
                            instances_number = new_instance_count - max_instance_count
                        else:
                            instances_number = instances_number

                        if new_instance_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            if cloud_init_text:
                                # TODO Information of its own ip is not available because db_vnfr is not updated.
                                additional_params["OSM"] = get_osm_params(
                                    db_vnfr, vdu_delta["id"], vdu_index + x
                                )
                                cloud_init_list.append(
                                    self._parse_cloud_init(
                                        cloud_init_text,
                                        additional_params,
                                        db_vnfd["id"],
                                        vdud["id"],
                                    )
                                )
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "vdu_index": vdu_index + x,
                                }
                            )
                        scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        # Might have different kdus in the same delta
                        # Should have list for each kdu
                        if not scaling_info["kdu-create"].get(kdu_name, None):
                            scaling_info["kdu-create"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                k8s_cluster_type = "helm-chart"
                                # NOTE(review): scaling helm v2 KDUs is not
                                # supported — this path intentionally aborts.
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdu_name
                                )
                            )

                        # default cap when the kdu profile declares no limit
                        max_instance_count = 10
                        if kdu_profile and "max-number-of-instances" in kdu_profile:
                            max_instance_count = kdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        nb_scale_op += kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count queried from the K8s connector
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num + kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # Control if new count is over max and instance_num is less than max.
                        # Then assign max instance number to kdu replica count
                        if kdu_replica_count > max_instance_count > instance_num:
                            kdu_replica_count = max_instance_count
                        if kdu_replica_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    # NOTE(review): 0-based index relative to the
                                    # pre-scale replica count — confirm off-by-one
                                    "kdu_index": instance_num + x - 1,
                                }
                            )
                        scaling_info["kdu-create"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "create",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )
            elif scaling_type == "SCALE_IN":
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "IN"
                scaling_info["vdu-delete"] = {}
                scaling_info["kdu-delete"] = {}

                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        min_instance_count = 0
                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        if vdu_profile and "min-number-of-instances" in vdu_profile:
                            min_instance_count = vdu_profile["min-number-of-instances"]

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdu_delta["id"]
                        )
                        instance_num = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op -= instance_num

                        new_instance_count = nb_scale_op + default_instance_num

                        # clamp deletion so we never drop below min-instance-count
                        # NOTE(review): as in SCALE_OUT, when this branch is taken
                        # the min-count check below raises — confirm intended.
                        if new_instance_count < min_instance_count < vdu_count:
                            instances_number = min_instance_count - new_instance_count
                        else:
                            instances_number = instance_num

                        if new_instance_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    # delete the highest-indexed instances first
                                    "vdu_index": vdu_index - 1 - x,
                                }
                            )
                        scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        if not scaling_info["kdu-delete"].get(kdu_name, None):
                            scaling_info["kdu-delete"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                k8s_cluster_type = "helm-chart"
                                # NOTE(review): scaling helm v2 KDUs is not
                                # supported — this path intentionally aborts.
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
                                )
                            )

                        min_instance_count = 0
                        if kdu_profile and "min-number-of-instances" in kdu_profile:
                            min_instance_count = kdu_profile["min-number-of-instances"]

                        nb_scale_op -= kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count queried from the K8s connector
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num - kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # clamp so the replica count never drops below the minimum
                        if kdu_replica_count < min_instance_count < instance_num:
                            kdu_replica_count = min_instance_count
                        if kdu_replica_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    # delete the highest-indexed instances first
                                    "kdu_index": instance_num - x - 1,
                                }
                            )
                        scaling_info["kdu-delete"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "delete",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )

            # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
            # vdu_delete is a working copy used as a countdown of pending deletes
            vdu_delete = copy(scaling_info.get("vdu-delete"))
            if scaling_info["scaling_direction"] == "IN":
                for vdur in reversed(db_vnfr["vdur"]):
                    if vdu_delete.get(vdur["vdu-id-ref"]):
                        vdu_delete[vdur["vdu-id-ref"]] -= 1
                        scaling_info["vdu"].append(
                            {
                                "name": vdur.get("name") or vdur.get("vdu-name"),
                                "vdu_id": vdur["vdu-id-ref"],
                                "interface": [],
                            }
                        )
                        for interface in vdur["interfaces"]:
                            scaling_info["vdu"][-1]["interface"].append(
                                {
                                    "name": interface["name"],
                                    "ip_address": interface["ip-address"],
                                    "mac_address": interface.get("mac-address"),
                                }
                            )
            # vdu_delete = vdu_scaling_info.pop("vdu-delete")

            # PRE-SCALE BEGIN
            step = "Executing pre-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "pre-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "pre-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing pre-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:  # for-else: no matching primitive found
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
                                "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
                                "primitive".format(scaling_group, vnf_config_primitive)
                            )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring pre-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Pre-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            nslcmop_id,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "PRE-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
            db_nsr_update["config-status"] = old_config_status
            scale_process = None
            # PRE-SCALE END

            # persist the accumulated counters for this scaling group
            db_nsr_update[
                "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
            ] = nb_scale_op
            db_nsr_update[
                "_admin.scaling-group.{}.time".format(admin_scale_index)
            ] = time()

            # SCALE-IN VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Deleting the execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "delete":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        if vca_info.get("osm_vdu_id"):
                            vdu_id = vca_info["osm_vdu_id"]
                            vdu_index = int(vca_info["vdu_index"])
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index
                            )
                        else:
                            vdu_index = 0
                            kdu_id = vca_info["osm_kdu_id"]
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
                                member_vnf_index, kdu_id, vdu_index
                            )
                        stage[2] = step = "Scaling in VCA"
                        self._write_op_status(op_id=nslcmop_id, stage=stage)
                        vca_update = db_nsr["_admin"]["deployed"]["VCA"]
                        config_update = db_nsr["configurationStatus"]
                        # find the VCA record matching this vnf/vdu instance and
                        # schedule its destruction (terminate primitives included)
                        for vca_index, vca in enumerate(vca_update):
                            if (
                                (vca or vca.get("ee_id"))
                                and vca["member-vnf-index"] == member_vnf_index
                                and vca["vdu_count_index"] == vdu_index
                            ):
                                if vca.get("vdu_id"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("vdu_id")
                                    )
                                elif vca.get("kdu_name"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("kdu_name")
                                    )
                                else:
                                    config_descriptor = get_configuration(
                                        db_vnfd, db_vnfd["id"]
                                    )
                                operation_params = (
                                    db_nslcmop.get("operationParams") or {}
                                )
                                exec_terminate_primitives = not operation_params.get(
                                    "skip_terminate_primitives"
                                ) and vca.get("needed_terminate")
                                task = asyncio.ensure_future(
                                    asyncio.wait_for(
                                        self.destroy_N2VC(
                                            logging_text,
                                            db_nslcmop,
                                            vca,
                                            config_descriptor,
                                            vca_index,
                                            destroy_ee=True,
                                            exec_primitives=exec_terminate_primitives,
                                            scaling_in=True,
                                            vca_id=vca_id,
                                        ),
                                        timeout=self.timeout_charm_delete,
                                    )
                                )
                                tasks_dict_info[task] = "Terminating VCA {}".format(
                                    vca.get("ee_id")
                                )
                                # NOTE(review): deleting from the list being
                                # enumerated shifts later indices — confirm only
                                # one match per (vnf, vdu_index) is expected.
                                del vca_update[vca_index]
                                del config_update[vca_index]
                        # wait for pending tasks of terminate primitives
                        if tasks_dict_info:
                            self.logger.debug(
                                logging_text
                                + "Waiting for tasks {}".format(
                                    list(tasks_dict_info.keys())
                                )
                            )
                            error_list = await self._wait_for_tasks(
                                logging_text,
                                tasks_dict_info,
                                min(
                                    self.timeout_charm_delete, self.timeout_ns_terminate
                                ),
                                stage,
                                nslcmop_id,
                            )
                            tasks_dict_info.clear()
                            if error_list:
                                raise LcmException("; ".join(error_list))

                        db_vca_and_config_update = {
                            "_admin.deployed.VCA": vca_update,
                            "configurationStatus": config_update,
                        }
                        self.update_db_2(
                            "nsrs", db_nsr["_id"], db_vca_and_config_update
                        )
            scale_process = None
            # SCALE-IN VCA - END

            # SCALE RO - BEGIN
            if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                scale_process = "RO"
                if self.ro_config.get("ng"):
                    await self._scale_ng_ro(
                        logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                    )
                scaling_info.pop("vdu-create", None)
                scaling_info.pop("vdu-delete", None)

                scale_process = None
            # SCALE RO - END

            # SCALE KDU - BEGIN
            if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
                scale_process = "KDU"
                await self._scale_kdu(
                    logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
                )
                scaling_info.pop("kdu-create", None)
                scaling_info.pop("kdu-delete", None)

                scale_process = None
            # SCALE KDU - END

            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # SCALE-UP VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Creating new execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "create":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        vnfd_id = db_vnfr["vnfd-ref"]
                        if vca_info.get("osm_vdu_id"):
                            # VDU case: deploy vnf-level charm (if any) and then
                            # the vdu-level charm for the new instance
                            vdu_index = int(vca_info["vdu_index"])
                            deploy_params = {"OSM": get_osm_params(db_vnfr)}
                            if db_vnfr.get("additionalParamsForVnf"):
                                deploy_params.update(
                                    parse_yaml_strings(
                                        db_vnfr["additionalParamsForVnf"].copy()
                                    )
                                )
                            descriptor_config = get_configuration(
                                db_vnfd, db_vnfd["id"]
                            )
                            if descriptor_config:
                                vdu_id = None
                                vdu_name = None
                                kdu_name = None
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={} ".format(member_vnf_index),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                            vdu_id = vca_info["osm_vdu_id"]
                            vdur = find_in_list(
                                db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                            )
                            descriptor_config = get_configuration(db_vnfd, vdu_id)
                            if vdur.get("additionalParams"):
                                deploy_params_vdu = parse_yaml_strings(
                                    vdur["additionalParams"]
                                )
                            else:
                                deploy_params_vdu = deploy_params
                            deploy_params_vdu["OSM"] = get_osm_params(
                                db_vnfr, vdu_id, vdu_count_index=vdu_index
                            )
                            if descriptor_config:
                                vdu_name = None
                                kdu_name = None
                                stage[
                                    1
                                ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                )
                                stage[2] = step = "Scaling out VCA"
                                self._write_op_status(op_id=nslcmop_id, stage=stage)
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_id, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                        else:
                            # KDU case: deploy the kdu-level charm (if any)
                            kdu_name = vca_info["osm_kdu_id"]
                            descriptor_config = get_configuration(db_vnfd, kdu_name)
                            if descriptor_config:
                                vdu_id = None
                                kdu_index = int(vca_info["kdu_index"])
                                vdu_name = None
                                kdur = next(
                                    x
                                    for x in db_vnfr["kdur"]
                                    if x["kdu-name"] == kdu_name
                                )
                                deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                                if kdur.get("additionalParams"):
                                    deploy_params_kdu = parse_yaml_strings(
                                        kdur["additionalParams"]
                                    )

                                self._deploy_n2vc(
                                    logging_text=logging_text,
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=kdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_kdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
            # SCALE-UP VCA - END
            scale_process = None

            # POST-SCALE BEGIN
            # execute primitive service POST-SCALING
            step = "Executing post-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "post-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "post-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing post-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:  # for-else: no matching primitive found
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
                                "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
                                "config-primitive".format(
                                    scaling_group, vnf_config_primitive
                                )
                            )
                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring post-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Post-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            nslcmop_id,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "POST-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
            db_nsr_update["config-status"] = old_config_status
            scale_process = None
            # POST-SCALE END

            db_nsr_update[
                "detailed-status"
            ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
            db_nsr_update["operational-status"] = (
                "running"
                if old_operational_status == "failed"
                else old_operational_status
            )
            db_nsr_update["config-status"] = old_config_status
            return
        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                # _wait_for_tasks returns an error description (or None),
                # reusing `exc` so a late task failure marks the op FAILED
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-scale statuses, then degrade the part
                    # (VCA/RO/KDU) recorded in scale_process as failed
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update["detailed-status"] = ""
                    if scale_process:
                        if "VCA" in scale_process:
                            db_nsr_update["config-status"] = "failed"
                        if "RO" in scale_process:
                            db_nsr_update["operational-status"] = "failed"
                        db_nsr_update[
                            "detailed-status"
                        ] = "FAILED scaling nslcmop={} {}: {}".format(
                            nslcmop_id, step, exc
                        )
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
5973
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale KDUs deployed on K8s clusters, running the kdu terminate/initial
        config primitives around the scale operation when applicable.

        :param logging_text: prefix for log messages
        :param nsr_id: nsr "_id", used to address the nsr record in the database
        :param nsr_deployed: "_admin.deployed" section of the nsr record
        :param db_vnfd: vnfd record of the VNF that owns the KDUs
        :param vca_id: VCA id passed through to the k8s connector calls
        :param scaling_info: dict with a "kdu-create" or "kdu-delete" entry
            mapping each kdu name to a list of per-kdu scaling details
        """
        # NOTE(review): if both "kdu-create" and "kdu-delete" are present only
        # "kdu-create" is processed -- confirm callers never pass both at once.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed kdu entry and its index inside
                # "_admin.deployed.K8s" of the nsr record
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # database location where the k8s connector reports status
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # before scaling in, run the kdu terminate config primitives
                # (only when the kdu has no juju execution environment)
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # the actual scale operation runs for both "create" and "delete"
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # after scaling out, run the kdu initial config primitives
                # (only when the kdu has no juju execution environment)
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
6074
6075 async def _scale_ng_ro(
6076 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6077 ):
6078 nsr_id = db_nslcmop["nsInstanceId"]
6079 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6080 db_vnfrs = {}
6081
6082 # read from db: vnfd's for every vnf
6083 db_vnfds = []
6084
6085 # for each vnf in ns, read vnfd
6086 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6087 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6088 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6089 # if we haven't this vnfd, read it from db
6090 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6091 # read from db
6092 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6093 db_vnfds.append(vnfd)
6094 n2vc_key = self.n2vc.get_public_key()
6095 n2vc_key_list = [n2vc_key]
6096 self.scale_vnfr(
6097 db_vnfr,
6098 vdu_scaling_info.get("vdu-create"),
6099 vdu_scaling_info.get("vdu-delete"),
6100 mark_delete=True,
6101 )
6102 # db_vnfr has been updated, update db_vnfrs to use it
6103 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6104 await self._instantiate_ng_ro(
6105 logging_text,
6106 nsr_id,
6107 db_nsd,
6108 db_nsr,
6109 db_nslcmop,
6110 db_vnfrs,
6111 db_vnfds,
6112 n2vc_key_list,
6113 stage=stage,
6114 start_deploy=time(),
6115 timeout_ns_deploy=self.timeout_ns_deploy,
6116 )
6117 if vdu_scaling_info.get("vdu-delete"):
6118 self.scale_vnfr(
6119 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6120 )
6121
6122 async def add_prometheus_metrics(
6123 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6124 ):
6125 if not self.prometheus:
6126 return
6127 # look if exist a file called 'prometheus*.j2' and
6128 artifact_content = self.fs.dir_ls(artifact_path)
6129 job_file = next(
6130 (
6131 f
6132 for f in artifact_content
6133 if f.startswith("prometheus") and f.endswith(".j2")
6134 ),
6135 None,
6136 )
6137 if not job_file:
6138 return
6139 with self.fs.file_open((artifact_path, job_file), "r") as f:
6140 job_data = f.read()
6141
6142 # TODO get_service
6143 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6144 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6145 host_port = "80"
6146 vnfr_id = vnfr_id.replace("-", "")
6147 variables = {
6148 "JOB_NAME": vnfr_id,
6149 "TARGET_IP": target_ip,
6150 "EXPORTER_POD_IP": host_name,
6151 "EXPORTER_POD_PORT": host_port,
6152 }
6153 job_list = self.prometheus.parse_job(job_data, variables)
6154 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
6155 for job in job_list:
6156 if (
6157 not isinstance(job.get("job_name"), str)
6158 or vnfr_id not in job["job_name"]
6159 ):
6160 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6161 job["nsr_id"] = nsr_id
6162 job_dict = {jl["job_name"]: jl for jl in job_list}
6163 if await self.prometheus.update(job_dict):
6164 return list(job_dict.keys())
6165
6166 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6167 """
6168 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6169
6170 :param: vim_account_id: VIM Account ID
6171
6172 :return: (cloud_name, cloud_credential)
6173 """
6174 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6175 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6176
6177 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6178 """
6179 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6180
6181 :param: vim_account_id: VIM Account ID
6182
6183 :return: (cloud_name, cloud_credential)
6184 """
6185 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6186 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")