Feature 10509 manual scaling for native k8s charm
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import (
26 Environment,
27 TemplateError,
28 TemplateNotFound,
29 StrictUndefined,
30 UndefinedError,
31 )
32
33 from osm_lcm import ROclient
34 from osm_lcm.data_utils.nsr import get_deployed_kdu
35 from osm_lcm.ng_ro import NgRoClient, NgRoException
36 from osm_lcm.lcm_utils import (
37 LcmException,
38 LcmExceptionNoMgmtIP,
39 LcmBase,
40 deep_get,
41 get_iterable,
42 populate_dict,
43 )
44 from osm_lcm.data_utils.nsd import get_vnf_profiles
45 from osm_lcm.data_utils.vnfd import (
46 get_vdu_list,
47 get_vdu_profile,
48 get_ee_sorted_initial_config_primitive_list,
49 get_ee_sorted_terminate_config_primitive_list,
50 get_kdu_list,
51 get_virtual_link_profiles,
52 get_vdu,
53 get_configuration,
54 get_vdu_index,
55 get_scaling_aspect,
56 get_number_of_instances,
57 get_juju_ee_ref,
58 get_kdu_profile,
59 )
60 from osm_lcm.data_utils.list_utils import find_in_list
61 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
62 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
63 from osm_lcm.data_utils.database.vim_account import VimAccountDB
64 from n2vc.k8s_helm_conn import K8sHelmConnector
65 from n2vc.k8s_helm3_conn import K8sHelm3Connector
66 from n2vc.k8s_juju_conn import K8sJujuConnector
67
68 from osm_common.dbbase import DbException
69 from osm_common.fsbase import FsException
70
71 from osm_lcm.data_utils.database.database import Database
72 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
73
74 from n2vc.n2vc_juju_conn import N2VCJujuConnector
75 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
76
77 from osm_lcm.lcm_helm_conn import LCMHelmConn
78
79 from copy import copy, deepcopy
80 from time import time
81 from uuid import uuid4
82
83 from random import randint
84
85 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
86
87
88 class NsLcm(LcmBase):
89 timeout_vca_on_error = (
90 5 * 60
91 ) # Time for a charm to be marked as failed after it first reaches blocked or error status
92 timeout_ns_deploy = 2 * 3600 # default global timeout for deploying a ns
93 timeout_ns_terminate = 1800 # default global timeout for undeploying a ns
94 timeout_charm_delete = 10 * 60
95 timeout_primitive = 30 * 60 # timeout for primitive execution
96 timeout_progress_primitive = (
97 10 * 60
98 ) # timeout for some progress in a primitive execution
99
100 SUBOPERATION_STATUS_NOT_FOUND = -1
101 SUBOPERATION_STATUS_NEW = -2
102 SUBOPERATION_STATUS_SKIP = -3
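# The three SUBOPERATION_STATUS_* values above are sentinel codes signalling that a
# sub-operation was not found (-1), is new (-2) or should be skipped (-3).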
103 task_name_deploy_vca = "Deploying VCA"
104
105 def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
106 """
107 Init: connect to database, filesystem storage, and messaging
108 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
109 :return: None
110 """
111 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
112
113 self.db = Database().instance.db
114 self.fs = Filesystem().instance.fs
115 self.loop = loop
116 self.lcm_tasks = lcm_tasks
117 self.timeout = config["timeout"]
118 self.ro_config = config["ro_config"]
119 self.ng_ro = config["ro_config"].get("ng")
120 self.vca_config = config["VCA"].copy()
121
122 # create N2VC connector
123 self.n2vc = N2VCJujuConnector(
124 log=self.logger,
125 loop=self.loop,
126 on_update_db=self._on_update_n2vc_db,
127 fs=self.fs,
128 db=self.db,
129 )
130
131 self.conn_helm_ee = LCMHelmConn(
132 log=self.logger,
133 loop=self.loop,
134 vca_config=self.vca_config,
135 on_update_db=self._on_update_n2vc_db,
136 )
137
138 self.k8sclusterhelm2 = K8sHelmConnector(
139 kubectl_command=self.vca_config.get("kubectlpath"),
140 helm_command=self.vca_config.get("helmpath"),
141 log=self.logger,
142 on_update_db=None,
143 fs=self.fs,
144 db=self.db,
145 )
146
147 self.k8sclusterhelm3 = K8sHelm3Connector(
148 kubectl_command=self.vca_config.get("kubectlpath"),
149 helm_command=self.vca_config.get("helm3path"),
150 fs=self.fs,
151 log=self.logger,
152 db=self.db,
153 on_update_db=None,
154 )
155
156 self.k8sclusterjuju = K8sJujuConnector(
157 kubectl_command=self.vca_config.get("kubectlpath"),
158 juju_command=self.vca_config.get("jujupath"),
159 log=self.logger,
160 loop=self.loop,
161 on_update_db=self._on_update_k8s_db,
162 fs=self.fs,
163 db=self.db,
164 )
165
166 self.k8scluster_map = {
167 "helm-chart": self.k8sclusterhelm2,
168 "helm-chart-v3": self.k8sclusterhelm3,
169 "chart": self.k8sclusterhelm3,
170 "juju-bundle": self.k8sclusterjuju,
171 "juju": self.k8sclusterjuju,
172 }
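# k8scluster_map routes each KDU deployment type found in the descriptors to the K8s
# connector that handles it: "helm-chart" to Helm v2, "helm-chart-v3" and "chart" to
# Helm v3, and "juju-bundle"/"juju" to the Juju connector.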
173
174 self.vca_map = {
175 "lxc_proxy_charm": self.n2vc,
176 "native_charm": self.n2vc,
177 "k8s_proxy_charm": self.n2vc,
178 "helm": self.conn_helm_ee,
179 "helm-v3": self.conn_helm_ee,
180 }
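# vca_map does the same for execution environments: proxy and native charms are
# handled by the N2VC Juju connector, while helm and helm-v3 execution environments
# are handled by the LCM helm connector.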
181
182 self.prometheus = prometheus
183
184 # create RO client
185 self.RO = NgRoClient(self.loop, **self.ro_config)
186
187 @staticmethod
188 def increment_ip_mac(ip_mac, vm_index=1):
189 if not isinstance(ip_mac, str):
190 return ip_mac
191 try:
192 # try with ipv4 look for last dot
193 i = ip_mac.rfind(".")
194 if i > 0:
195 i += 1
196 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
197 # try with ipv6 or mac look for last colon. Operate in hex
198 i = ip_mac.rfind(":")
199 if i > 0:
200 i += 1
201 # format in hex, len can be 2 for mac or 4 for ipv6
202 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
203 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
204 )
205 except Exception:
206 pass
207 return None
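# Illustrative examples of the helper above (added for clarity):
#   increment_ip_mac("10.0.0.5", 2)           -> "10.0.0.7"
#   increment_ip_mac("fa:16:3e:00:00:0a", 1)  -> "fa:16:3e:00:00:0b"
#   increment_ip_mac(["not", "a", "string"])  -> returned unchanged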
208
209 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
210
211 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
212
213 try:
214 # TODO filter RO descriptor fields...
215
216 # write to database
217 db_dict = dict()
218 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
219 db_dict["deploymentStatus"] = ro_descriptor
220 self.update_db_2("nsrs", nsrs_id, db_dict)
221
222 except Exception as e:
223 self.logger.warn(
224 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
225 )
226
227 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
228
229 # remove last dot from path (if exists)
230 if path.endswith("."):
231 path = path[:-1]
232
233 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
234 # .format(table, filter, path, updated_data))
235 try:
236
237 nsr_id = filter.get("_id")
238
239 # read ns record from database
240 nsr = self.db.get_one(table="nsrs", q_filter=filter)
241 current_ns_status = nsr.get("nsState")
242
243 # get vca status for NS
244 status_dict = await self.n2vc.get_status(
245 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
246 )
247
248 # vcaStatus
249 db_dict = dict()
250 db_dict["vcaStatus"] = status_dict
251 await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
252
253 # update configurationStatus for this VCA
254 try:
255 vca_index = int(path[path.rfind(".") + 1 :])
256
257 vca_list = deep_get(
258 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
259 )
260 vca_status = vca_list[vca_index].get("status")
261
262 configuration_status_list = nsr.get("configurationStatus")
263 config_status = configuration_status_list[vca_index].get("status")
264
265 if config_status == "BROKEN" and vca_status != "failed":
266 db_dict["configurationStatus"][vca_index] = "READY"
267 elif config_status != "BROKEN" and vca_status == "failed":
268 db_dict["configurationStatus"][vca_index] = "BROKEN"
269 except Exception as e:
270 # do not update configurationStatus
271 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
272
273 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
274 # if nsState = 'DEGRADED' check if all is OK
275 is_degraded = False
276 if current_ns_status in ("READY", "DEGRADED"):
277 error_description = ""
278 # check machines
279 if status_dict.get("machines"):
280 for machine_id in status_dict.get("machines"):
281 machine = status_dict.get("machines").get(machine_id)
282 # check machine agent-status
283 if machine.get("agent-status"):
284 s = machine.get("agent-status").get("status")
285 if s != "started":
286 is_degraded = True
287 error_description += (
288 "machine {} agent-status={} ; ".format(
289 machine_id, s
290 )
291 )
292 # check machine instance status
293 if machine.get("instance-status"):
294 s = machine.get("instance-status").get("status")
295 if s != "running":
296 is_degraded = True
297 error_description += (
298 "machine {} instance-status={} ; ".format(
299 machine_id, s
300 )
301 )
302 # check applications
303 if status_dict.get("applications"):
304 for app_id in status_dict.get("applications"):
305 app = status_dict.get("applications").get(app_id)
306 # check application status
307 if app.get("status"):
308 s = app.get("status").get("status")
309 if s != "active":
310 is_degraded = True
311 error_description += (
312 "application {} status={} ; ".format(app_id, s)
313 )
314
315 if error_description:
316 db_dict["errorDescription"] = error_description
317 if current_ns_status == "READY" and is_degraded:
318 db_dict["nsState"] = "DEGRADED"
319 if current_ns_status == "DEGRADED" and not is_degraded:
320 db_dict["nsState"] = "READY"
321
322 # write to database
323 self.update_db_2("nsrs", nsr_id, db_dict)
324
325 except (asyncio.CancelledError, asyncio.TimeoutError):
326 raise
327 except Exception as e:
328 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
329
330 async def _on_update_k8s_db(
331 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
332 ):
333 """
334 Updating vca status in NSR record
335 :param cluster_uuid: UUID of a k8s cluster
336 :param kdu_instance: The unique name of the KDU instance
337 :param filter: To get nsr_id
338 :return: none
339 """
340
341 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
342 # .format(cluster_uuid, kdu_instance, filter))
343
344 try:
345 nsr_id = filter.get("_id")
346
347 # get vca status for NS
348 vca_status = await self.k8sclusterjuju.status_kdu(
349 cluster_uuid,
350 kdu_instance,
351 complete_status=True,
352 yaml_format=False,
353 vca_id=vca_id,
354 )
355 # vcaStatus
356 db_dict = dict()
357 db_dict["vcaStatus"] = {nsr_id: vca_status}
358
359 await self.k8sclusterjuju.update_vca_status(
360 db_dict["vcaStatus"],
361 kdu_instance,
362 vca_id=vca_id,
363 )
364
365 # write to database
366 self.update_db_2("nsrs", nsr_id, db_dict)
367
368 except (asyncio.CancelledError, asyncio.TimeoutError):
369 raise
370 except Exception as e:
371 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
372
373 @staticmethod
374 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
375 try:
376 env = Environment(undefined=StrictUndefined)
377 template = env.from_string(cloud_init_text)
378 return template.render(additional_params or {})
379 except UndefinedError as e:
380 raise LcmException(
381 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
382 "file, must be provided in the instantiation parameters inside the "
383 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
384 )
385 except (TemplateError, TemplateNotFound) as e:
386 raise LcmException(
387 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
388 vnfd_id, vdu_id, e
389 )
390 )
391
392 def _get_vdu_cloud_init_content(self, vdu, vnfd):
393 cloud_init_content = cloud_init_file = None
394 try:
395 if vdu.get("cloud-init-file"):
396 base_folder = vnfd["_admin"]["storage"]
397 cloud_init_file = "{}/{}/cloud_init/{}".format(
398 base_folder["folder"],
399 base_folder["pkg-dir"],
400 vdu["cloud-init-file"],
401 )
402 with self.fs.file_open(cloud_init_file, "r") as ci_file:
403 cloud_init_content = ci_file.read()
404 elif vdu.get("cloud-init"):
405 cloud_init_content = vdu["cloud-init"]
406
407 return cloud_init_content
408 except FsException as e:
409 raise LcmException(
410 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
411 vnfd["id"], vdu["id"], cloud_init_file, e
412 )
413 )
414
415 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
416 vdur = next(
417 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
418 )
419 additional_params = vdur.get("additionalParams")
420 return parse_yaml_strings(additional_params)
421
422 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
423 """
424 Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
425 :param vnfd: input vnfd
426 :param new_id: overrides vnf id if provided
427 :param additionalParams: Instantiation params for VNFs provided
428 :param nsrId: Id of the NSR
429 :return: copy of vnfd
430 """
431 vnfd_RO = deepcopy(vnfd)
432 # remove unused by RO configuration, monitoring, scaling and internal keys
433 vnfd_RO.pop("_id", None)
434 vnfd_RO.pop("_admin", None)
435 vnfd_RO.pop("monitoring-param", None)
436 vnfd_RO.pop("scaling-group-descriptor", None)
437 vnfd_RO.pop("kdu", None)
438 vnfd_RO.pop("k8s-cluster", None)
439 if new_id:
440 vnfd_RO["id"] = new_id
441
442 # remove cloud-init and cloud-init-file from each vdu; they are not passed to RO
443 for vdu in get_iterable(vnfd_RO, "vdu"):
444 vdu.pop("cloud-init-file", None)
445 vdu.pop("cloud-init", None)
446 return vnfd_RO
447
448 @staticmethod
449 def ip_profile_2_RO(ip_profile):
450 RO_ip_profile = deepcopy(ip_profile)
451 if "dns-server" in RO_ip_profile:
452 if isinstance(RO_ip_profile["dns-server"], list):
453 RO_ip_profile["dns-address"] = []
454 for ds in RO_ip_profile.pop("dns-server"):
455 RO_ip_profile["dns-address"].append(ds["address"])
456 else:
457 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
458 if RO_ip_profile.get("ip-version") == "ipv4":
459 RO_ip_profile["ip-version"] = "IPv4"
460 if RO_ip_profile.get("ip-version") == "ipv6":
461 RO_ip_profile["ip-version"] = "IPv6"
462 if "dhcp-params" in RO_ip_profile:
463 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
464 return RO_ip_profile
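# Illustrative transformation performed by ip_profile_2_RO (added for clarity):
#   {"ip-version": "ipv4", "dns-server": [{"address": "8.8.8.8"}], "dhcp-params": {"enabled": True}}
# becomes
#   {"ip-version": "IPv4", "dns-address": ["8.8.8.8"], "dhcp": {"enabled": True}}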
465
466 def _get_ro_vim_id_for_vim_account(self, vim_account):
467 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
468 if db_vim["_admin"]["operationalState"] != "ENABLED":
469 raise LcmException(
470 "VIM={} is not available. operationalState={}".format(
471 vim_account, db_vim["_admin"]["operationalState"]
472 )
473 )
474 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
475 return RO_vim_id
476
477 def get_ro_wim_id_for_wim_account(self, wim_account):
478 if isinstance(wim_account, str):
479 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
480 if db_wim["_admin"]["operationalState"] != "ENABLED":
481 raise LcmException(
482 "WIM={} is not available. operationalState={}".format(
483 wim_account, db_wim["_admin"]["operationalState"]
484 )
485 )
486 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
487 return RO_wim_id
488 else:
489 return wim_account
490
491 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
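"""
Adds or removes vdur entries in the VNFR (and in the database) to reflect a
scaling operation.
:param db_vnfr: VNFR content; its "vdur" list is refreshed from the database
:param vdu_create: dict with vdu-id-ref as key and number of instances to add
:param vdu_delete: dict with vdu-id-ref as key and number of instances to remove
:param mark_delete: if True, mark the vdur as DELETING instead of removing it
:return: None. Raises LcmException when scaling out a vdu with no existing vdur
"""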
492
493 db_vdu_push_list = []
494 db_update = {"_admin.modified": time()}
495 if vdu_create:
496 for vdu_id, vdu_count in vdu_create.items():
497 vdur = next(
498 (
499 vdur
500 for vdur in reversed(db_vnfr["vdur"])
501 if vdur["vdu-id-ref"] == vdu_id
502 ),
503 None,
504 )
505 if not vdur:
506 raise LcmException(
507 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
508 vdu_id
509 )
510 )
511
512 for count in range(vdu_count):
513 vdur_copy = deepcopy(vdur)
514 vdur_copy["status"] = "BUILD"
515 vdur_copy["status-detailed"] = None
516 vdur_copy["ip-address"]: None
517 vdur_copy["_id"] = str(uuid4())
518 vdur_copy["count-index"] += count + 1
519 vdur_copy["id"] = "{}-{}".format(
520 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
521 )
522 vdur_copy.pop("vim_info", None)
523 for iface in vdur_copy["interfaces"]:
524 if iface.get("fixed-ip"):
525 iface["ip-address"] = self.increment_ip_mac(
526 iface["ip-address"], count + 1
527 )
528 else:
529 iface.pop("ip-address", None)
530 if iface.get("fixed-mac"):
531 iface["mac-address"] = self.increment_ip_mac(
532 iface["mac-address"], count + 1
533 )
534 else:
535 iface.pop("mac-address", None)
536 iface.pop(
537 "mgmt_vnf", None
538 ) # only the first vdu can be the management of the vnf
539 db_vdu_push_list.append(vdur_copy)
540 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
541 if vdu_delete:
542 for vdu_id, vdu_count in vdu_delete.items():
543 if mark_delete:
544 indexes_to_delete = [
545 iv[0]
546 for iv in enumerate(db_vnfr["vdur"])
547 if iv[1]["vdu-id-ref"] == vdu_id
548 ]
549 db_update.update(
550 {
551 "vdur.{}.status".format(i): "DELETING"
552 for i in indexes_to_delete[-vdu_count:]
553 }
554 )
555 else:
556 # they must be deleted one by one because common.db does not allow otherwise
557 vdus_to_delete = [
558 v
559 for v in reversed(db_vnfr["vdur"])
560 if v["vdu-id-ref"] == vdu_id
561 ]
562 for vdu in vdus_to_delete[:vdu_count]:
563 self.db.set_one(
564 "vnfrs",
565 {"_id": db_vnfr["_id"]},
566 None,
567 pull={"vdur": {"_id": vdu["_id"]}},
568 )
569 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
570 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
571 # modify passed dictionary db_vnfr
572 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
573 db_vnfr["vdur"] = db_vnfr_["vdur"]
574
575 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
576 """
577 Updates database nsr with the RO info for the created vld
578 :param ns_update_nsr: dictionary to be filled with the updated info
579 :param db_nsr: content of db_nsr. This is also modified
580 :param nsr_desc_RO: nsr descriptor from RO
581 :return: Nothing, LcmException is raised on errors
582 """
583
584 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
585 for net_RO in get_iterable(nsr_desc_RO, "nets"):
586 if vld["id"] != net_RO.get("ns_net_osm_id"):
587 continue
588 vld["vim-id"] = net_RO.get("vim_net_id")
589 vld["name"] = net_RO.get("vim_name")
590 vld["status"] = net_RO.get("status")
591 vld["status-detailed"] = net_RO.get("error_msg")
592 ns_update_nsr["vld.{}".format(vld_index)] = vld
593 break
594 else:
595 raise LcmException(
596 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
597 )
598
599 def set_vnfr_at_error(self, db_vnfrs, error_text):
600 try:
601 for db_vnfr in db_vnfrs.values():
602 vnfr_update = {"status": "ERROR"}
603 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
604 if "status" not in vdur:
605 vdur["status"] = "ERROR"
606 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
607 if error_text:
608 vdur["status-detailed"] = str(error_text)
609 vnfr_update[
610 "vdur.{}.status-detailed".format(vdu_index)
611 ] = "ERROR"
612 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
613 except DbException as e:
614 self.logger.error("Cannot update vnf. {}".format(e))
615
616 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
617 """
618 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
619 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
620 :param nsr_desc_RO: nsr descriptor from RO
621 :return: Nothing, LcmException is raised on errors
622 """
623 for vnf_index, db_vnfr in db_vnfrs.items():
624 for vnf_RO in nsr_desc_RO["vnfs"]:
625 if vnf_RO["member_vnf_index"] != vnf_index:
626 continue
627 vnfr_update = {}
628 if vnf_RO.get("ip_address"):
629 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
630 "ip_address"
631 ].split(";")[0]
632 elif not db_vnfr.get("ip-address"):
633 if db_vnfr.get("vdur"): # if there are no VDUs, there is no ip_address
634 raise LcmExceptionNoMgmtIP(
635 "ns member_vnf_index '{}' has no IP address".format(
636 vnf_index
637 )
638 )
639
640 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
641 vdur_RO_count_index = 0
642 if vdur.get("pdu-type"):
643 continue
644 for vdur_RO in get_iterable(vnf_RO, "vms"):
645 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
646 continue
647 if vdur["count-index"] != vdur_RO_count_index:
648 vdur_RO_count_index += 1
649 continue
650 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
651 if vdur_RO.get("ip_address"):
652 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
653 else:
654 vdur["ip-address"] = None
655 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
656 vdur["name"] = vdur_RO.get("vim_name")
657 vdur["status"] = vdur_RO.get("status")
658 vdur["status-detailed"] = vdur_RO.get("error_msg")
659 for ifacer in get_iterable(vdur, "interfaces"):
660 for interface_RO in get_iterable(vdur_RO, "interfaces"):
661 if ifacer["name"] == interface_RO.get("internal_name"):
662 ifacer["ip-address"] = interface_RO.get(
663 "ip_address"
664 )
665 ifacer["mac-address"] = interface_RO.get(
666 "mac_address"
667 )
668 break
669 else:
670 raise LcmException(
671 "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
672 "from VIM info".format(
673 vnf_index, vdur["vdu-id-ref"], ifacer["name"]
674 )
675 )
676 vnfr_update["vdur.{}".format(vdu_index)] = vdur
677 break
678 else:
679 raise LcmException(
680 "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
681 "VIM info".format(
682 vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
683 )
684 )
685
686 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
687 for net_RO in get_iterable(nsr_desc_RO, "nets"):
688 if vld["id"] != net_RO.get("vnf_net_osm_id"):
689 continue
690 vld["vim-id"] = net_RO.get("vim_net_id")
691 vld["name"] = net_RO.get("vim_name")
692 vld["status"] = net_RO.get("status")
693 vld["status-detailed"] = net_RO.get("error_msg")
694 vnfr_update["vld.{}".format(vld_index)] = vld
695 break
696 else:
697 raise LcmException(
698 "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
699 vnf_index, vld["id"]
700 )
701 )
702
703 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
704 break
705
706 else:
707 raise LcmException(
708 "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
709 vnf_index
710 )
711 )
712
713 def _get_ns_config_info(self, nsr_id):
714 """
715 Generates a mapping between vnf,vdu elements and the N2VC id
716 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
717 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
718 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
719 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
720 """
721 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
722 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
723 mapping = {}
724 ns_config_info = {"osm-config-mapping": mapping}
725 for vca in vca_deployed_list:
726 if not vca["member-vnf-index"]:
727 continue
728 if not vca["vdu_id"]:
729 mapping[vca["member-vnf-index"]] = vca["application"]
730 else:
731 mapping[
732 "{}.{}.{}".format(
733 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
734 )
735 ] = vca["application"]
736 return ns_config_info
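# Illustrative result (added; application names are hypothetical):
#   {"osm-config-mapping": {"1": "app-vnf-1", "1.mgmtVM.0": "app-vdu-mgmtvm-0"}}
# following the key formats described in the docstring above.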
737
738 async def _instantiate_ng_ro(
739 self,
740 logging_text,
741 nsr_id,
742 nsd,
743 db_nsr,
744 db_nslcmop,
745 db_vnfrs,
746 db_vnfds,
747 n2vc_key_list,
748 stage,
749 start_deploy,
750 timeout_ns_deploy,
751 ):
752
753 db_vims = {}
754
755 def get_vim_account(vim_account_id):
756 nonlocal db_vims
757 if vim_account_id in db_vims:
758 return db_vims[vim_account_id]
759 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
760 db_vims[vim_account_id] = db_vim
761 return db_vim
762
763 # modify target_vld info with instantiation parameters
764 def parse_vld_instantiation_params(
765 target_vim, target_vld, vld_params, target_sdn
766 ):
767 if vld_params.get("ip-profile"):
768 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
769 "ip-profile"
770 ]
771 if vld_params.get("provider-network"):
772 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
773 "provider-network"
774 ]
775 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
776 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
777 "provider-network"
778 ]["sdn-ports"]
779 if vld_params.get("wimAccountId"):
780 target_wim = "wim:{}".format(vld_params["wimAccountId"])
781 target_vld["vim_info"][target_wim] = {}
782 for param in ("vim-network-name", "vim-network-id"):
783 if vld_params.get(param):
784 if isinstance(vld_params[param], dict):
785 for vim, vim_net in vld_params[param].items():
786 other_target_vim = "vim:" + vim
787 populate_dict(
788 target_vld["vim_info"],
789 (other_target_vim, param.replace("-", "_")),
790 vim_net,
791 )
792 else: # isinstance str
793 target_vld["vim_info"][target_vim][
794 param.replace("-", "_")
795 ] = vld_params[param]
796 if vld_params.get("common_id"):
797 target_vld["common_id"] = vld_params.get("common_id")
798
799 nslcmop_id = db_nslcmop["_id"]
800 target = {
801 "name": db_nsr["name"],
802 "ns": {"vld": []},
803 "vnf": [],
804 "image": deepcopy(db_nsr["image"]),
805 "flavor": deepcopy(db_nsr["flavor"]),
806 "action_id": nslcmop_id,
807 "cloud_init_content": {},
808 }
809 for image in target["image"]:
810 image["vim_info"] = {}
811 for flavor in target["flavor"]:
812 flavor["vim_info"] = {}
813
814 if db_nslcmop.get("lcmOperationType") != "instantiate":
815 # get parameters of instantiation:
816 db_nslcmop_instantiate = self.db.get_list(
817 "nslcmops",
818 {
819 "nsInstanceId": db_nslcmop["nsInstanceId"],
820 "lcmOperationType": "instantiate",
821 },
822 )[-1]
823 ns_params = db_nslcmop_instantiate.get("operationParams")
824 else:
825 ns_params = db_nslcmop.get("operationParams")
826 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
827 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
828
829 cp2target = {}
830 for vld_index, vld in enumerate(db_nsr.get("vld")):
831 target_vim = "vim:{}".format(ns_params["vimAccountId"])
832 target_vld = {
833 "id": vld["id"],
834 "name": vld["name"],
835 "mgmt-network": vld.get("mgmt-network", False),
836 "type": vld.get("type"),
837 "vim_info": {
838 target_vim: {
839 "vim_network_name": vld.get("vim-network-name"),
840 "vim_account_id": ns_params["vimAccountId"],
841 }
842 },
843 }
844 # check if this network needs SDN assist
845 if vld.get("pci-interfaces"):
846 db_vim = get_vim_account(ns_params["vimAccountId"])
847 sdnc_id = db_vim["config"].get("sdn-controller")
848 if sdnc_id:
849 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
850 target_sdn = "sdn:{}".format(sdnc_id)
851 target_vld["vim_info"][target_sdn] = {
852 "sdn": True,
853 "target_vim": target_vim,
854 "vlds": [sdn_vld],
855 "type": vld.get("type"),
856 }
857
858 nsd_vnf_profiles = get_vnf_profiles(nsd)
859 for nsd_vnf_profile in nsd_vnf_profiles:
860 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
861 if cp["virtual-link-profile-id"] == vld["id"]:
862 cp2target[
863 "member_vnf:{}.{}".format(
864 cp["constituent-cpd-id"][0][
865 "constituent-base-element-id"
866 ],
867 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
868 )
869 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
870
871 # check at nsd descriptor, if there is an ip-profile
872 vld_params = {}
873 nsd_vlp = find_in_list(
874 get_virtual_link_profiles(nsd),
875 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
876 == vld["id"],
877 )
878 if (
879 nsd_vlp
880 and nsd_vlp.get("virtual-link-protocol-data")
881 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
882 ):
883 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
884 "l3-protocol-data"
885 ]
886 ip_profile_dest_data = {}
887 if "ip-version" in ip_profile_source_data:
888 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
889 "ip-version"
890 ]
891 if "cidr" in ip_profile_source_data:
892 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
893 "cidr"
894 ]
895 if "gateway-ip" in ip_profile_source_data:
896 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
897 "gateway-ip"
898 ]
899 if "dhcp-enabled" in ip_profile_source_data:
900 ip_profile_dest_data["dhcp-params"] = {
901 "enabled": ip_profile_source_data["dhcp-enabled"]
902 }
903 vld_params["ip-profile"] = ip_profile_dest_data
904
905 # update vld_params with instantiation params
906 vld_instantiation_params = find_in_list(
907 get_iterable(ns_params, "vld"),
908 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
909 )
910 if vld_instantiation_params:
911 vld_params.update(vld_instantiation_params)
912 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
913 target["ns"]["vld"].append(target_vld)
914
915 for vnfr in db_vnfrs.values():
916 vnfd = find_in_list(
917 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
918 )
919 vnf_params = find_in_list(
920 get_iterable(ns_params, "vnf"),
921 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
922 )
923 target_vnf = deepcopy(vnfr)
924 target_vim = "vim:{}".format(vnfr["vim-account-id"])
925 for vld in target_vnf.get("vld", ()):
926 # check if connected to a ns.vld, to fill target
927 vnf_cp = find_in_list(
928 vnfd.get("int-virtual-link-desc", ()),
929 lambda cpd: cpd.get("id") == vld["id"],
930 )
931 if vnf_cp:
932 ns_cp = "member_vnf:{}.{}".format(
933 vnfr["member-vnf-index-ref"], vnf_cp["id"]
934 )
935 if cp2target.get(ns_cp):
936 vld["target"] = cp2target[ns_cp]
937
938 vld["vim_info"] = {
939 target_vim: {"vim_network_name": vld.get("vim-network-name")}
940 }
941 # check if this network needs SDN assist
942 target_sdn = None
943 if vld.get("pci-interfaces"):
944 db_vim = get_vim_account(vnfr["vim-account-id"])
945 sdnc_id = db_vim["config"].get("sdn-controller")
946 if sdnc_id:
947 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
948 target_sdn = "sdn:{}".format(sdnc_id)
949 vld["vim_info"][target_sdn] = {
950 "sdn": True,
951 "target_vim": target_vim,
952 "vlds": [sdn_vld],
953 "type": vld.get("type"),
954 }
955
956 # check at vnfd descriptor, if there is an ip-profile
957 vld_params = {}
958 vnfd_vlp = find_in_list(
959 get_virtual_link_profiles(vnfd),
960 lambda a_link_profile: a_link_profile["id"] == vld["id"],
961 )
962 if (
963 vnfd_vlp
964 and vnfd_vlp.get("virtual-link-protocol-data")
965 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
966 ):
967 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
968 "l3-protocol-data"
969 ]
970 ip_profile_dest_data = {}
971 if "ip-version" in ip_profile_source_data:
972 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
973 "ip-version"
974 ]
975 if "cidr" in ip_profile_source_data:
976 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
977 "cidr"
978 ]
979 if "gateway-ip" in ip_profile_source_data:
980 ip_profile_dest_data[
981 "gateway-address"
982 ] = ip_profile_source_data["gateway-ip"]
983 if "dhcp-enabled" in ip_profile_source_data:
984 ip_profile_dest_data["dhcp-params"] = {
985 "enabled": ip_profile_source_data["dhcp-enabled"]
986 }
987
988 vld_params["ip-profile"] = ip_profile_dest_data
989 # update vld_params with instantiation params
990 if vnf_params:
991 vld_instantiation_params = find_in_list(
992 get_iterable(vnf_params, "internal-vld"),
993 lambda i_vld: i_vld["name"] == vld["id"],
994 )
995 if vld_instantiation_params:
996 vld_params.update(vld_instantiation_params)
997 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
998
999 vdur_list = []
1000 for vdur in target_vnf.get("vdur", ()):
1001 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1002 continue # This vdu must not be created
1003 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1004
1005 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1006
1007 if ssh_keys_all:
1008 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1009 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1010 if (
1011 vdu_configuration
1012 and vdu_configuration.get("config-access")
1013 and vdu_configuration.get("config-access").get("ssh-access")
1014 ):
1015 vdur["ssh-keys"] = ssh_keys_all
1016 vdur["ssh-access-required"] = vdu_configuration[
1017 "config-access"
1018 ]["ssh-access"]["required"]
1019 elif (
1020 vnf_configuration
1021 and vnf_configuration.get("config-access")
1022 and vnf_configuration.get("config-access").get("ssh-access")
1023 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1024 ):
1025 vdur["ssh-keys"] = ssh_keys_all
1026 vdur["ssh-access-required"] = vnf_configuration[
1027 "config-access"
1028 ]["ssh-access"]["required"]
1029 elif ssh_keys_instantiation and find_in_list(
1030 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1031 ):
1032 vdur["ssh-keys"] = ssh_keys_instantiation
1033
1034 self.logger.debug("NS > vdur > {}".format(vdur))
1035
1036 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1037 # cloud-init
1038 if vdud.get("cloud-init-file"):
1039 vdur["cloud-init"] = "{}:file:{}".format(
1040 vnfd["_id"], vdud.get("cloud-init-file")
1041 )
1042 # read file and put content at target.cloud_init_content. Avoid ng_ro using the shared package system
1043 if vdur["cloud-init"] not in target["cloud_init_content"]:
1044 base_folder = vnfd["_admin"]["storage"]
1045 cloud_init_file = "{}/{}/cloud_init/{}".format(
1046 base_folder["folder"],
1047 base_folder["pkg-dir"],
1048 vdud.get("cloud-init-file"),
1049 )
1050 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1051 target["cloud_init_content"][
1052 vdur["cloud-init"]
1053 ] = ci_file.read()
1054 elif vdud.get("cloud-init"):
1055 vdur["cloud-init"] = "{}:vdu:{}".format(
1056 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1057 )
1058 # put content at target.cloud_init_content. Avoid ng_ro reading the vnfd descriptor
1059 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1060 "cloud-init"
1061 ]
1062 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1063 deploy_params_vdu = self._format_additional_params(
1064 vdur.get("additionalParams") or {}
1065 )
1066 deploy_params_vdu["OSM"] = get_osm_params(
1067 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1068 )
1069 vdur["additionalParams"] = deploy_params_vdu
1070
1071 # flavor
1072 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1073 if target_vim not in ns_flavor["vim_info"]:
1074 ns_flavor["vim_info"][target_vim] = {}
1075
1076 # deal with images
1077 # in case alternative images are provided, check whether any of them applies
1078 # to this vim_type and, if so, use that alternative image instead
1079 ns_image_id = int(vdur["ns-image-id"])
1080 if vdur.get("alt-image-ids"):
1081 db_vim = get_vim_account(vnfr["vim-account-id"])
1082 vim_type = db_vim["vim_type"]
1083 for alt_image_id in vdur.get("alt-image-ids"):
1084 ns_alt_image = target["image"][int(alt_image_id)]
1085 if vim_type == ns_alt_image.get("vim-type"):
1086 # must use alternative image
1087 self.logger.debug(
1088 "use alternative image id: {}".format(alt_image_id)
1089 )
1090 ns_image_id = alt_image_id
1091 vdur["ns-image-id"] = ns_image_id
1092 break
1093 ns_image = target["image"][int(ns_image_id)]
1094 if target_vim not in ns_image["vim_info"]:
1095 ns_image["vim_info"][target_vim] = {}
1096
1097 vdur["vim_info"] = {target_vim: {}}
1098 # instantiation parameters
1099 # if vnf_params:
1100 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1101 # vdud["id"]), None)
1102 vdur_list.append(vdur)
1103 target_vnf["vdur"] = vdur_list
1104 target["vnf"].append(target_vnf)
1105
1106 desc = await self.RO.deploy(nsr_id, target)
1107 self.logger.debug("RO return > {}".format(desc))
1108 action_id = desc["action_id"]
1109 await self._wait_ng_ro(
1110 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1111 )
1112
1113 # Updating NSR
1114 db_nsr_update = {
1115 "_admin.deployed.RO.operational-status": "running",
1116 "detailed-status": " ".join(stage),
1117 }
1118 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
1121 self.logger.debug(
1122 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1123 )
1124 return
1125
1126 async def _wait_ng_ro(
1127 self,
1128 nsr_id,
1129 action_id,
1130 nslcmop_id=None,
1131 start_time=None,
1132 timeout=600,
1133 stage=None,
1134 ):
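"""
Polls NG-RO every 15 seconds for the status of an action until DONE.
Raises NgRoException if RO reports FAILED or if the timeout expires. While the
action is in BUILD state, stage/nslcmop_id (when provided) are used to refresh
the detailed status of the NS record and operation in the database.
"""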
1135 detailed_status_old = None
1136 db_nsr_update = {}
1137 start_time = start_time or time()
1138 while time() <= start_time + timeout:
1139 desc_status = await self.RO.status(nsr_id, action_id)
1140 self.logger.debug("Wait NG RO > {}".format(desc_status))
1141 if desc_status["status"] == "FAILED":
1142 raise NgRoException(desc_status["details"])
1143 elif desc_status["status"] == "BUILD":
1144 if stage:
1145 stage[2] = "VIM: ({})".format(desc_status["details"])
1146 elif desc_status["status"] == "DONE":
1147 if stage:
1148 stage[2] = "Deployed at VIM"
1149 break
1150 else:
1151 assert False, "ROclient.check_ns_status returns unknown {}".format(
1152 desc_status["status"]
1153 )
1154 if stage and nslcmop_id and stage[2] != detailed_status_old:
1155 detailed_status_old = stage[2]
1156 db_nsr_update["detailed-status"] = " ".join(stage)
1157 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1158 self._write_op_status(nslcmop_id, stage)
1159 await asyncio.sleep(15, loop=self.loop)
1160 else: # timeout_ns_deploy
1161 raise NgRoException("Timeout waiting ns to deploy")
1162
1163 async def _terminate_ng_ro(
1164 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1165 ):
1166 db_nsr_update = {}
1167 failed_detail = []
1168 action_id = None
1169 start_deploy = time()
1170 try:
1171 target = {
1172 "ns": {"vld": []},
1173 "vnf": [],
1174 "image": [],
1175 "flavor": [],
1176 "action_id": nslcmop_id,
1177 }
1178 desc = await self.RO.deploy(nsr_id, target)
1179 action_id = desc["action_id"]
1180 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1181 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1182 self.logger.debug(
1183 logging_text
1184 + "ns terminate action at RO. action_id={}".format(action_id)
1185 )
1186
1187 # wait until done
1188 delete_timeout = 20 * 60 # 20 minutes
1189 await self._wait_ng_ro(
1190 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
1191 )
1192
1193 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1194 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1195 # delete all nsr
1196 await self.RO.delete(nsr_id)
1197 except Exception as e:
1198 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1199 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1200 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1201 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1202 self.logger.debug(
1203 logging_text + "RO_action_id={} already deleted".format(action_id)
1204 )
1205 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1206 failed_detail.append("delete conflict: {}".format(e))
1207 self.logger.debug(
1208 logging_text
1209 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1210 )
1211 else:
1212 failed_detail.append("delete error: {}".format(e))
1213 self.logger.error(
1214 logging_text
1215 + "RO_action_id={} delete error: {}".format(action_id, e)
1216 )
1217
1218 if failed_detail:
1219 stage[2] = "Error deleting from VIM"
1220 else:
1221 stage[2] = "Deleted from VIM"
1222 db_nsr_update["detailed-status"] = " ".join(stage)
1223 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1224 self._write_op_status(nslcmop_id, stage)
1225
1226 if failed_detail:
1227 raise LcmException("; ".join(failed_detail))
1228 return
1229
1230 async def instantiate_RO(
1231 self,
1232 logging_text,
1233 nsr_id,
1234 nsd,
1235 db_nsr,
1236 db_nslcmop,
1237 db_vnfrs,
1238 db_vnfds,
1239 n2vc_key_list,
1240 stage,
1241 ):
1242 """
1243 Instantiate at RO
1244 :param logging_text: prefix text to use at logging
1245 :param nsr_id: nsr identity
1246 :param nsd: database content of ns descriptor
1247 :param db_nsr: database content of ns record
1248 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1249 :param db_vnfrs:
1250 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1251 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1252 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1253 :return: None or exception
1254 """
1255 try:
1256 start_deploy = time()
1257 ns_params = db_nslcmop.get("operationParams")
1258 if ns_params and ns_params.get("timeout_ns_deploy"):
1259 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1260 else:
1261 timeout_ns_deploy = self.timeout.get(
1262 "ns_deploy", self.timeout_ns_deploy
1263 )
1264
1265 # Check for and optionally request placement optimization. Database will be updated if placement activated
1266 stage[2] = "Waiting for Placement."
1267 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1268 # in case of placement, change ns_params["vimAccountId"] if it is not present at any vnfr
1269 for vnfr in db_vnfrs.values():
1270 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1271 break
1272 else:
1273 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1274
1275 return await self._instantiate_ng_ro(
1276 logging_text,
1277 nsr_id,
1278 nsd,
1279 db_nsr,
1280 db_nslcmop,
1281 db_vnfrs,
1282 db_vnfds,
1283 n2vc_key_list,
1284 stage,
1285 start_deploy,
1286 timeout_ns_deploy,
1287 )
1288 except Exception as e:
1289 stage[2] = "ERROR deploying at VIM"
1290 self.set_vnfr_at_error(db_vnfrs, str(e))
1291 self.logger.error(
1292 "Error deploying at VIM {}".format(e),
1293 exc_info=not isinstance(
1294 e,
1295 (
1296 ROclient.ROClientException,
1297 LcmException,
1298 DbException,
1299 NgRoException,
1300 ),
1301 ),
1302 )
1303 raise
1304
1305 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1306 """
1307 Wait for kdu to be up, get ip address
1308 :param logging_text: prefix use for logging
1309 :param nsr_id:
1310 :param vnfr_id:
1311 :param kdu_name:
1312 :return: IP address
1313 """
1314
1315 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1316 nb_tries = 0
1317
1318 while nb_tries < 360:
1319 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1320 kdur = next(
1321 (
1322 x
1323 for x in get_iterable(db_vnfr, "kdur")
1324 if x.get("kdu-name") == kdu_name
1325 ),
1326 None,
1327 )
1328 if not kdur:
1329 raise LcmException(
1330 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1331 )
1332 if kdur.get("status"):
1333 if kdur["status"] in ("READY", "ENABLED"):
1334 return kdur.get("ip-address")
1335 else:
1336 raise LcmException(
1337 "target KDU={} is in error state".format(kdu_name)
1338 )
1339
1340 await asyncio.sleep(10, loop=self.loop)
1341 nb_tries += 1
1342 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1343
1344 async def wait_vm_up_insert_key_ro(
1345 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1346 ):
1347 """
1348 Wait for IP address at RO and, optionally, insert public key in the virtual machine
1349 :param logging_text: prefix use for logging
1350 :param nsr_id:
1351 :param vnfr_id:
1352 :param vdu_id:
1353 :param vdu_index:
1354 :param pub_key: public ssh key to inject, None to skip
1355 :param user: user to apply the public ssh key
1356 :return: IP address
1357 """
1358
1359 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1360 ro_nsr_id = None
1361 ip_address = None
1362 nb_tries = 0
1363 target_vdu_id = None
1364 ro_retries = 0
1365
1366 while True:
1367
1368 ro_retries += 1
1369 if ro_retries >= 360: # 1 hour
1370 raise LcmException(
1371 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1372 )
1373
1374 await asyncio.sleep(10, loop=self.loop)
1375
1376 # get ip address
1377 if not target_vdu_id:
1378 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1379
1380 if not vdu_id: # for the VNF case
1381 if db_vnfr.get("status") == "ERROR":
1382 raise LcmException(
1383 "Cannot inject ssh-key because target VNF is in error state"
1384 )
1385 ip_address = db_vnfr.get("ip-address")
1386 if not ip_address:
1387 continue
1388 vdur = next(
1389 (
1390 x
1391 for x in get_iterable(db_vnfr, "vdur")
1392 if x.get("ip-address") == ip_address
1393 ),
1394 None,
1395 )
1396 else: # VDU case
1397 vdur = next(
1398 (
1399 x
1400 for x in get_iterable(db_vnfr, "vdur")
1401 if x.get("vdu-id-ref") == vdu_id
1402 and x.get("count-index") == vdu_index
1403 ),
1404 None,
1405 )
1406
1407 if (
1408 not vdur and len(db_vnfr.get("vdur", ())) == 1
1409 ): # If only one, this should be the target vdu
1410 vdur = db_vnfr["vdur"][0]
1411 if not vdur:
1412 raise LcmException(
1413 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1414 vnfr_id, vdu_id, vdu_index
1415 )
1416 )
1417 # New generation RO stores information at "vim_info"
1418 ng_ro_status = None
1419 target_vim = None
1420 if vdur.get("vim_info"):
1421 target_vim = next(
1422 t for t in vdur["vim_info"]
1423 ) # there should be only one key
1424 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1425 if (
1426 vdur.get("pdu-type")
1427 or vdur.get("status") == "ACTIVE"
1428 or ng_ro_status == "ACTIVE"
1429 ):
1430 ip_address = vdur.get("ip-address")
1431 if not ip_address:
1432 continue
1433 target_vdu_id = vdur["vdu-id-ref"]
1434 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1435 raise LcmException(
1436 "Cannot inject ssh-key because target VM is in error state"
1437 )
1438
1439 if not target_vdu_id:
1440 continue
1441
1442 # inject public key into machine
1443 if pub_key and user:
1444 self.logger.debug(logging_text + "Inserting RO key")
1445 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1446 if vdur.get("pdu-type"):
1447 self.logger.error(logging_text + "Cannot inject ssh-key to a PDU")
1448 return ip_address
1449 try:
1450 ro_vm_id = "{}-{}".format(
1451 db_vnfr["member-vnf-index-ref"], target_vdu_id
1452 ) # TODO add vdu_index
1453 if self.ng_ro:
1454 target = {
1455 "action": {
1456 "action": "inject_ssh_key",
1457 "key": pub_key,
1458 "user": user,
1459 },
1460 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1461 }
1462 desc = await self.RO.deploy(nsr_id, target)
1463 action_id = desc["action_id"]
1464 await self._wait_ng_ro(nsr_id, action_id, timeout=600)
1465 break
1466 else:
1467 # wait until NS is deployed at RO
1468 if not ro_nsr_id:
1469 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1470 ro_nsr_id = deep_get(
1471 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1472 )
1473 if not ro_nsr_id:
1474 continue
1475 result_dict = await self.RO.create_action(
1476 item="ns",
1477 item_id_name=ro_nsr_id,
1478 descriptor={
1479 "add_public_key": pub_key,
1480 "vms": [ro_vm_id],
1481 "user": user,
1482 },
1483 )
1484 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1485 if not result_dict or not isinstance(result_dict, dict):
1486 raise LcmException(
1487 "Unknown response from RO when injecting key"
1488 )
1489 for result in result_dict.values():
1490 if result.get("vim_result") == 200:
1491 break
1492 else:
1493 raise ROclient.ROClientException(
1494 "error injecting key: {}".format(
1495 result.get("description")
1496 )
1497 )
1498 break
1499 except NgRoException as e:
1500 raise LcmException(
1501 "Reaching max tries injecting key. Error: {}".format(e)
1502 )
1503 except ROclient.ROClientException as e:
1504 if not nb_tries:
1505 self.logger.debug(
1506 logging_text
1507 + "error injecting key: {}. Retrying until {} seconds".format(
1508 e, 20 * 10
1509 )
1510 )
1511 nb_tries += 1
1512 if nb_tries >= 20:
1513 raise LcmException(
1514 "Reaching max tries injecting key. Error: {}".format(e)
1515 )
1516 else:
1517 break
1518
1519 return ip_address
1520
1521 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1522 """
1523 Wait until dependent VCA deployments have finished. NS waits for VNFs and VDUs; VNFs wait for VDUs
1524 """
1525 my_vca = vca_deployed_list[vca_index]
1526 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1527 # vdu or kdu: no dependencies
1528 return
1529 timeout = 300
1530 while timeout >= 0:
1531 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1532 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1533 configuration_status_list = db_nsr["configurationStatus"]
1534 for index, vca_deployed in enumerate(configuration_status_list):
1535 if index == vca_index:
1536 # myself
1537 continue
1538 if not my_vca.get("member-vnf-index") or (
1539 vca_deployed.get("member-vnf-index")
1540 == my_vca.get("member-vnf-index")
1541 ):
1542 internal_status = configuration_status_list[index].get("status")
1543 if internal_status == "READY":
1544 continue
1545 elif internal_status == "BROKEN":
1546 raise LcmException(
1547 "Configuration aborted because dependent charm/s has failed"
1548 )
1549 else:
1550 break
1551 else:
1552 # no dependencies, return
1553 return
1554 await asyncio.sleep(10)
1555 timeout -= 1
1556
1557 raise LcmException("Configuration aborted because dependent charm/s timeout")
1558
1559 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1560 return deep_get(db_vnfr, ("vca-id",)) or deep_get(
1561 db_nsr, ("instantiate_params", "vcaId")
1562 )
1563
1564 async def instantiate_N2VC(
1565 self,
1566 logging_text,
1567 vca_index,
1568 nsi_id,
1569 db_nsr,
1570 db_vnfr,
1571 vdu_id,
1572 kdu_name,
1573 vdu_index,
1574 config_descriptor,
1575 deploy_params,
1576 base_folder,
1577 nslcmop_id,
1578 stage,
1579 vca_type,
1580 vca_name,
1581 ee_config_descriptor,
1582 ):
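"""
Deploys the configuration element (charm or helm execution environment) for an
NS, VNF, VDU or KDU: creates or registers the execution environment in the VCA,
installs the configuration software, adds VCA relations, waits for the VM/KDU
to be up (injecting the ssh public key when required) and runs the initial
config primitives. Progress is written to _admin.deployed.VCA.<vca_index> of
the NS record.
"""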
1583 nsr_id = db_nsr["_id"]
1584 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1585 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1586 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1587 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1588 db_dict = {
1589 "collection": "nsrs",
1590 "filter": {"_id": nsr_id},
1591 "path": db_update_entry,
1592 }
1593 step = ""
1594 try:
1595
1596 element_type = "NS"
1597 element_under_configuration = nsr_id
1598
1599 vnfr_id = None
1600 if db_vnfr:
1601 vnfr_id = db_vnfr["_id"]
1602 osm_config["osm"]["vnf_id"] = vnfr_id
1603
1604 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1605
1606 if vnfr_id:
1607 element_type = "VNF"
1608 element_under_configuration = vnfr_id
1609 namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
1610 if vdu_id:
1611 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
1612 element_type = "VDU"
1613 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
1614 osm_config["osm"]["vdu_id"] = vdu_id
1615 elif kdu_name:
1616 namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
1617 element_type = "KDU"
1618 element_under_configuration = kdu_name
1619 osm_config["osm"]["kdu_name"] = kdu_name
1620
1621 # Get artifact path
1622 artifact_path = "{}/{}/{}/{}".format(
1623 base_folder["folder"],
1624 base_folder["pkg-dir"],
1625 "charms"
1626 if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1627 else "helm-charts",
1628 vca_name,
1629 )
1630
1631 self.logger.debug("Artifact path > {}".format(artifact_path))
1632
1633 # get initial_config_primitive_list that applies to this element
1634 initial_config_primitive_list = config_descriptor.get(
1635 "initial-config-primitive"
1636 )
1637
1638 self.logger.debug(
1639 "Initial config primitive list > {}".format(
1640 initial_config_primitive_list
1641 )
1642 )
1643
1644 # add config if not present for NS charm
1645 ee_descriptor_id = ee_config_descriptor.get("id")
1646 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1647 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1648 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1649 )
1650
1651 self.logger.debug(
1652 "Initial config primitive list #2 > {}".format(
1653 initial_config_primitive_list
1654 )
1655 )
1656 # n2vc_redesign STEP 3.1
1657 # find old ee_id if exists
1658 ee_id = vca_deployed.get("ee_id")
1659
1660 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1661 # create or register execution environment in VCA
1662 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1663
1664 self._write_configuration_status(
1665 nsr_id=nsr_id,
1666 vca_index=vca_index,
1667 status="CREATING",
1668 element_under_configuration=element_under_configuration,
1669 element_type=element_type,
1670 )
1671
1672 step = "create execution environment"
1673 self.logger.debug(logging_text + step)
1674
1675 ee_id = None
1676 credentials = None
1677 if vca_type == "k8s_proxy_charm":
1678 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1679 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1680 namespace=namespace,
1681 artifact_path=artifact_path,
1682 db_dict=db_dict,
1683 vca_id=vca_id,
1684 )
1685 elif vca_type == "helm" or vca_type == "helm-v3":
1686 ee_id, credentials = await self.vca_map[
1687 vca_type
1688 ].create_execution_environment(
1689 namespace=namespace,
1690 reuse_ee_id=ee_id,
1691 db_dict=db_dict,
1692 config=osm_config,
1693 artifact_path=artifact_path,
1694 vca_type=vca_type,
1695 )
1696 else:
1697 ee_id, credentials = await self.vca_map[
1698 vca_type
1699 ].create_execution_environment(
1700 namespace=namespace,
1701 reuse_ee_id=ee_id,
1702 db_dict=db_dict,
1703 vca_id=vca_id,
1704 )
1705
1706 elif vca_type == "native_charm":
1707 step = "Waiting to VM being up and getting IP address"
1708 self.logger.debug(logging_text + step)
1709 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1710 logging_text,
1711 nsr_id,
1712 vnfr_id,
1713 vdu_id,
1714 vdu_index,
1715 user=None,
1716 pub_key=None,
1717 )
1718 credentials = {"hostname": rw_mgmt_ip}
1719 # get username
1720 username = deep_get(
1721 config_descriptor, ("config-access", "ssh-access", "default-user")
1722 )
1723 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
1724 # merged. Meanwhile, get the username from initial-config-primitive
1725 if not username and initial_config_primitive_list:
1726 for config_primitive in initial_config_primitive_list:
1727 for param in config_primitive.get("parameter", ()):
1728 if param["name"] == "ssh-username":
1729 username = param["value"]
1730 break
1731 if not username:
1732 raise LcmException(
1733 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1734 "'config-access.ssh-access.default-user'"
1735 )
1736 credentials["username"] = username
1737 # n2vc_redesign STEP 3.2
1738
1739 self._write_configuration_status(
1740 nsr_id=nsr_id,
1741 vca_index=vca_index,
1742 status="REGISTERING",
1743 element_under_configuration=element_under_configuration,
1744 element_type=element_type,
1745 )
1746
1747 step = "register execution environment {}".format(credentials)
1748 self.logger.debug(logging_text + step)
1749 ee_id = await self.vca_map[vca_type].register_execution_environment(
1750 credentials=credentials,
1751 namespace=namespace,
1752 db_dict=db_dict,
1753 vca_id=vca_id,
1754 )
1755
1756 # for compatibility with MON/POL modules, they need the model and application name in the database
1757 # TODO ask MON/POL whether assuming the format "model_name.application_name" is still needed
1758 ee_id_parts = ee_id.split(".")
1759 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1760 if len(ee_id_parts) >= 2:
1761 model_name = ee_id_parts[0]
1762 application_name = ee_id_parts[1]
1763 db_nsr_update[db_update_entry + "model"] = model_name
1764 db_nsr_update[db_update_entry + "application"] = application_name
1765
1766 # n2vc_redesign STEP 3.3
1767 step = "Install configuration Software"
1768
1769 self._write_configuration_status(
1770 nsr_id=nsr_id,
1771 vca_index=vca_index,
1772 status="INSTALLING SW",
1773 element_under_configuration=element_under_configuration,
1774 element_type=element_type,
1775 other_update=db_nsr_update,
1776 )
1777
1778 # TODO check if already done
1779 self.logger.debug(logging_text + step)
1780 config = None
1781 if vca_type == "native_charm":
1782 config_primitive = next(
1783 (p for p in initial_config_primitive_list if p["name"] == "config"),
1784 None,
1785 )
1786 if config_primitive:
1787 config = self._map_primitive_params(
1788 config_primitive, {}, deploy_params
1789 )
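            # number of charm units: lxc proxy charms can deploy several units according to the optional
            # "config-units" value in the NS/VNF/VDU record; the default is a single unit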
1790 num_units = 1
1791 if vca_type == "lxc_proxy_charm":
1792 if element_type == "NS":
1793 num_units = db_nsr.get("config-units") or 1
1794 elif element_type == "VNF":
1795 num_units = db_vnfr.get("config-units") or 1
1796 elif element_type == "VDU":
1797 for v in db_vnfr["vdur"]:
1798 if vdu_id == v["vdu-id-ref"]:
1799 num_units = v.get("config-units") or 1
1800 break
1801 if vca_type != "k8s_proxy_charm":
1802 await self.vca_map[vca_type].install_configuration_sw(
1803 ee_id=ee_id,
1804 artifact_path=artifact_path,
1805 db_dict=db_dict,
1806 config=config,
1807 num_units=num_units,
1808 vca_id=vca_id,
1809 )
1810
1811 # write in db flag of configuration_sw already installed
1812 self.update_db_2(
1813 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1814 )
1815
1816 # add relations for this VCA (wait for other peers related with this VCA)
1817 await self._add_vca_relations(
1818 logging_text=logging_text,
1819 nsr_id=nsr_id,
1820 vca_index=vca_index,
1821 vca_id=vca_id,
1822 vca_type=vca_type,
1823 )
1824
1825             # if SSH access is required, then get the execution environment SSH public key;
1826             # for native charms we have already waited for the VM to be up
1827 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1828 pub_key = None
1829 user = None
1830 # self.logger.debug("get ssh key block")
1831 if deep_get(
1832 config_descriptor, ("config-access", "ssh-access", "required")
1833 ):
1834 # self.logger.debug("ssh key needed")
1835                     # Needed to inject an ssh key
1836 user = deep_get(
1837 config_descriptor,
1838 ("config-access", "ssh-access", "default-user"),
1839 )
1840 step = "Install configuration Software, getting public ssh key"
1841 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1842 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1843 )
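                    # the retrieved public key is injected into the VM below via wait_vm_up_insert_key_ro()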
1844
1845 step = "Insert public key into VM user={} ssh_key={}".format(
1846 user, pub_key
1847 )
1848 else:
1849 # self.logger.debug("no need to get ssh key")
1850                     step = "Waiting for VM to be up and getting IP address"
1851 self.logger.debug(logging_text + step)
1852
1853 # n2vc_redesign STEP 5.1
1854 # wait for RO (ip-address) Insert pub_key into VM
1855 if vnfr_id:
1856 if kdu_name:
1857 rw_mgmt_ip = await self.wait_kdu_up(
1858 logging_text, nsr_id, vnfr_id, kdu_name
1859 )
1860 else:
1861 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1862 logging_text,
1863 nsr_id,
1864 vnfr_id,
1865 vdu_id,
1866 vdu_index,
1867 user=user,
1868 pub_key=pub_key,
1869 )
1870 else:
1871 rw_mgmt_ip = None # This is for a NS configuration
1872
1873 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
1874
1875 # store rw_mgmt_ip in deploy params for later replacement
1876 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
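            # rw_mgmt_ip is kept in deploy_params so that _map_primitive_params can resolve references
            # to it (typically <rw_mgmt_ip>) when building the Day-1 primitive parameters below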
1877
1878 # n2vc_redesign STEP 6 Execute initial config primitive
1879 step = "execute initial config primitive"
1880
1881 # wait for dependent primitives execution (NS -> VNF -> VDU)
1882 if initial_config_primitive_list:
1883 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1884
1885             # stage, depending on the element type: vdu, kdu, vnf or ns
1886 my_vca = vca_deployed_list[vca_index]
1887 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1888 # VDU or KDU
1889 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
1890 elif my_vca.get("member-vnf-index"):
1891 # VNF
1892 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
1893 else:
1894 # NS
1895 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
1896
1897 self._write_configuration_status(
1898 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
1899 )
1900
1901 self._write_op_status(op_id=nslcmop_id, stage=stage)
1902
1903 check_if_terminated_needed = True
1904 for initial_config_primitive in initial_config_primitive_list:
1905                 # add the NS configuration info to the parameters if this is an NS execution environment
1906 if not vca_deployed["member-vnf-index"]:
1907 deploy_params["ns_config_info"] = json.dumps(
1908 self._get_ns_config_info(nsr_id)
1909 )
1910 # TODO check if already done
1911 primitive_params_ = self._map_primitive_params(
1912 initial_config_primitive, {}, deploy_params
1913 )
1914
1915 step = "execute primitive '{}' params '{}'".format(
1916 initial_config_primitive["name"], primitive_params_
1917 )
1918 self.logger.debug(logging_text + step)
1919 await self.vca_map[vca_type].exec_primitive(
1920 ee_id=ee_id,
1921 primitive_name=initial_config_primitive["name"],
1922 params_dict=primitive_params_,
1923 db_dict=db_dict,
1924 vca_id=vca_id,
1925 )
1926                 # Once a primitive has been executed, check and record at the db whether terminate primitives will need to be executed
1927 if check_if_terminated_needed:
1928 if config_descriptor.get("terminate-config-primitive"):
1929 self.update_db_2(
1930 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
1931 )
1932 check_if_terminated_needed = False
1933
1934 # TODO register in database that primitive is done
1935
1936 # STEP 7 Configure metrics
1937 if vca_type == "helm" or vca_type == "helm-v3":
1938 prometheus_jobs = await self.add_prometheus_metrics(
1939 ee_id=ee_id,
1940 artifact_path=artifact_path,
1941 ee_config_descriptor=ee_config_descriptor,
1942 vnfr_id=vnfr_id,
1943 nsr_id=nsr_id,
1944 target_ip=rw_mgmt_ip,
1945 )
1946 if prometheus_jobs:
1947 self.update_db_2(
1948 "nsrs",
1949 nsr_id,
1950 {db_update_entry + "prometheus_jobs": prometheus_jobs},
1951 )
1952
1953 step = "instantiated at VCA"
1954 self.logger.debug(logging_text + step)
1955
1956 self._write_configuration_status(
1957 nsr_id=nsr_id, vca_index=vca_index, status="READY"
1958 )
1959
1960 except Exception as e: # TODO not use Exception but N2VC exception
1961 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
1962 if not isinstance(
1963 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
1964 ):
1965 self.logger.error(
1966 "Exception while {} : {}".format(step, e), exc_info=True
1967 )
1968 self._write_configuration_status(
1969 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
1970 )
1971 raise LcmException("{} {}".format(step, e)) from e
1972
1973 def _write_ns_status(
1974 self,
1975 nsr_id: str,
1976 ns_state: str,
1977 current_operation: str,
1978 current_operation_id: str,
1979 error_description: str = None,
1980 error_detail: str = None,
1981 other_update: dict = None,
1982 ):
1983 """
1984 Update db_nsr fields.
1985         :param nsr_id: NS record internal id
1986         :param ns_state: new NS state, e.g. BUILDING, READY or BROKEN
1987         :param current_operation: operation in progress, e.g. INSTANTIATING, or IDLE when finished
1988         :param current_operation_id: nslcmop id of the operation in progress, or None when IDLE
1989         :param error_description: short error description, if any
1990         :param error_detail: detailed error text, if any
1991         :param other_update: other required changes at the database, if provided; the dict is extended with the status fields and written in the same update
1992 :return:
1993 """
1994 try:
1995 db_dict = other_update or {}
1996 db_dict[
1997 "_admin.nslcmop"
1998 ] = current_operation_id # for backward compatibility
1999 db_dict["_admin.current-operation"] = current_operation_id
2000 db_dict["_admin.operation-type"] = (
2001 current_operation if current_operation != "IDLE" else None
2002 )
2003 db_dict["currentOperation"] = current_operation
2004 db_dict["currentOperationID"] = current_operation_id
2005 db_dict["errorDescription"] = error_description
2006 db_dict["errorDetail"] = error_detail
2007
2008 if ns_state:
2009 db_dict["nsState"] = ns_state
2010 self.update_db_2("nsrs", nsr_id, db_dict)
2011 except DbException as e:
2012 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2013
2014 def _write_op_status(
2015 self,
2016 op_id: str,
2017 stage: list = None,
2018 error_message: str = None,
2019 queuePosition: int = 0,
2020 operation_state: str = None,
2021 other_update: dict = None,
2022 ):
2023 try:
2024 db_dict = other_update or {}
2025 db_dict["queuePosition"] = queuePosition
2026 if isinstance(stage, list):
2027 db_dict["stage"] = stage[0]
2028 db_dict["detailed-status"] = " ".join(stage)
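                # illustrative example: stage = ["Stage 2/5: ...", "Deploying KDUs.", ""] stores stage[0]
                # under "stage" and the whole joined list under "detailed-status"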
2029 elif stage is not None:
2030 db_dict["stage"] = str(stage)
2031
2032 if error_message is not None:
2033 db_dict["errorMessage"] = error_message
2034 if operation_state is not None:
2035 db_dict["operationState"] = operation_state
2036 db_dict["statusEnteredTime"] = time()
2037 self.update_db_2("nslcmops", op_id, db_dict)
2038 except DbException as e:
2039 self.logger.warn(
2040 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2041 )
2042
2043 def _write_all_config_status(self, db_nsr: dict, status: str):
2044 try:
2045 nsr_id = db_nsr["_id"]
2046 # configurationStatus
2047 config_status = db_nsr.get("configurationStatus")
2048 if config_status:
2049 db_nsr_update = {
2050 "configurationStatus.{}.status".format(index): status
2051 for index, v in enumerate(config_status)
2052 if v
2053 }
2054 # update status
2055 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2056
2057 except DbException as e:
2058 self.logger.warn(
2059 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2060 )
2061
2062 def _write_configuration_status(
2063 self,
2064 nsr_id: str,
2065 vca_index: int,
2066 status: str = None,
2067 element_under_configuration: str = None,
2068 element_type: str = None,
2069 other_update: dict = None,
2070 ):
2071
2072 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2073 # .format(vca_index, status))
2074
2075 try:
2076 db_path = "configurationStatus.{}.".format(vca_index)
2077 db_dict = other_update or {}
2078 if status:
2079 db_dict[db_path + "status"] = status
2080 if element_under_configuration:
2081 db_dict[
2082 db_path + "elementUnderConfiguration"
2083 ] = element_under_configuration
2084 if element_type:
2085 db_dict[db_path + "elementType"] = element_type
2086 self.update_db_2("nsrs", nsr_id, db_dict)
2087 except DbException as e:
2088 self.logger.warn(
2089 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2090 status, nsr_id, vca_index, e
2091 )
2092 )
2093
2094 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2095 """
2096         Checks and computes the placement (VIM account where to deploy). If it is decided by an external tool, it
2097         sends the request via kafka and waits until the result is written at the database (nslcmops _admin.pla).
2098         The database is used because the result can be obtained from a different LCM worker in case of HA.
2099 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2100 :param db_nslcmop: database content of nslcmop
2101 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2102         :return: True if some modification is done. Modifies the database vnfrs and the db_vnfrs parameter with the
2103             computed 'vim-account-id'
2104 """
2105 modified = False
2106 nslcmop_id = db_nslcmop["_id"]
2107 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2108 if placement_engine == "PLA":
2109 self.logger.debug(
2110 logging_text + "Invoke and wait for placement optimization"
2111 )
2112 await self.msg.aiowrite(
2113 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2114 )
2115 db_poll_interval = 5
2116 wait = db_poll_interval * 10
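            # poll the database for up to ~50 seconds (10 polls of 5 seconds) waiting for the PLA result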
2117 pla_result = None
2118 while not pla_result and wait >= 0:
2119 await asyncio.sleep(db_poll_interval)
2120 wait -= db_poll_interval
2121 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2122 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2123
2124 if not pla_result:
2125 raise LcmException(
2126 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2127 )
2128
2129 for pla_vnf in pla_result["vnf"]:
2130 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2131 if not pla_vnf.get("vimAccountId") or not vnfr:
2132 continue
2133 modified = True
2134 self.db.set_one(
2135 "vnfrs",
2136 {"_id": vnfr["_id"]},
2137 {"vim-account-id": pla_vnf["vimAccountId"]},
2138 )
2139 # Modifies db_vnfrs
2140 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2141 return modified
2142
2143 def update_nsrs_with_pla_result(self, params):
2144 try:
2145 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2146 self.update_db_2(
2147 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2148 )
2149 except Exception as e:
2150 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2151
2152 async def instantiate(self, nsr_id, nslcmop_id):
2153 """
2154
2155 :param nsr_id: ns instance to deploy
2156 :param nslcmop_id: operation to run
2157 :return:
2158 """
2159
2160 # Try to lock HA task here
2161 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2162 if not task_is_locked_by_me:
2163 self.logger.debug(
2164 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2165 )
2166 return
2167
2168 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2169 self.logger.debug(logging_text + "Enter")
2170
2171 # get all needed from database
2172
2173 # database nsrs record
2174 db_nsr = None
2175
2176 # database nslcmops record
2177 db_nslcmop = None
2178
2179 # update operation on nsrs
2180 db_nsr_update = {}
2181 # update operation on nslcmops
2182 db_nslcmop_update = {}
2183
2184 nslcmop_operation_state = None
2185 db_vnfrs = {} # vnf's info indexed by member-index
2186 # n2vc_info = {}
2187 tasks_dict_info = {} # from task to info text
2188 exc = None
2189 error_list = []
2190 stage = [
2191 "Stage 1/5: preparation of the environment.",
2192 "Waiting for previous operations to terminate.",
2193 "",
2194 ]
2195 # ^ stage, step, VIM progress
2196 try:
2197 # wait for any previous tasks in process
2198 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2199
2200 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2201 stage[1] = "Reading from database."
2202 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2203 db_nsr_update["detailed-status"] = "creating"
2204 db_nsr_update["operational-status"] = "init"
2205 self._write_ns_status(
2206 nsr_id=nsr_id,
2207 ns_state="BUILDING",
2208 current_operation="INSTANTIATING",
2209 current_operation_id=nslcmop_id,
2210 other_update=db_nsr_update,
2211 )
2212 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2213
2214 # read from db: operation
2215 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2216 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2217 ns_params = db_nslcmop.get("operationParams")
2218 if ns_params and ns_params.get("timeout_ns_deploy"):
2219 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2220 else:
2221 timeout_ns_deploy = self.timeout.get(
2222 "ns_deploy", self.timeout_ns_deploy
2223 )
2224
2225 # read from db: ns
2226 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2227 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2228 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2229 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2230 self.fs.sync(db_nsr["nsd-id"])
2231 db_nsr["nsd"] = nsd
2232 # nsr_name = db_nsr["name"] # TODO short-name??
2233
2234 # read from db: vnf's of this ns
2235 stage[1] = "Getting vnfrs from db."
2236 self.logger.debug(logging_text + stage[1])
2237 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2238
2239 # read from db: vnfd's for every vnf
2240 db_vnfds = [] # every vnfd data
2241
2242 # for each vnf in ns, read vnfd
2243 for vnfr in db_vnfrs_list:
2244 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2245 vnfd_id = vnfr["vnfd-id"]
2246 vnfd_ref = vnfr["vnfd-ref"]
2247 self.fs.sync(vnfd_id)
2248
2249                 # if this vnfd has not been read yet, read it from db (avoid duplicates when several VNFs share a vnfd)
2250                 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["_id"] == vnfd_id):
2251 # read from db
2252 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2253 vnfd_id, vnfd_ref
2254 )
2255 self.logger.debug(logging_text + stage[1])
2256 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2257
2258 # store vnfd
2259 db_vnfds.append(vnfd)
2260
2261             # Get or generate the _admin.deployed.VCA list
2262 vca_deployed_list = None
2263 if db_nsr["_admin"].get("deployed"):
2264 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2265 if vca_deployed_list is None:
2266 vca_deployed_list = []
2267 configuration_status_list = []
2268 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2269 db_nsr_update["configurationStatus"] = configuration_status_list
2270 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2271 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2272 elif isinstance(vca_deployed_list, dict):
2273 # maintain backward compatibility. Change a dict to list at database
2274 vca_deployed_list = list(vca_deployed_list.values())
2275 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2276 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2277
2278 if not isinstance(
2279 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2280 ):
2281 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2282 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2283
2284             # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2285 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2286 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2287 self.db.set_list(
2288 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2289 )
2290
2291 # n2vc_redesign STEP 2 Deploy Network Scenario
2292 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2293 self._write_op_status(op_id=nslcmop_id, stage=stage)
2294
2295 stage[1] = "Deploying KDUs."
2296 # self.logger.debug(logging_text + "Before deploy_kdus")
2297             # Call deploy_kdus in case the "vdu:kdu" param exists
2298 await self.deploy_kdus(
2299 logging_text=logging_text,
2300 nsr_id=nsr_id,
2301 nslcmop_id=nslcmop_id,
2302 db_vnfrs=db_vnfrs,
2303 db_vnfds=db_vnfds,
2304 task_instantiation_info=tasks_dict_info,
2305 )
2306
2307 stage[1] = "Getting VCA public key."
2308 # n2vc_redesign STEP 1 Get VCA public ssh-key
2309 # feature 1429. Add n2vc public key to needed VMs
2310 n2vc_key = self.n2vc.get_public_key()
2311 n2vc_key_list = [n2vc_key]
2312 if self.vca_config.get("public_key"):
2313 n2vc_key_list.append(self.vca_config["public_key"])
2314
2315 stage[1] = "Deploying NS at VIM."
2316 task_ro = asyncio.ensure_future(
2317 self.instantiate_RO(
2318 logging_text=logging_text,
2319 nsr_id=nsr_id,
2320 nsd=nsd,
2321 db_nsr=db_nsr,
2322 db_nslcmop=db_nslcmop,
2323 db_vnfrs=db_vnfrs,
2324 db_vnfds=db_vnfds,
2325 n2vc_key_list=n2vc_key_list,
2326 stage=stage,
2327 )
2328 )
2329 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2330 tasks_dict_info[task_ro] = "Deploying at VIM"
2331
2332 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2333 stage[1] = "Deploying Execution Environments."
2334 self.logger.debug(logging_text + stage[1])
2335
2336 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2337 for vnf_profile in get_vnf_profiles(nsd):
2338 vnfd_id = vnf_profile["vnfd-id"]
2339 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2340 member_vnf_index = str(vnf_profile["id"])
2341 db_vnfr = db_vnfrs[member_vnf_index]
2342 base_folder = vnfd["_admin"]["storage"]
2343 vdu_id = None
2344 vdu_index = 0
2345 vdu_name = None
2346 kdu_name = None
2347
2348 # Get additional parameters
2349 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2350 if db_vnfr.get("additionalParamsForVnf"):
2351 deploy_params.update(
2352 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2353 )
2354
2355 descriptor_config = get_configuration(vnfd, vnfd["id"])
2356 if descriptor_config:
2357 self._deploy_n2vc(
2358 logging_text=logging_text
2359 + "member_vnf_index={} ".format(member_vnf_index),
2360 db_nsr=db_nsr,
2361 db_vnfr=db_vnfr,
2362 nslcmop_id=nslcmop_id,
2363 nsr_id=nsr_id,
2364 nsi_id=nsi_id,
2365 vnfd_id=vnfd_id,
2366 vdu_id=vdu_id,
2367 kdu_name=kdu_name,
2368 member_vnf_index=member_vnf_index,
2369 vdu_index=vdu_index,
2370 vdu_name=vdu_name,
2371 deploy_params=deploy_params,
2372 descriptor_config=descriptor_config,
2373 base_folder=base_folder,
2374 task_instantiation_info=tasks_dict_info,
2375 stage=stage,
2376 )
2377
2378 # Deploy charms for each VDU that supports one.
2379 for vdud in get_vdu_list(vnfd):
2380 vdu_id = vdud["id"]
2381 descriptor_config = get_configuration(vnfd, vdu_id)
2382 vdur = find_in_list(
2383 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2384 )
2385
2386 if vdur.get("additionalParams"):
2387 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2388 else:
2389 deploy_params_vdu = deploy_params
2390 deploy_params_vdu["OSM"] = get_osm_params(
2391 db_vnfr, vdu_id, vdu_count_index=0
2392 )
2393 vdud_count = get_vdu_profile(vnfd, vdu_id).get(
2394 "max-number-of-instances", 1
2395 )
2396
2397 self.logger.debug("VDUD > {}".format(vdud))
2398 self.logger.debug(
2399 "Descriptor config > {}".format(descriptor_config)
2400 )
2401 if descriptor_config:
2402 vdu_name = None
2403 kdu_name = None
2404 for vdu_index in range(vdud_count):
2405 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2406 self._deploy_n2vc(
2407 logging_text=logging_text
2408 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2409 member_vnf_index, vdu_id, vdu_index
2410 ),
2411 db_nsr=db_nsr,
2412 db_vnfr=db_vnfr,
2413 nslcmop_id=nslcmop_id,
2414 nsr_id=nsr_id,
2415 nsi_id=nsi_id,
2416 vnfd_id=vnfd_id,
2417 vdu_id=vdu_id,
2418 kdu_name=kdu_name,
2419 member_vnf_index=member_vnf_index,
2420 vdu_index=vdu_index,
2421 vdu_name=vdu_name,
2422 deploy_params=deploy_params_vdu,
2423 descriptor_config=descriptor_config,
2424 base_folder=base_folder,
2425 task_instantiation_info=tasks_dict_info,
2426 stage=stage,
2427 )
2428 for kdud in get_kdu_list(vnfd):
2429 kdu_name = kdud["name"]
2430 descriptor_config = get_configuration(vnfd, kdu_name)
2431 if descriptor_config:
2432 vdu_id = None
2433 vdu_index = 0
2434 vdu_name = None
2435 kdur = next(
2436 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2437 )
2438 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2439 if kdur.get("additionalParams"):
2440 deploy_params_kdu = parse_yaml_strings(
2441 kdur["additionalParams"]
2442 )
2443
2444 self._deploy_n2vc(
2445 logging_text=logging_text,
2446 db_nsr=db_nsr,
2447 db_vnfr=db_vnfr,
2448 nslcmop_id=nslcmop_id,
2449 nsr_id=nsr_id,
2450 nsi_id=nsi_id,
2451 vnfd_id=vnfd_id,
2452 vdu_id=vdu_id,
2453 kdu_name=kdu_name,
2454 member_vnf_index=member_vnf_index,
2455 vdu_index=vdu_index,
2456 vdu_name=vdu_name,
2457 deploy_params=deploy_params_kdu,
2458 descriptor_config=descriptor_config,
2459 base_folder=base_folder,
2460 task_instantiation_info=tasks_dict_info,
2461 stage=stage,
2462 )
2463
2464 # Check if this NS has a charm configuration
2465 descriptor_config = nsd.get("ns-configuration")
2466 if descriptor_config and descriptor_config.get("juju"):
2467 vnfd_id = None
2468 db_vnfr = None
2469 member_vnf_index = None
2470 vdu_id = None
2471 kdu_name = None
2472 vdu_index = 0
2473 vdu_name = None
2474
2475 # Get additional parameters
2476 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2477 if db_nsr.get("additionalParamsForNs"):
2478 deploy_params.update(
2479 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2480 )
2481 base_folder = nsd["_admin"]["storage"]
2482 self._deploy_n2vc(
2483 logging_text=logging_text,
2484 db_nsr=db_nsr,
2485 db_vnfr=db_vnfr,
2486 nslcmop_id=nslcmop_id,
2487 nsr_id=nsr_id,
2488 nsi_id=nsi_id,
2489 vnfd_id=vnfd_id,
2490 vdu_id=vdu_id,
2491 kdu_name=kdu_name,
2492 member_vnf_index=member_vnf_index,
2493 vdu_index=vdu_index,
2494 vdu_name=vdu_name,
2495 deploy_params=deploy_params,
2496 descriptor_config=descriptor_config,
2497 base_folder=base_folder,
2498 task_instantiation_info=tasks_dict_info,
2499 stage=stage,
2500 )
2501
2502             # the rest of the work is done in the finally block below
2503
2504 except (
2505 ROclient.ROClientException,
2506 DbException,
2507 LcmException,
2508 N2VCException,
2509 ) as e:
2510 self.logger.error(
2511 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2512 )
2513 exc = e
2514 except asyncio.CancelledError:
2515 self.logger.error(
2516 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2517 )
2518 exc = "Operation was cancelled"
2519 except Exception as e:
2520 exc = traceback.format_exc()
2521 self.logger.critical(
2522 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2523 exc_info=True,
2524 )
2525 finally:
2526 if exc:
2527 error_list.append(str(exc))
2528 try:
2529 # wait for pending tasks
2530 if tasks_dict_info:
2531 stage[1] = "Waiting for instantiate pending tasks."
2532 self.logger.debug(logging_text + stage[1])
2533 error_list += await self._wait_for_tasks(
2534 logging_text,
2535 tasks_dict_info,
2536 timeout_ns_deploy,
2537 stage,
2538 nslcmop_id,
2539 nsr_id=nsr_id,
2540 )
2541 stage[1] = stage[2] = ""
2542 except asyncio.CancelledError:
2543 error_list.append("Cancelled")
2544 # TODO cancel all tasks
2545 except Exception as exc:
2546 error_list.append(str(exc))
2547
2548 # update operation-status
2549 db_nsr_update["operational-status"] = "running"
2550 # let's begin with VCA 'configured' status (later we can change it)
2551 db_nsr_update["config-status"] = "configured"
2552 for task, task_name in tasks_dict_info.items():
2553 if not task.done() or task.cancelled() or task.exception():
2554 if task_name.startswith(self.task_name_deploy_vca):
2555 # A N2VC task is pending
2556 db_nsr_update["config-status"] = "failed"
2557 else:
2558 # RO or KDU task is pending
2559 db_nsr_update["operational-status"] = "failed"
2560
2561 # update status at database
2562 if error_list:
2563 error_detail = ". ".join(error_list)
2564 self.logger.error(logging_text + error_detail)
2565 error_description_nslcmop = "{} Detail: {}".format(
2566 stage[0], error_detail
2567 )
2568 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2569 nslcmop_id, stage[0]
2570 )
2571
2572 db_nsr_update["detailed-status"] = (
2573 error_description_nsr + " Detail: " + error_detail
2574 )
2575 db_nslcmop_update["detailed-status"] = error_detail
2576 nslcmop_operation_state = "FAILED"
2577 ns_state = "BROKEN"
2578 else:
2579 error_detail = None
2580 error_description_nsr = error_description_nslcmop = None
2581 ns_state = "READY"
2582 db_nsr_update["detailed-status"] = "Done"
2583 db_nslcmop_update["detailed-status"] = "Done"
2584 nslcmop_operation_state = "COMPLETED"
2585
2586 if db_nsr:
2587 self._write_ns_status(
2588 nsr_id=nsr_id,
2589 ns_state=ns_state,
2590 current_operation="IDLE",
2591 current_operation_id=None,
2592 error_description=error_description_nsr,
2593 error_detail=error_detail,
2594 other_update=db_nsr_update,
2595 )
2596 self._write_op_status(
2597 op_id=nslcmop_id,
2598 stage="",
2599 error_message=error_description_nslcmop,
2600 operation_state=nslcmop_operation_state,
2601 other_update=db_nslcmop_update,
2602 )
2603
2604 if nslcmop_operation_state:
2605 try:
2606 await self.msg.aiowrite(
2607 "ns",
2608 "instantiated",
2609 {
2610 "nsr_id": nsr_id,
2611 "nslcmop_id": nslcmop_id,
2612 "operationState": nslcmop_operation_state,
2613 },
2614 loop=self.loop,
2615 )
2616 except Exception as e:
2617 self.logger.error(
2618 logging_text + "kafka_write notification Exception {}".format(e)
2619 )
2620
2621 self.logger.debug(logging_text + "Exit")
2622 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2623
2624 async def _add_vca_relations(
2625 self,
2626 logging_text,
2627 nsr_id,
2628 vca_index: int,
2629 timeout: int = 3600,
2630 vca_type: str = None,
2631 vca_id: str = None,
2632 ) -> bool:
2633
2634 # steps:
2635 # 1. find all relations for this VCA
2636 # 2. wait for other peers related
2637 # 3. add relations
2638
2639 try:
2640 vca_type = vca_type or "lxc_proxy_charm"
2641
2642 # STEP 1: find all relations for this VCA
2643
2644 # read nsr record
2645 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2646 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2647
2648 # this VCA data
2649 my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]
2650
2651 # read all ns-configuration relations
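            # a relation entry is expected to look like (illustrative):
            # {"entities": [{"id": <member-vnf-index or vdu id>, "endpoint": <endpoint>}, {...}]}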
2652 ns_relations = list()
2653 db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
2654 if db_ns_relations:
2655 for r in db_ns_relations:
2656 # check if this VCA is in the relation
2657 if my_vca.get("member-vnf-index") in (
2658 r.get("entities")[0].get("id"),
2659 r.get("entities")[1].get("id"),
2660 ):
2661 ns_relations.append(r)
2662
2663 # read all vnf-configuration relations
2664 vnf_relations = list()
2665 db_vnfd_list = db_nsr.get("vnfd-id")
2666 if db_vnfd_list:
2667 for vnfd in db_vnfd_list:
2668 db_vnf_relations = None
2669 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2670 db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
2671 if db_vnf_configuration:
2672 db_vnf_relations = db_vnf_configuration.get("relation", [])
2673 if db_vnf_relations:
2674 for r in db_vnf_relations:
2675 # check if this VCA is in the relation
2676 if my_vca.get("vdu_id") in (
2677 r.get("entities")[0].get("id"),
2678 r.get("entities")[1].get("id"),
2679 ):
2680 vnf_relations.append(r)
2681
2682             # if there are no relations, nothing to do
2683 if not ns_relations and not vnf_relations:
2684 self.logger.debug(logging_text + " No relations")
2685 return True
2686
2687 self.logger.debug(
2688 logging_text
2689 + " adding relations\n {}\n {}".format(
2690 ns_relations, vnf_relations
2691 )
2692 )
2693
2694 # add all relations
2695 start = time()
2696 while True:
2697 # check timeout
2698 now = time()
2699 if now - start >= timeout:
2700 self.logger.error(logging_text + " : timeout adding relations")
2701 return False
2702
2703                 # reload nsr from database (we need the updated record: _admin.deployed.VCA)
2704 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2705
2706                 # for each defined NS relation, find the related VCAs
2707 for r in ns_relations.copy():
2708 from_vca_ee_id = None
2709 to_vca_ee_id = None
2710 from_vca_endpoint = None
2711 to_vca_endpoint = None
2712 vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
2713 for vca in vca_list:
2714 if vca.get("member-vnf-index") == r.get("entities")[0].get(
2715 "id"
2716 ) and vca.get("config_sw_installed"):
2717 from_vca_ee_id = vca.get("ee_id")
2718 from_vca_endpoint = r.get("entities")[0].get("endpoint")
2719 if vca.get("member-vnf-index") == r.get("entities")[1].get(
2720 "id"
2721 ) and vca.get("config_sw_installed"):
2722 to_vca_ee_id = vca.get("ee_id")
2723 to_vca_endpoint = r.get("entities")[1].get("endpoint")
2724 if from_vca_ee_id and to_vca_ee_id:
2725 # add relation
2726 await self.vca_map[vca_type].add_relation(
2727 ee_id_1=from_vca_ee_id,
2728 ee_id_2=to_vca_ee_id,
2729 endpoint_1=from_vca_endpoint,
2730 endpoint_2=to_vca_endpoint,
2731 vca_id=vca_id,
2732 )
2733 # remove entry from relations list
2734 ns_relations.remove(r)
2735 else:
2736 # check failed peers
2737 try:
2738 vca_status_list = db_nsr.get("configurationStatus")
2739 if vca_status_list:
2740 for i in range(len(vca_list)):
2741 vca = vca_list[i]
2742 vca_status = vca_status_list[i]
2743 if vca.get("member-vnf-index") == r.get("entities")[
2744 0
2745 ].get("id"):
2746 if vca_status.get("status") == "BROKEN":
2747 # peer broken: remove relation from list
2748 ns_relations.remove(r)
2749 if vca.get("member-vnf-index") == r.get("entities")[
2750 1
2751 ].get("id"):
2752 if vca_status.get("status") == "BROKEN":
2753 # peer broken: remove relation from list
2754 ns_relations.remove(r)
2755 except Exception:
2756 # ignore
2757 pass
2758
2759                 # for each defined VNF relation, find the related VCAs
2760 for r in vnf_relations.copy():
2761 from_vca_ee_id = None
2762 to_vca_ee_id = None
2763 from_vca_endpoint = None
2764 to_vca_endpoint = None
2765 vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
2766 for vca in vca_list:
2767 key_to_check = "vdu_id"
2768 if vca.get("vdu_id") is None:
2769 key_to_check = "vnfd_id"
2770 if vca.get(key_to_check) == r.get("entities")[0].get(
2771 "id"
2772 ) and vca.get("config_sw_installed"):
2773 from_vca_ee_id = vca.get("ee_id")
2774 from_vca_endpoint = r.get("entities")[0].get("endpoint")
2775 if vca.get(key_to_check) == r.get("entities")[1].get(
2776 "id"
2777 ) and vca.get("config_sw_installed"):
2778 to_vca_ee_id = vca.get("ee_id")
2779 to_vca_endpoint = r.get("entities")[1].get("endpoint")
2780 if from_vca_ee_id and to_vca_ee_id:
2781 # add relation
2782 await self.vca_map[vca_type].add_relation(
2783 ee_id_1=from_vca_ee_id,
2784 ee_id_2=to_vca_ee_id,
2785 endpoint_1=from_vca_endpoint,
2786 endpoint_2=to_vca_endpoint,
2787 vca_id=vca_id,
2788 )
2789 # remove entry from relations list
2790 vnf_relations.remove(r)
2791 else:
2792 # check failed peers
2793 try:
2794 vca_status_list = db_nsr.get("configurationStatus")
2795 if vca_status_list:
2796 for i in range(len(vca_list)):
2797 vca = vca_list[i]
2798 vca_status = vca_status_list[i]
2799 if vca.get("vdu_id") == r.get("entities")[0].get(
2800 "id"
2801 ):
2802 if vca_status.get("status") == "BROKEN":
2803 # peer broken: remove relation from list
2804 vnf_relations.remove(r)
2805 if vca.get("vdu_id") == r.get("entities")[1].get(
2806 "id"
2807 ):
2808 if vca_status.get("status") == "BROKEN":
2809 # peer broken: remove relation from list
2810 vnf_relations.remove(r)
2811 except Exception:
2812 # ignore
2813 pass
2814
2815 # wait for next try
2816 await asyncio.sleep(5.0)
2817
2818 if not ns_relations and not vnf_relations:
2819 self.logger.debug("Relations added")
2820 break
2821
2822 return True
2823
2824 except Exception as e:
2825 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
2826 return False
2827
2828 async def _install_kdu(
2829 self,
2830 nsr_id: str,
2831 nsr_db_path: str,
2832 vnfr_data: dict,
2833 kdu_index: int,
2834 kdud: dict,
2835 vnfd: dict,
2836 k8s_instance_info: dict,
2837 k8params: dict = None,
2838 timeout: int = 600,
2839 vca_id: str = None,
2840 ):
2841
2842 try:
2843 k8sclustertype = k8s_instance_info["k8scluster-type"]
2844 # Instantiate kdu
2845 db_dict_install = {
2846 "collection": "nsrs",
2847 "filter": {"_id": nsr_id},
2848 "path": nsr_db_path,
2849 }
2850
2851 kdu_instance = self.k8scluster_map[
2852 k8sclustertype
2853 ].generate_kdu_instance_name(
2854 db_dict=db_dict_install,
2855 kdu_model=k8s_instance_info["kdu-model"],
2856 kdu_name=k8s_instance_info["kdu-name"],
2857 )
2858 self.update_db_2(
2859 "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
2860 )
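            # the kdu-instance name is written to the db before the install so that the instance can
            # still be located (and cleaned up) if the install below fails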
2861 await self.k8scluster_map[k8sclustertype].install(
2862 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2863 kdu_model=k8s_instance_info["kdu-model"],
2864 atomic=True,
2865 params=k8params,
2866 db_dict=db_dict_install,
2867 timeout=timeout,
2868 kdu_name=k8s_instance_info["kdu-name"],
2869 namespace=k8s_instance_info["namespace"],
2870 kdu_instance=kdu_instance,
2871 vca_id=vca_id,
2872 )
2873 self.update_db_2(
2874 "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
2875 )
2876
2877             # Obtain the deployed services in order to get the management service ip
2878 services = await self.k8scluster_map[k8sclustertype].get_services(
2879 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2880 kdu_instance=kdu_instance,
2881 namespace=k8s_instance_info["namespace"],
2882 )
2883
2884 # Obtain management service info (if exists)
2885 vnfr_update_dict = {}
2886 kdu_config = get_configuration(vnfd, kdud["name"])
2887 if kdu_config:
2888 target_ee_list = kdu_config.get("execution-environment-list", [])
2889 else:
2890 target_ee_list = []
2891
2892 if services:
2893 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
2894 mgmt_services = [
2895 service
2896 for service in kdud.get("service", [])
2897 if service.get("mgmt-service")
2898 ]
2899 for mgmt_service in mgmt_services:
2900 for service in services:
2901 if service["name"].startswith(mgmt_service["name"]):
2902 # Mgmt service found, Obtain service ip
2903 ip = service.get("external_ip", service.get("cluster_ip"))
2904 if isinstance(ip, list) and len(ip) == 1:
2905 ip = ip[0]
2906
2907 vnfr_update_dict[
2908 "kdur.{}.ip-address".format(kdu_index)
2909 ] = ip
2910
2911                         # Check whether the mgmt ip at the vnf must also be updated
2912 service_external_cp = mgmt_service.get(
2913 "external-connection-point-ref"
2914 )
2915 if service_external_cp:
2916 if (
2917 deep_get(vnfd, ("mgmt-interface", "cp"))
2918 == service_external_cp
2919 ):
2920 vnfr_update_dict["ip-address"] = ip
2921
2922 if find_in_list(
2923 target_ee_list,
2924 lambda ee: ee.get(
2925 "external-connection-point-ref", ""
2926 )
2927 == service_external_cp,
2928 ):
2929 vnfr_update_dict[
2930 "kdur.{}.ip-address".format(kdu_index)
2931 ] = ip
2932 break
2933 else:
2934 self.logger.warn(
2935 "Mgmt service name: {} not found".format(
2936 mgmt_service["name"]
2937 )
2938 )
2939
2940 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
2941 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
2942
2943 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
2944 if (
2945 kdu_config
2946 and kdu_config.get("initial-config-primitive")
2947 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
2948 ):
2949 initial_config_primitive_list = kdu_config.get(
2950 "initial-config-primitive"
2951 )
2952 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
2953
2954 for initial_config_primitive in initial_config_primitive_list:
2955 primitive_params_ = self._map_primitive_params(
2956 initial_config_primitive, {}, {}
2957 )
2958
2959 await asyncio.wait_for(
2960 self.k8scluster_map[k8sclustertype].exec_primitive(
2961 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2962 kdu_instance=kdu_instance,
2963 primitive_name=initial_config_primitive["name"],
2964 params=primitive_params_,
2965 db_dict=db_dict_install,
2966 vca_id=vca_id,
2967 ),
2968 timeout=timeout,
2969 )
2970
2971 except Exception as e:
2972             # Update the db with the error and re-raise the exception
2973 try:
2974 self.update_db_2(
2975 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
2976 )
2977 self.update_db_2(
2978 "vnfrs",
2979 vnfr_data.get("_id"),
2980 {"kdur.{}.status".format(kdu_index): "ERROR"},
2981 )
2982 except Exception:
2983 # ignore to keep original exception
2984 pass
2985 # reraise original error
2986 raise
2987
2988 return kdu_instance
2989
2990 async def deploy_kdus(
2991 self,
2992 logging_text,
2993 nsr_id,
2994 nslcmop_id,
2995 db_vnfrs,
2996 db_vnfds,
2997 task_instantiation_info,
2998 ):
2999 # Launch kdus if present in the descriptor
3000
3001 k8scluster_id_2_uuic = {
3002 "helm-chart-v3": {},
3003 "helm-chart": {},
3004 "juju-bundle": {},
3005 }
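        # cache mapping the OSM k8s cluster id to the connector internal id, per cluster type, so each
        # cluster is resolved only once within this deployment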
3006
3007 async def _get_cluster_id(cluster_id, cluster_type):
3008 nonlocal k8scluster_id_2_uuic
3009 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3010 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3011
3012             # check if the K8s cluster is being created; if so, wait for the related pending tasks to complete
3013 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3014 "k8scluster", cluster_id
3015 )
3016 if task_dependency:
3017 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3018 task_name, cluster_id
3019 )
3020 self.logger.debug(logging_text + text)
3021 await asyncio.wait(task_dependency, timeout=3600)
3022
3023 db_k8scluster = self.db.get_one(
3024 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3025 )
3026 if not db_k8scluster:
3027 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3028
3029 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3030 if not k8s_id:
3031 if cluster_type == "helm-chart-v3":
3032 try:
3033 # backward compatibility for existing clusters that have not been initialized for helm v3
3034 k8s_credentials = yaml.safe_dump(
3035 db_k8scluster.get("credentials")
3036 )
3037 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3038 k8s_credentials, reuse_cluster_uuid=cluster_id
3039 )
3040 db_k8scluster_update = {}
3041 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3042 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3043 db_k8scluster_update[
3044 "_admin.helm-chart-v3.created"
3045 ] = uninstall_sw
3046 db_k8scluster_update[
3047 "_admin.helm-chart-v3.operationalState"
3048 ] = "ENABLED"
3049 self.update_db_2(
3050 "k8sclusters", cluster_id, db_k8scluster_update
3051 )
3052 except Exception as e:
3053 self.logger.error(
3054 logging_text
3055 + "error initializing helm-v3 cluster: {}".format(str(e))
3056 )
3057 raise LcmException(
3058 "K8s cluster '{}' has not been initialized for '{}'".format(
3059 cluster_id, cluster_type
3060 )
3061 )
3062 else:
3063 raise LcmException(
3064 "K8s cluster '{}' has not been initialized for '{}'".format(
3065 cluster_id, cluster_type
3066 )
3067 )
3068 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3069 return k8s_id
3070
3071 logging_text += "Deploy kdus: "
3072 step = ""
3073 try:
3074 db_nsr_update = {"_admin.deployed.K8s": []}
3075 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3076
3077 index = 0
3078 updated_cluster_list = []
3079 updated_v3_cluster_list = []
3080
3081 for vnfr_data in db_vnfrs.values():
3082 vca_id = self.get_vca_id(vnfr_data, {})
3083 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3084 # Step 0: Prepare and set parameters
3085 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3086 vnfd_id = vnfr_data.get("vnfd-id")
3087 vnfd_with_id = find_in_list(
3088 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3089 )
3090 kdud = next(
3091 kdud
3092 for kdud in vnfd_with_id["kdu"]
3093 if kdud["name"] == kdur["kdu-name"]
3094 )
3095 namespace = kdur.get("k8s-namespace")
3096 if kdur.get("helm-chart"):
3097 kdumodel = kdur["helm-chart"]
3098                         # Default version is helm v3; if helm-version is v2, use the v2 connector
3099 k8sclustertype = "helm-chart-v3"
3100 self.logger.debug("kdur: {}".format(kdur))
3101 if (
3102 kdur.get("helm-version")
3103 and kdur.get("helm-version") == "v2"
3104 ):
3105 k8sclustertype = "helm-chart"
3106 elif kdur.get("juju-bundle"):
3107 kdumodel = kdur["juju-bundle"]
3108 k8sclustertype = "juju-bundle"
3109 else:
3110 raise LcmException(
3111 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3112 "juju-bundle. Maybe an old NBI version is running".format(
3113 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3114 )
3115 )
3116                     # check whether kdumodel refers to a file inside the package and, if so, use its full path
3117 try:
3118 vnfd_with_id = find_in_list(
3119 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3120 )
3121 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3122 if storage and storage.get(
3123 "pkg-dir"
3124                         ):  # may not be present if the vnfd has no artifacts
3125                             # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
3126 filename = "{}/{}/{}s/{}".format(
3127 storage["folder"],
3128 storage["pkg-dir"],
3129 k8sclustertype,
3130 kdumodel,
3131 )
3132 if self.fs.file_exists(
3133 filename, mode="file"
3134 ) or self.fs.file_exists(filename, mode="dir"):
3135 kdumodel = self.fs.path + filename
3136 except (asyncio.TimeoutError, asyncio.CancelledError):
3137 raise
3138 except Exception: # it is not a file
3139 pass
3140
3141 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3142 step = "Synchronize repos for k8s cluster '{}'".format(
3143 k8s_cluster_id
3144 )
3145 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3146
3147 # Synchronize repos
3148 if (
3149 k8sclustertype == "helm-chart"
3150 and cluster_uuid not in updated_cluster_list
3151 ) or (
3152 k8sclustertype == "helm-chart-v3"
3153 and cluster_uuid not in updated_v3_cluster_list
3154 ):
3155 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3156 self.k8scluster_map[k8sclustertype].synchronize_repos(
3157 cluster_uuid=cluster_uuid
3158 )
3159 )
3160 if del_repo_list or added_repo_dict:
3161 if k8sclustertype == "helm-chart":
3162 unset = {
3163 "_admin.helm_charts_added." + item: None
3164 for item in del_repo_list
3165 }
3166 updated = {
3167 "_admin.helm_charts_added." + item: name
3168 for item, name in added_repo_dict.items()
3169 }
3170 updated_cluster_list.append(cluster_uuid)
3171 elif k8sclustertype == "helm-chart-v3":
3172 unset = {
3173 "_admin.helm_charts_v3_added." + item: None
3174 for item in del_repo_list
3175 }
3176 updated = {
3177 "_admin.helm_charts_v3_added." + item: name
3178 for item, name in added_repo_dict.items()
3179 }
3180 updated_v3_cluster_list.append(cluster_uuid)
3181 self.logger.debug(
3182 logging_text + "repos synchronized on k8s cluster "
3183 "'{}' to_delete: {}, to_add: {}".format(
3184 k8s_cluster_id, del_repo_list, added_repo_dict
3185 )
3186 )
3187 self.db.set_one(
3188 "k8sclusters",
3189 {"_id": k8s_cluster_id},
3190 updated,
3191 unset=unset,
3192 )
3193
3194 # Instantiate kdu
3195 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3196 vnfr_data["member-vnf-index-ref"],
3197 kdur["kdu-name"],
3198 k8s_cluster_id,
3199 )
3200 k8s_instance_info = {
3201 "kdu-instance": None,
3202 "k8scluster-uuid": cluster_uuid,
3203 "k8scluster-type": k8sclustertype,
3204 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3205 "kdu-name": kdur["kdu-name"],
3206 "kdu-model": kdumodel,
3207 "namespace": namespace,
3208 }
3209 db_path = "_admin.deployed.K8s.{}".format(index)
3210 db_nsr_update[db_path] = k8s_instance_info
3211 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3212 vnfd_with_id = find_in_list(
3213 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3214 )
3215 task = asyncio.ensure_future(
3216 self._install_kdu(
3217 nsr_id,
3218 db_path,
3219 vnfr_data,
3220 kdu_index,
3221 kdud,
3222 vnfd_with_id,
3223 k8s_instance_info,
3224 k8params=desc_params,
3225 timeout=600,
3226 vca_id=vca_id,
3227 )
3228 )
3229 self.lcm_tasks.register(
3230 "ns",
3231 nsr_id,
3232 nslcmop_id,
3233 "instantiate_KDU-{}".format(index),
3234 task,
3235 )
3236 task_instantiation_info[task] = "Deploying KDU {}".format(
3237 kdur["kdu-name"]
3238 )
3239
3240 index += 1
3241
3242 except (LcmException, asyncio.CancelledError):
3243 raise
3244 except Exception as e:
3245 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3246 if isinstance(e, (N2VCException, DbException)):
3247 self.logger.error(logging_text + msg)
3248 else:
3249 self.logger.critical(logging_text + msg, exc_info=True)
3250 raise LcmException(msg)
3251 finally:
3252 if db_nsr_update:
3253 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3254
3255 def _deploy_n2vc(
3256 self,
3257 logging_text,
3258 db_nsr,
3259 db_vnfr,
3260 nslcmop_id,
3261 nsr_id,
3262 nsi_id,
3263 vnfd_id,
3264 vdu_id,
3265 kdu_name,
3266 member_vnf_index,
3267 vdu_index,
3268 vdu_name,
3269 deploy_params,
3270 descriptor_config,
3271 base_folder,
3272 task_instantiation_info,
3273 stage,
3274 ):
3275         # launch instantiate_N2VC in an asyncio task and register the task object
3276         # Look up the information of this charm at database <nsrs>._admin.deployed.VCA;
3277         # if not found, create one entry and update the database
3278 # fill db_nsr._admin.deployed.VCA.<index>
3279
3280 self.logger.debug(
3281 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
3282 )
3283 if "execution-environment-list" in descriptor_config:
3284 ee_list = descriptor_config.get("execution-environment-list", [])
3285         else:  # other types, such as script, are not supported
3286 ee_list = []
3287
3288 for ee_item in ee_list:
3289 self.logger.debug(
3290 logging_text
3291 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3292 ee_item.get("juju"), ee_item.get("helm-chart")
3293 )
3294 )
3295 ee_descriptor_id = ee_item.get("id")
3296 if ee_item.get("juju"):
3297 vca_name = ee_item["juju"].get("charm")
3298 vca_type = (
3299 "lxc_proxy_charm"
3300 if ee_item["juju"].get("charm") is not None
3301 else "native_charm"
3302 )
3303 if ee_item["juju"].get("cloud") == "k8s":
3304 vca_type = "k8s_proxy_charm"
3305 elif ee_item["juju"].get("proxy") is False:
3306 vca_type = "native_charm"
3307 elif ee_item.get("helm-chart"):
3308 vca_name = ee_item["helm-chart"]
3309 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
3310 vca_type = "helm"
3311 else:
3312 vca_type = "helm-v3"
3313 else:
3314 self.logger.debug(
3315                     logging_text + "skipping: configuration is neither a juju charm nor a helm chart"
3316 )
3317 continue
3318
3319 vca_index = -1
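            # search for an existing VCA record for this element (e.g. when retrying the operation);
            # if none matches, the for/else below creates and appends a new one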
3320 for vca_index, vca_deployed in enumerate(
3321 db_nsr["_admin"]["deployed"]["VCA"]
3322 ):
3323 if not vca_deployed:
3324 continue
3325 if (
3326 vca_deployed.get("member-vnf-index") == member_vnf_index
3327 and vca_deployed.get("vdu_id") == vdu_id
3328 and vca_deployed.get("kdu_name") == kdu_name
3329 and vca_deployed.get("vdu_count_index", 0) == vdu_index
3330 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
3331 ):
3332 break
3333 else:
3334 # not found, create one.
3335 target = (
3336 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
3337 )
3338 if vdu_id:
3339 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
3340 elif kdu_name:
3341 target += "/kdu/{}".format(kdu_name)
3342 vca_deployed = {
3343 "target_element": target,
3344 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
3345 "member-vnf-index": member_vnf_index,
3346 "vdu_id": vdu_id,
3347 "kdu_name": kdu_name,
3348 "vdu_count_index": vdu_index,
3349 "operational-status": "init", # TODO revise
3350 "detailed-status": "", # TODO revise
3351 "step": "initial-deploy", # TODO revise
3352 "vnfd_id": vnfd_id,
3353 "vdu_name": vdu_name,
3354 "type": vca_type,
3355 "ee_descriptor_id": ee_descriptor_id,
3356 }
3357 vca_index += 1
3358
3359 # create VCA and configurationStatus in db
3360 db_dict = {
3361 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
3362 "configurationStatus.{}".format(vca_index): dict(),
3363 }
3364 self.update_db_2("nsrs", nsr_id, db_dict)
3365
3366 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
3367
3368 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
3369 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
3370 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
3371
3372 # Launch task
3373 task_n2vc = asyncio.ensure_future(
3374 self.instantiate_N2VC(
3375 logging_text=logging_text,
3376 vca_index=vca_index,
3377 nsi_id=nsi_id,
3378 db_nsr=db_nsr,
3379 db_vnfr=db_vnfr,
3380 vdu_id=vdu_id,
3381 kdu_name=kdu_name,
3382 vdu_index=vdu_index,
3383 deploy_params=deploy_params,
3384 config_descriptor=descriptor_config,
3385 base_folder=base_folder,
3386 nslcmop_id=nslcmop_id,
3387 stage=stage,
3388 vca_type=vca_type,
3389 vca_name=vca_name,
3390 ee_config_descriptor=ee_item,
3391 )
3392 )
3393 self.lcm_tasks.register(
3394 "ns",
3395 nsr_id,
3396 nslcmop_id,
3397 "instantiate_N2VC-{}".format(vca_index),
3398 task_n2vc,
3399 )
3400 task_instantiation_info[
3401 task_n2vc
3402 ] = self.task_name_deploy_vca + " {}.{}".format(
3403 member_vnf_index or "", vdu_id or ""
3404 )
3405
3406 @staticmethod
3407 def _create_nslcmop(nsr_id, operation, params):
3408 """
3409         Creates an ns-lcm-op content to be stored at the database.
3410 :param nsr_id: internal id of the instance
3411 :param operation: instantiate, terminate, scale, action, ...
3412 :param params: user parameters for the operation
3413 :return: dictionary following SOL005 format
3414 """
3415 # Raise exception if invalid arguments
3416 if not (nsr_id and operation and params):
3417 raise LcmException(
3418 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3419 )
3420 now = time()
3421 _id = str(uuid4())
3422 nslcmop = {
3423 "id": _id,
3424 "_id": _id,
3425 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3426 "operationState": "PROCESSING",
3427 "statusEnteredTime": now,
3428 "nsInstanceId": nsr_id,
3429 "lcmOperationType": operation,
3430 "startTime": now,
3431 "isAutomaticInvocation": False,
3432 "operationParams": params,
3433 "isCancelPending": False,
3434 "links": {
3435 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3436 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3437 },
3438 }
3439 return nslcmop
3440
3441 def _format_additional_params(self, params):
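        # values prefixed with "!!yaml " are parsed as YAML, e.g. {"vdu-list": "!!yaml [a, b]"}
        # becomes {"vdu-list": ["a", "b"]} (illustrative example)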
3442 params = params or {}
3443 for key, value in params.items():
3444 if str(value).startswith("!!yaml "):
3445 params[key] = yaml.safe_load(value[7:])
3446 return params
3447
3448 def _get_terminate_primitive_params(self, seq, vnf_index):
3449 primitive = seq.get("name")
3450 primitive_params = {}
3451 params = {
3452 "member_vnf_index": vnf_index,
3453 "primitive": primitive,
3454 "primitive_params": primitive_params,
3455 }
3456 desc_params = {}
3457 return self._map_primitive_params(seq, params, desc_params)
3458
3459 # sub-operations
3460
3461 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3462 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3463 if op.get("operationState") == "COMPLETED":
3464 # b. Skip sub-operation
3465 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3466 return self.SUBOPERATION_STATUS_SKIP
3467 else:
3468 # c. retry executing sub-operation
3469 # The sub-operation exists, and operationState != 'COMPLETED'
3470 # Update operationState = 'PROCESSING' to indicate a retry.
3471 operationState = "PROCESSING"
3472 detailed_status = "In progress"
3473 self._update_suboperation_status(
3474 db_nslcmop, op_index, operationState, detailed_status
3475 )
3476 # Return the sub-operation index
3477 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3478 # with arguments extracted from the sub-operation
3479 return op_index
3480
3481 # Find a sub-operation where all keys in a matching dictionary must match
3482 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3483 def _find_suboperation(self, db_nslcmop, match):
3484 if db_nslcmop and match:
3485 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3486 for i, op in enumerate(op_list):
3487 if all(op.get(k) == match[k] for k in match):
3488 return i
3489 return self.SUBOPERATION_STATUS_NOT_FOUND
3490
3491 # Update status for a sub-operation given its index
3492 def _update_suboperation_status(
3493 self, db_nslcmop, op_index, operationState, detailed_status
3494 ):
3495 # Update DB for HA tasks
3496 q_filter = {"_id": db_nslcmop["_id"]}
3497 update_dict = {
3498 "_admin.operations.{}.operationState".format(op_index): operationState,
3499 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3500 }
3501 self.db.set_one(
3502 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3503 )
3504
3505 # Add sub-operation, return the index of the added sub-operation
3506 # Optionally, set operationState, detailed-status, and operationType
3507 # Status and type are currently set for 'scale' sub-operations:
3508 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3509 # 'detailed-status' : status message
3510 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3511 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3512 def _add_suboperation(
3513 self,
3514 db_nslcmop,
3515 vnf_index,
3516 vdu_id,
3517 vdu_count_index,
3518 vdu_name,
3519 primitive,
3520 mapped_primitive_params,
3521 operationState=None,
3522 detailed_status=None,
3523 operationType=None,
3524 RO_nsr_id=None,
3525 RO_scaling_info=None,
3526 ):
3527 if not db_nslcmop:
3528 return self.SUBOPERATION_STATUS_NOT_FOUND
3529 # Get the "_admin.operations" list, if it exists
3530 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3531 op_list = db_nslcmop_admin.get("operations")
3532 # Create or append to the "_admin.operations" list
3533 new_op = {
3534 "member_vnf_index": vnf_index,
3535 "vdu_id": vdu_id,
3536 "vdu_count_index": vdu_count_index,
3537 "primitive": primitive,
3538 "primitive_params": mapped_primitive_params,
3539 }
3540 if operationState:
3541 new_op["operationState"] = operationState
3542 if detailed_status:
3543 new_op["detailed-status"] = detailed_status
3544 if operationType:
3545 new_op["lcmOperationType"] = operationType
3546 if RO_nsr_id:
3547 new_op["RO_nsr_id"] = RO_nsr_id
3548 if RO_scaling_info:
3549 new_op["RO_scaling_info"] = RO_scaling_info
3550 if not op_list:
3551 # No existing operations, create key 'operations' with current operation as first list element
3552 db_nslcmop_admin.update({"operations": [new_op]})
3553 op_list = db_nslcmop_admin.get("operations")
3554 else:
3555 # Existing operations, append operation to list
3556 op_list.append(new_op)
3557
3558 db_nslcmop_update = {"_admin.operations": op_list}
3559 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3560 op_index = len(op_list) - 1
3561 return op_index
3562
3563 # Helper methods for scale() sub-operations
3564
3565 # pre-scale/post-scale:
3566 # Check for 3 different cases:
3567 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3568 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3569 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
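# Usage sketch (added for clarity; hedged, not a verbatim excerpt of scale()): the caller is
# expected to interpret the return value roughly as follows:
#   op_index = self._check_or_add_scale_suboperation(
#       db_nslcmop, vnf_index, vnf_config_primitive, primitive_params, "PRE-SCALE")
#   if op_index == self.SUBOPERATION_STATUS_SKIP:
#       pass  # already COMPLETED in a previous run, nothing to re-execute
#   elif op_index == self.SUBOPERATION_STATUS_NEW:
#       ...   # first execution, call _ns_execute_primitive() with the original arguments
#   else:
#       ...   # retry, re-execute using the arguments stored at _admin.operations[op_index]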
3570 def _check_or_add_scale_suboperation(
3571 self,
3572 db_nslcmop,
3573 vnf_index,
3574 vnf_config_primitive,
3575 primitive_params,
3576 operationType,
3577 RO_nsr_id=None,
3578 RO_scaling_info=None,
3579 ):
3580 # Find this sub-operation
3581 if RO_nsr_id and RO_scaling_info:
3582 operationType = "SCALE-RO"
3583 match = {
3584 "member_vnf_index": vnf_index,
3585 "RO_nsr_id": RO_nsr_id,
3586 "RO_scaling_info": RO_scaling_info,
3587 }
3588 else:
3589 match = {
3590 "member_vnf_index": vnf_index,
3591 "primitive": vnf_config_primitive,
3592 "primitive_params": primitive_params,
3593 "lcmOperationType": operationType,
3594 }
3595 op_index = self._find_suboperation(db_nslcmop, match)
3596 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3597 # a. New sub-operation
3598 # The sub-operation does not exist, add it.
3599 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3600 # The following parameters are set to None for all kinds of scaling:
3601 vdu_id = None
3602 vdu_count_index = None
3603 vdu_name = None
3604 if RO_nsr_id and RO_scaling_info:
3605 vnf_config_primitive = None
3606 primitive_params = None
3607 else:
3608 RO_nsr_id = None
3609 RO_scaling_info = None
3610 # Initial status for sub-operation
3611 operationState = "PROCESSING"
3612 detailed_status = "In progress"
3613 # Add sub-operation for pre/post-scaling (zero or more operations)
3614 self._add_suboperation(
3615 db_nslcmop,
3616 vnf_index,
3617 vdu_id,
3618 vdu_count_index,
3619 vdu_name,
3620 vnf_config_primitive,
3621 primitive_params,
3622 operationState,
3623 detailed_status,
3624 operationType,
3625 RO_nsr_id,
3626 RO_scaling_info,
3627 )
3628 return self.SUBOPERATION_STATUS_NEW
3629 else:
3630 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3631 # or op_index (operationState != 'COMPLETED')
3632 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3633
3634 # Function to return execution_environment id
3635
3636 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3637 # TODO vdu_index_count
3638 for vca in vca_deployed_list:
3639 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3640 return vca["ee_id"]
3641
3642 async def destroy_N2VC(
3643 self,
3644 logging_text,
3645 db_nslcmop,
3646 vca_deployed,
3647 config_descriptor,
3648 vca_index,
3649 destroy_ee=True,
3650 exec_primitives=True,
3651 scaling_in=False,
3652 vca_id: str = None,
3653 ):
3654 """
3655 Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
3656 :param logging_text:
3657 :param db_nslcmop:
3658 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
3659 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
3660 :param vca_index: index in the database _admin.deployed.VCA
3661 :param destroy_ee: False to not destroy the execution environment, because all of them will be destroyed at once
3662 :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
3663 not executed properly
3664 :param scaling_in: True destroys the application, False destroys the model
3665 :return: None or exception
3666 """
3667
3668 self.logger.debug(
3669 logging_text
3670 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
3671 vca_index, vca_deployed, config_descriptor, destroy_ee
3672 )
3673 )
3674
3675 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
3676
3677 # execute terminate_primitives
3678 if exec_primitives:
3679 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
3680 config_descriptor.get("terminate-config-primitive"),
3681 vca_deployed.get("ee_descriptor_id"),
3682 )
3683 vdu_id = vca_deployed.get("vdu_id")
3684 vdu_count_index = vca_deployed.get("vdu_count_index")
3685 vdu_name = vca_deployed.get("vdu_name")
3686 vnf_index = vca_deployed.get("member-vnf-index")
3687 if terminate_primitives and vca_deployed.get("needed_terminate"):
3688 for seq in terminate_primitives:
3689 # For each sequence in list, get primitive and call _ns_execute_primitive()
3690 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
3691 vnf_index, seq.get("name")
3692 )
3693 self.logger.debug(logging_text + step)
3694 # Create the primitive for each sequence, i.e. "primitive": "touch"
3695 primitive = seq.get("name")
3696 mapped_primitive_params = self._get_terminate_primitive_params(
3697 seq, vnf_index
3698 )
3699
3700 # Add sub-operation
3701 self._add_suboperation(
3702 db_nslcmop,
3703 vnf_index,
3704 vdu_id,
3705 vdu_count_index,
3706 vdu_name,
3707 primitive,
3708 mapped_primitive_params,
3709 )
3710 # Sub-operations: Call _ns_execute_primitive() instead of action()
3711 try:
3712 result, result_detail = await self._ns_execute_primitive(
3713 vca_deployed["ee_id"],
3714 primitive,
3715 mapped_primitive_params,
3716 vca_type=vca_type,
3717 vca_id=vca_id,
3718 )
3719 except LcmException:
3720 # this happens when the VCA is not deployed. In this case there is nothing to terminate
3721 continue
3722 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
3723 if result not in result_ok:
3724 raise LcmException(
3725 "terminate_primitive {} for vnf_member_index={} fails with "
3726 "error {}".format(seq.get("name"), vnf_index, result_detail)
3727 )
3728 # mark that this VCA no longer needs to be terminated
3729 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
3730 vca_index
3731 )
3732 self.update_db_2(
3733 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
3734 )
3735
3736 if vca_deployed.get("prometheus_jobs") and self.prometheus:
3737 await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
3738
3739 if destroy_ee:
3740 await self.vca_map[vca_type].delete_execution_environment(
3741 vca_deployed["ee_id"],
3742 scaling_in=scaling_in,
3743 vca_id=vca_id,
3744 )
3745
3746 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3747 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3748 namespace = "." + db_nsr["_id"]
3749 try:
3750 await self.n2vc.delete_namespace(
3751 namespace=namespace,
3752 total_timeout=self.timeout_charm_delete,
3753 vca_id=vca_id,
3754 )
3755 except N2VCNotFound: # already deleted. Skip
3756 pass
3757 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3758
3759 async def _terminate_RO(
3760 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
3761 ):
3762 """
3763 Terminates a deployment from RO
3764 :param logging_text:
3765 :param nsr_deployed: db_nsr._admin.deployed
3766 :param nsr_id:
3767 :param nslcmop_id:
3768 :param stage: list of strings with the content to write to db_nslcmop.detailed-status.
3769 This method updates only index 2, but it writes the concatenated content of the whole list to the database
3770 :return:
3771 """
3772 db_nsr_update = {}
3773 failed_detail = []
3774 ro_nsr_id = ro_delete_action = None
3775 if nsr_deployed and nsr_deployed.get("RO"):
3776 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
3777 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
3778 try:
3779 if ro_nsr_id:
3780 stage[2] = "Deleting ns from VIM."
3781 db_nsr_update["detailed-status"] = " ".join(stage)
3782 self._write_op_status(nslcmop_id, stage)
3783 self.logger.debug(logging_text + stage[2])
3784 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3785 self._write_op_status(nslcmop_id, stage)
3786 desc = await self.RO.delete("ns", ro_nsr_id)
3787 ro_delete_action = desc["action_id"]
3788 db_nsr_update[
3789 "_admin.deployed.RO.nsr_delete_action_id"
3790 ] = ro_delete_action
3791 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3792 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3793 if ro_delete_action:
3794 # wait until NS is deleted from VIM
3795 stage[2] = "Waiting for ns to be deleted from VIM."
3796 detailed_status_old = None
3797 self.logger.debug(
3798 logging_text
3799 + stage[2]
3800 + " RO_id={} ro_delete_action={}".format(
3801 ro_nsr_id, ro_delete_action
3802 )
3803 )
3804 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3805 self._write_op_status(nslcmop_id, stage)
3806
3807 delete_timeout = 20 * 60 # 20 minutes
3808 while delete_timeout > 0:
3809 desc = await self.RO.show(
3810 "ns",
3811 item_id_name=ro_nsr_id,
3812 extra_item="action",
3813 extra_item_id=ro_delete_action,
3814 )
3815
3816 # deploymentStatus
3817 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3818
3819 ns_status, ns_status_info = self.RO.check_action_status(desc)
3820 if ns_status == "ERROR":
3821 raise ROclient.ROClientException(ns_status_info)
3822 elif ns_status == "BUILD":
3823 stage[2] = "Deleting from VIM {}".format(ns_status_info)
3824 elif ns_status == "ACTIVE":
3825 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3826 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3827 break
3828 else:
3829 assert (
3830 False
3831 ), "ROclient.check_action_status returns unknown {}".format(
3832 ns_status
3833 )
3834 if stage[2] != detailed_status_old:
3835 detailed_status_old = stage[2]
3836 db_nsr_update["detailed-status"] = " ".join(stage)
3837 self._write_op_status(nslcmop_id, stage)
3838 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3839 await asyncio.sleep(5, loop=self.loop)
3840 delete_timeout -= 5
3841 else: # delete_timeout <= 0:
3842 raise ROclient.ROClientException(
3843 "Timeout waiting ns deleted from VIM"
3844 )
3845
3846 except Exception as e:
3847 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3848 if (
3849 isinstance(e, ROclient.ROClientException) and e.http_code == 404
3850 ): # not found
3851 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3852 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3853 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3854 self.logger.debug(
3855 logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
3856 )
3857 elif (
3858 isinstance(e, ROclient.ROClientException) and e.http_code == 409
3859 ): # conflict
3860 failed_detail.append("delete conflict: {}".format(e))
3861 self.logger.debug(
3862 logging_text
3863 + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
3864 )
3865 else:
3866 failed_detail.append("delete error: {}".format(e))
3867 self.logger.error(
3868 logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
3869 )
3870
3871 # Delete nsd
3872 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
3873 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
3874 try:
3875 stage[2] = "Deleting nsd from RO."
3876 db_nsr_update["detailed-status"] = " ".join(stage)
3877 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3878 self._write_op_status(nslcmop_id, stage)
3879 await self.RO.delete("nsd", ro_nsd_id)
3880 self.logger.debug(
3881 logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
3882 )
3883 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3884 except Exception as e:
3885 if (
3886 isinstance(e, ROclient.ROClientException) and e.http_code == 404
3887 ): # not found
3888 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3889 self.logger.debug(
3890 logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
3891 )
3892 elif (
3893 isinstance(e, ROclient.ROClientException) and e.http_code == 409
3894 ): # conflict
3895 failed_detail.append(
3896 "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
3897 )
3898 self.logger.debug(logging_text + failed_detail[-1])
3899 else:
3900 failed_detail.append(
3901 "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
3902 )
3903 self.logger.error(logging_text + failed_detail[-1])
3904
3905 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
3906 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
3907 if not vnf_deployed or not vnf_deployed["id"]:
3908 continue
3909 try:
3910 ro_vnfd_id = vnf_deployed["id"]
3911 stage[
3912 2
3913 ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
3914 vnf_deployed["member-vnf-index"], ro_vnfd_id
3915 )
3916 db_nsr_update["detailed-status"] = " ".join(stage)
3917 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3918 self._write_op_status(nslcmop_id, stage)
3919 await self.RO.delete("vnfd", ro_vnfd_id)
3920 self.logger.debug(
3921 logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
3922 )
3923 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3924 except Exception as e:
3925 if (
3926 isinstance(e, ROclient.ROClientException) and e.http_code == 404
3927 ): # not found
3928 db_nsr_update[
3929 "_admin.deployed.RO.vnfd.{}.id".format(index)
3930 ] = None
3931 self.logger.debug(
3932 logging_text
3933 + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
3934 )
3935 elif (
3936 isinstance(e, ROclient.ROClientException) and e.http_code == 409
3937 ): # conflict
3938 failed_detail.append(
3939 "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
3940 )
3941 self.logger.debug(logging_text + failed_detail[-1])
3942 else:
3943 failed_detail.append(
3944 "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
3945 )
3946 self.logger.error(logging_text + failed_detail[-1])
3947
3948 if failed_detail:
3949 stage[2] = "Error deleting from VIM"
3950 else:
3951 stage[2] = "Deleted from VIM"
3952 db_nsr_update["detailed-status"] = " ".join(stage)
3953 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3954 self._write_op_status(nslcmop_id, stage)
3955
3956 if failed_detail:
3957 raise LcmException("; ".join(failed_detail))
3958
3959 async def terminate(self, nsr_id, nslcmop_id):
3960 # Try to lock HA task here
3961 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
3962 if not task_is_locked_by_me:
3963 return
3964
3965 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3966 self.logger.debug(logging_text + "Enter")
3967 timeout_ns_terminate = self.timeout_ns_terminate
3968 db_nsr = None
3969 db_nslcmop = None
3970 operation_params = None
3971 exc = None
3972 error_list = [] # annotates all failed error messages
3973 db_nslcmop_update = {}
3974 autoremove = False # autoremove after terminated
3975 tasks_dict_info = {}
3976 db_nsr_update = {}
3977 stage = [
3978 "Stage 1/3: Preparing task.",
3979 "Waiting for previous operations to terminate.",
3980 "",
3981 ]
3982 # ^ contains [stage, step, VIM-status]
3983 try:
3984 # wait for any previous tasks in process
3985 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
3986
3987 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3988 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3989 operation_params = db_nslcmop.get("operationParams") or {}
3990 if operation_params.get("timeout_ns_terminate"):
3991 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3992 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3993 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3994
3995 db_nsr_update["operational-status"] = "terminating"
3996 db_nsr_update["config-status"] = "terminating"
3997 self._write_ns_status(
3998 nsr_id=nsr_id,
3999 ns_state="TERMINATING",
4000 current_operation="TERMINATING",
4001 current_operation_id=nslcmop_id,
4002 other_update=db_nsr_update,
4003 )
4004 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4005 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4006 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4007 return
4008
4009 stage[1] = "Getting vnf descriptors from db."
4010 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4011 db_vnfrs_dict = {
4012 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4013 }
4014 db_vnfds_from_id = {}
4015 db_vnfds_from_member_index = {}
4016 # Loop over VNFRs
4017 for vnfr in db_vnfrs_list:
4018 vnfd_id = vnfr["vnfd-id"]
4019 if vnfd_id not in db_vnfds_from_id:
4020 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4021 db_vnfds_from_id[vnfd_id] = vnfd
4022 db_vnfds_from_member_index[
4023 vnfr["member-vnf-index-ref"]
4024 ] = db_vnfds_from_id[vnfd_id]
4025
4026 # Destroy individual execution environments when there are terminating primitives.
4027 # Rest of EE will be deleted at once
4028 # TODO - check before calling _destroy_N2VC
4029 # if not operation_params.get("skip_terminate_primitives"):#
4030 # or not vca.get("needed_terminate"):
4031 stage[0] = "Stage 2/3 execute terminating primitives."
4032 self.logger.debug(logging_text + stage[0])
4033 stage[1] = "Looking for execution environments that need to be terminated."
4034 self.logger.debug(logging_text + stage[1])
4035
4036 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4037 config_descriptor = None
4038
4039 vca_id = self.get_vca_id(db_vnfrs_dict[vca["member-vnf-index"]], db_nsr)
4040 if not vca or not vca.get("ee_id"):
4041 continue
4042 if not vca.get("member-vnf-index"):
4043 # ns
4044 config_descriptor = db_nsr.get("ns-configuration")
4045 elif vca.get("vdu_id"):
4046 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4047 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4048 elif vca.get("kdu_name"):
4049 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4050 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4051 else:
4052 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4053 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4054 vca_type = vca.get("type")
4055 exec_terminate_primitives = not operation_params.get(
4056 "skip_terminate_primitives"
4057 ) and vca.get("needed_terminate")
4058 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4059 # pending native charms
4060 destroy_ee = (
4061 True if vca_type in ("helm", "helm-v3", "native_charm") else False
4062 )
4063 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4064 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4065 task = asyncio.ensure_future(
4066 self.destroy_N2VC(
4067 logging_text,
4068 db_nslcmop,
4069 vca,
4070 config_descriptor,
4071 vca_index,
4072 destroy_ee,
4073 exec_terminate_primitives,
4074 vca_id=vca_id,
4075 )
4076 )
4077 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4078
4079 # wait for pending tasks of terminate primitives
4080 if tasks_dict_info:
4081 self.logger.debug(
4082 logging_text
4083 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4084 )
4085 error_list = await self._wait_for_tasks(
4086 logging_text,
4087 tasks_dict_info,
4088 min(self.timeout_charm_delete, timeout_ns_terminate),
4089 stage,
4090 nslcmop_id,
4091 )
4092 tasks_dict_info.clear()
4093 if error_list:
4094 return # raise LcmException("; ".join(error_list))
4095
4096 # remove All execution environments at once
4097 stage[0] = "Stage 3/3 delete all."
4098
4099 if nsr_deployed.get("VCA"):
4100 stage[1] = "Deleting all execution environments."
4101 self.logger.debug(logging_text + stage[1])
4102 vca_id = self.get_vca_id({}, db_nsr)
4103 task_delete_ee = asyncio.ensure_future(
4104 asyncio.wait_for(
4105 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4106 timeout=self.timeout_charm_delete,
4107 )
4108 )
4109 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4110 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4111
4112 # Delete from k8scluster
4113 stage[1] = "Deleting KDUs."
4114 self.logger.debug(logging_text + stage[1])
4115 # print(nsr_deployed)
4116 for kdu in get_iterable(nsr_deployed, "K8s"):
4117 if not kdu or not kdu.get("kdu-instance"):
4118 continue
4119 kdu_instance = kdu.get("kdu-instance")
4120 if kdu.get("k8scluster-type") in self.k8scluster_map:
4121 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4122 vca_id = self.get_vca_id({}, db_nsr)
4123 task_delete_kdu_instance = asyncio.ensure_future(
4124 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4125 cluster_uuid=kdu.get("k8scluster-uuid"),
4126 kdu_instance=kdu_instance,
4127 vca_id=vca_id,
4128 )
4129 )
4130 else:
4131 self.logger.error(
4132 logging_text
4133 + "Unknown k8s deployment type {}".format(
4134 kdu.get("k8scluster-type")
4135 )
4136 )
4137 continue
4138 tasks_dict_info[
4139 task_delete_kdu_instance
4140 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4141
4142 # remove from RO
4143 stage[1] = "Deleting ns from VIM."
4144 if self.ng_ro:
4145 task_delete_ro = asyncio.ensure_future(
4146 self._terminate_ng_ro(
4147 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4148 )
4149 )
4150 else:
4151 task_delete_ro = asyncio.ensure_future(
4152 self._terminate_RO(
4153 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4154 )
4155 )
4156 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4157
4158 # rest of the work will be done in the finally block
4159
4160 except (
4161 ROclient.ROClientException,
4162 DbException,
4163 LcmException,
4164 N2VCException,
4165 ) as e:
4166 self.logger.error(logging_text + "Exit Exception {}".format(e))
4167 exc = e
4168 except asyncio.CancelledError:
4169 self.logger.error(
4170 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4171 )
4172 exc = "Operation was cancelled"
4173 except Exception as e:
4174 exc = traceback.format_exc()
4175 self.logger.critical(
4176 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4177 exc_info=True,
4178 )
4179 finally:
4180 if exc:
4181 error_list.append(str(exc))
4182 try:
4183 # wait for pending tasks
4184 if tasks_dict_info:
4185 stage[1] = "Waiting for terminate pending tasks."
4186 self.logger.debug(logging_text + stage[1])
4187 error_list += await self._wait_for_tasks(
4188 logging_text,
4189 tasks_dict_info,
4190 timeout_ns_terminate,
4191 stage,
4192 nslcmop_id,
4193 )
4194 stage[1] = stage[2] = ""
4195 except asyncio.CancelledError:
4196 error_list.append("Cancelled")
4197 # TODO cancel all tasks
4198 except Exception as exc:
4199 error_list.append(str(exc))
4200 # update status at database
4201 if error_list:
4202 error_detail = "; ".join(error_list)
4203 # self.logger.error(logging_text + error_detail)
4204 error_description_nslcmop = "{} Detail: {}".format(
4205 stage[0], error_detail
4206 )
4207 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4208 nslcmop_id, stage[0]
4209 )
4210
4211 db_nsr_update["operational-status"] = "failed"
4212 db_nsr_update["detailed-status"] = (
4213 error_description_nsr + " Detail: " + error_detail
4214 )
4215 db_nslcmop_update["detailed-status"] = error_detail
4216 nslcmop_operation_state = "FAILED"
4217 ns_state = "BROKEN"
4218 else:
4219 error_detail = None
4220 error_description_nsr = error_description_nslcmop = None
4221 ns_state = "NOT_INSTANTIATED"
4222 db_nsr_update["operational-status"] = "terminated"
4223 db_nsr_update["detailed-status"] = "Done"
4224 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4225 db_nslcmop_update["detailed-status"] = "Done"
4226 nslcmop_operation_state = "COMPLETED"
4227
4228 if db_nsr:
4229 self._write_ns_status(
4230 nsr_id=nsr_id,
4231 ns_state=ns_state,
4232 current_operation="IDLE",
4233 current_operation_id=None,
4234 error_description=error_description_nsr,
4235 error_detail=error_detail,
4236 other_update=db_nsr_update,
4237 )
4238 self._write_op_status(
4239 op_id=nslcmop_id,
4240 stage="",
4241 error_message=error_description_nslcmop,
4242 operation_state=nslcmop_operation_state,
4243 other_update=db_nslcmop_update,
4244 )
4245 if ns_state == "NOT_INSTANTIATED":
4246 try:
4247 self.db.set_list(
4248 "vnfrs",
4249 {"nsr-id-ref": nsr_id},
4250 {"_admin.nsState": "NOT_INSTANTIATED"},
4251 )
4252 except DbException as e:
4253 self.logger.warn(
4254 logging_text
4255 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4256 nsr_id, e
4257 )
4258 )
4259 if operation_params:
4260 autoremove = operation_params.get("autoremove", False)
4261 if nslcmop_operation_state:
4262 try:
4263 await self.msg.aiowrite(
4264 "ns",
4265 "terminated",
4266 {
4267 "nsr_id": nsr_id,
4268 "nslcmop_id": nslcmop_id,
4269 "operationState": nslcmop_operation_state,
4270 "autoremove": autoremove,
4271 },
4272 loop=self.loop,
4273 )
4274 except Exception as e:
4275 self.logger.error(
4276 logging_text + "kafka_write notification Exception {}".format(e)
4277 )
4278
4279 self.logger.debug(logging_text + "Exit")
4280 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4281
4282 async def _wait_for_tasks(
4283 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4284 ):
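# Descriptive note (added for clarity): waits for the given asyncio tasks, updating stage[1]
# with a "done/total" counter after each completion, collecting one error string per failed
# or timed-out task, and returning the list of detailed error messages.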
4285 time_start = time()
4286 error_detail_list = []
4287 error_list = []
4288 pending_tasks = list(created_tasks_info.keys())
4289 num_tasks = len(pending_tasks)
4290 num_done = 0
4291 stage[1] = "{}/{}.".format(num_done, num_tasks)
4292 self._write_op_status(nslcmop_id, stage)
4293 while pending_tasks:
4294 new_error = None
4295 _timeout = timeout + time_start - time()
4296 done, pending_tasks = await asyncio.wait(
4297 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4298 )
4299 num_done += len(done)
4300 if not done: # Timeout
4301 for task in pending_tasks:
4302 new_error = created_tasks_info[task] + ": Timeout"
4303 error_detail_list.append(new_error)
4304 error_list.append(new_error)
4305 break
4306 for task in done:
4307 if task.cancelled():
4308 exc = "Cancelled"
4309 else:
4310 exc = task.exception()
4311 if exc:
4312 if isinstance(exc, asyncio.TimeoutError):
4313 exc = "Timeout"
4314 new_error = created_tasks_info[task] + ": {}".format(exc)
4315 error_list.append(created_tasks_info[task])
4316 error_detail_list.append(new_error)
4317 if isinstance(
4318 exc,
4319 (
4320 str,
4321 DbException,
4322 N2VCException,
4323 ROclient.ROClientException,
4324 LcmException,
4325 K8sException,
4326 NgRoException,
4327 ),
4328 ):
4329 self.logger.error(logging_text + new_error)
4330 else:
4331 exc_traceback = "".join(
4332 traceback.format_exception(None, exc, exc.__traceback__)
4333 )
4334 self.logger.error(
4335 logging_text
4336 + created_tasks_info[task]
4337 + " "
4338 + exc_traceback
4339 )
4340 else:
4341 self.logger.debug(
4342 logging_text + created_tasks_info[task] + ": Done"
4343 )
4344 stage[1] = "{}/{}.".format(num_done, num_tasks)
4345 if new_error:
4346 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4347 if nsr_id: # update also nsr
4348 self.update_db_2(
4349 "nsrs",
4350 nsr_id,
4351 {
4352 "errorDescription": "Error at: " + ", ".join(error_list),
4353 "errorDetail": ". ".join(error_detail_list),
4354 },
4355 )
4356 self._write_op_status(nslcmop_id, stage)
4357 return error_detail_list
4358
4359 @staticmethod
4360 def _map_primitive_params(primitive_desc, params, instantiation_params):
4361 """
4362 Generates the params to be provided to the charm before executing a primitive. If the user does not provide a parameter,
4363 the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
4364 :param primitive_desc: portion of VNFD/NSD that describes primitive
4365 :param params: Params provided by user
4366 :param instantiation_params: Instantiation params provided by user
4367 :return: a dictionary with the calculated params
4368 """
4369 calculated_params = {}
4370 for parameter in primitive_desc.get("parameter", ()):
4371 param_name = parameter["name"]
4372 if param_name in params:
4373 calculated_params[param_name] = params[param_name]
4374 elif "default-value" in parameter or "value" in parameter:
4375 if "value" in parameter:
4376 calculated_params[param_name] = parameter["value"]
4377 else:
4378 calculated_params[param_name] = parameter["default-value"]
4379 if (
4380 isinstance(calculated_params[param_name], str)
4381 and calculated_params[param_name].startswith("<")
4382 and calculated_params[param_name].endswith(">")
4383 ):
4384 if calculated_params[param_name][1:-1] in instantiation_params:
4385 calculated_params[param_name] = instantiation_params[
4386 calculated_params[param_name][1:-1]
4387 ]
4388 else:
4389 raise LcmException(
4390 "Parameter {} needed to execute primitive {} not provided".format(
4391 calculated_params[param_name], primitive_desc["name"]
4392 )
4393 )
4394 else:
4395 raise LcmException(
4396 "Parameter {} needed to execute primitive {} not provided".format(
4397 param_name, primitive_desc["name"]
4398 )
4399 )
4400
4401 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4402 calculated_params[param_name] = yaml.safe_dump(
4403 calculated_params[param_name], default_flow_style=True, width=256
4404 )
4405 elif isinstance(calculated_params[param_name], str) and calculated_params[
4406 param_name
4407 ].startswith("!!yaml "):
4408 calculated_params[param_name] = calculated_params[param_name][7:]
4409 if parameter.get("data-type") == "INTEGER":
4410 try:
4411 calculated_params[param_name] = int(calculated_params[param_name])
4412 except ValueError: # error converting string to int
4413 raise LcmException(
4414 "Parameter {} of primitive {} must be integer".format(
4415 param_name, primitive_desc["name"]
4416 )
4417 )
4418 elif parameter.get("data-type") == "BOOLEAN":
4419 calculated_params[param_name] = not (
4420 (str(calculated_params[param_name])).lower() == "false"
4421 )
4422
4423 # always add ns_config_info if the primitive name is config
4424 if primitive_desc["name"] == "config":
4425 if "ns_config_info" in instantiation_params:
4426 calculated_params["ns_config_info"] = instantiation_params[
4427 "ns_config_info"
4428 ]
4429 return calculated_params
4430
4431 def _look_for_deployed_vca(
4432 self,
4433 deployed_vca,
4434 member_vnf_index,
4435 vdu_id,
4436 vdu_count_index,
4437 kdu_name=None,
4438 ee_descriptor_id=None,
4439 ):
4440 # find the vca_deployed record for this action. Raise LcmException if not found or there is no ee_id.
4441 for vca in deployed_vca:
4442 if not vca:
4443 continue
4444 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4445 continue
4446 if (
4447 vdu_count_index is not None
4448 and vdu_count_index != vca["vdu_count_index"]
4449 ):
4450 continue
4451 if kdu_name and kdu_name != vca["kdu_name"]:
4452 continue
4453 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4454 continue
4455 break
4456 else:
4457 # vca_deployed not found
4458 raise LcmException(
4459 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4460 " is not deployed".format(
4461 member_vnf_index,
4462 vdu_id,
4463 vdu_count_index,
4464 kdu_name,
4465 ee_descriptor_id,
4466 )
4467 )
4468 # get ee_id
4469 ee_id = vca.get("ee_id")
4470 vca_type = vca.get(
4471 "type", "lxc_proxy_charm"
4472 ) # default value for backward compatibility - proxy charm
4473 if not ee_id:
4474 raise LcmException(
4475 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4476 "execution environment".format(
4477 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4478 )
4479 )
4480 return ee_id, vca_type
4481
4482 async def _ns_execute_primitive(
4483 self,
4484 ee_id,
4485 primitive,
4486 primitive_params,
4487 retries=0,
4488 retries_interval=30,
4489 timeout=None,
4490 vca_type=None,
4491 db_dict=None,
4492 vca_id: str = None,
4493 ) -> (str, str):
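# Descriptive note (added for clarity): the primitive is attempted once plus up to 'retries'
# additional times, sleeping 'retries_interval' seconds between attempts; on success it
# returns ("COMPLETED", output), otherwise ("FAILED", <error>) after the last attempt.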
4494 try:
4495 if primitive == "config":
4496 primitive_params = {"params": primitive_params}
4497
4498 vca_type = vca_type or "lxc_proxy_charm"
4499
4500 while retries >= 0:
4501 try:
4502 output = await asyncio.wait_for(
4503 self.vca_map[vca_type].exec_primitive(
4504 ee_id=ee_id,
4505 primitive_name=primitive,
4506 params_dict=primitive_params,
4507 progress_timeout=self.timeout_progress_primitive,
4508 total_timeout=self.timeout_primitive,
4509 db_dict=db_dict,
4510 vca_id=vca_id,
4511 ),
4512 timeout=timeout or self.timeout_primitive,
4513 )
4514 # execution was OK
4515 break
4516 except asyncio.CancelledError:
4517 raise
4518 except Exception as e: # asyncio.TimeoutError
4519 if isinstance(e, asyncio.TimeoutError):
4520 e = "Timeout"
4521 retries -= 1
4522 if retries >= 0:
4523 self.logger.debug(
4524 "Error executing action {} on {} -> {}".format(
4525 primitive, ee_id, e
4526 )
4527 )
4528 # wait and retry
4529 await asyncio.sleep(retries_interval, loop=self.loop)
4530 else:
4531 return "FAILED", str(e)
4532
4533 return "COMPLETED", output
4534
4535 except (LcmException, asyncio.CancelledError):
4536 raise
4537 except Exception as e:
4538 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4539
4540 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4541 """
4542 Updating the vca_status with latest juju information in nsrs record
4543 :param: nsr_id: Id of the nsr
4544 :param: nslcmop_id: Id of the nslcmop
4545 :return: None
4546 """
4547
4548 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4549 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4550 vca_id = self.get_vca_id({}, db_nsr)
4551 if db_nsr["_admin"]["deployed"]["K8s"]:
4552 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4553 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4554 await self._on_update_k8s_db(
4555 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4556 )
4557 else:
4558 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4559 table, filter = "nsrs", {"_id": nsr_id}
4560 path = "_admin.deployed.VCA.{}.".format(vca_index)
4561 await self._on_update_n2vc_db(table, filter, path, {})
4562
4563 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4564 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4565
4566 async def action(self, nsr_id, nslcmop_id):
4567 # Try to lock HA task here
4568 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4569 if not task_is_locked_by_me:
4570 return
4571
4572 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4573 self.logger.debug(logging_text + "Enter")
4574 # get all needed from database
4575 db_nsr = None
4576 db_nslcmop = None
4577 db_nsr_update = {}
4578 db_nslcmop_update = {}
4579 nslcmop_operation_state = None
4580 error_description_nslcmop = None
4581 exc = None
4582 try:
4583 # wait for any previous tasks in process
4584 step = "Waiting for previous operations to terminate"
4585 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4586
4587 self._write_ns_status(
4588 nsr_id=nsr_id,
4589 ns_state=None,
4590 current_operation="RUNNING ACTION",
4591 current_operation_id=nslcmop_id,
4592 )
4593
4594 step = "Getting information from database"
4595 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4596 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4597
4598 nsr_deployed = db_nsr["_admin"].get("deployed")
4599 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4600 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4601 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4602 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4603 primitive = db_nslcmop["operationParams"]["primitive"]
4604 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4605 timeout_ns_action = db_nslcmop["operationParams"].get(
4606 "timeout_ns_action", self.timeout_primitive
4607 )
4608
4609 if vnf_index:
4610 step = "Getting vnfr from database"
4611 db_vnfr = self.db.get_one(
4612 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4613 )
4614 step = "Getting vnfd from database"
4615 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4616 else:
4617 step = "Getting nsd from database"
4618 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4619
4620 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4621 # for backward compatibility
4622 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4623 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4624 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4625 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4626
4627 # look for primitive
4628 config_primitive_desc = descriptor_configuration = None
4629 if vdu_id:
4630 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4631 elif kdu_name:
4632 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4633 elif vnf_index:
4634 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4635 else:
4636 descriptor_configuration = db_nsd.get("ns-configuration")
4637
4638 if descriptor_configuration and descriptor_configuration.get(
4639 "config-primitive"
4640 ):
4641 for config_primitive in descriptor_configuration["config-primitive"]:
4642 if config_primitive["name"] == primitive:
4643 config_primitive_desc = config_primitive
4644 break
4645
4646 if not config_primitive_desc:
4647 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4648 raise LcmException(
4649 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4650 primitive
4651 )
4652 )
4653 primitive_name = primitive
4654 ee_descriptor_id = None
4655 else:
4656 primitive_name = config_primitive_desc.get(
4657 "execution-environment-primitive", primitive
4658 )
4659 ee_descriptor_id = config_primitive_desc.get(
4660 "execution-environment-ref"
4661 )
4662
4663 if vnf_index:
4664 if vdu_id:
4665 vdur = next(
4666 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4667 )
4668 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4669 elif kdu_name:
4670 kdur = next(
4671 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4672 )
4673 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4674 else:
4675 desc_params = parse_yaml_strings(
4676 db_vnfr.get("additionalParamsForVnf")
4677 )
4678 else:
4679 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4680 if kdu_name and get_configuration(db_vnfd, kdu_name):
4681 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4682 actions = set()
4683 for primitive in kdu_configuration.get("initial-config-primitive", []):
4684 actions.add(primitive["name"])
4685 for primitive in kdu_configuration.get("config-primitive", []):
4686 actions.add(primitive["name"])
4687 kdu_action = primitive_name in actions
4688
4689 # TODO check if ns is in a proper status
4690 if kdu_name and (
4691 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4692 ):
4693 # kdur and desc_params already set from before
4694 if primitive_params:
4695 desc_params.update(primitive_params)
4696 # TODO Check if we will need something at vnf level
4697 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
4698 if (
4699 kdu_name == kdu["kdu-name"]
4700 and kdu["member-vnf-index"] == vnf_index
4701 ):
4702 break
4703 else:
4704 raise LcmException(
4705 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
4706 )
4707
4708 if kdu.get("k8scluster-type") not in self.k8scluster_map:
4709 msg = "unknown k8scluster-type '{}'".format(
4710 kdu.get("k8scluster-type")
4711 )
4712 raise LcmException(msg)
4713
4714 db_dict = {
4715 "collection": "nsrs",
4716 "filter": {"_id": nsr_id},
4717 "path": "_admin.deployed.K8s.{}".format(index),
4718 }
4719 self.logger.debug(
4720 logging_text
4721 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
4722 )
4723 step = "Executing kdu {}".format(primitive_name)
4724 if primitive_name == "upgrade":
4725 if desc_params.get("kdu_model"):
4726 kdu_model = desc_params.get("kdu_model")
4727 del desc_params["kdu_model"]
4728 else:
4729 kdu_model = kdu.get("kdu-model")
4730 parts = kdu_model.split(sep=":")
4731 if len(parts) == 2:
4732 kdu_model = parts[0]
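# Illustrative note (added; chart name is an assumption): a kdu-model such as
# "stable/mychart:1.2.3" is reduced here to "stable/mychart", dropping the optional
# version tag after the colon.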
4733
4734 detailed_status = await asyncio.wait_for(
4735 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
4736 cluster_uuid=kdu.get("k8scluster-uuid"),
4737 kdu_instance=kdu.get("kdu-instance"),
4738 atomic=True,
4739 kdu_model=kdu_model,
4740 params=desc_params,
4741 db_dict=db_dict,
4742 timeout=timeout_ns_action,
4743 ),
4744 timeout=timeout_ns_action + 10,
4745 )
4746 self.logger.debug(
4747 logging_text + " Upgrade of kdu {} done".format(detailed_status)
4748 )
4749 elif primitive_name == "rollback":
4750 detailed_status = await asyncio.wait_for(
4751 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
4752 cluster_uuid=kdu.get("k8scluster-uuid"),
4753 kdu_instance=kdu.get("kdu-instance"),
4754 db_dict=db_dict,
4755 ),
4756 timeout=timeout_ns_action,
4757 )
4758 elif primitive_name == "status":
4759 detailed_status = await asyncio.wait_for(
4760 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
4761 cluster_uuid=kdu.get("k8scluster-uuid"),
4762 kdu_instance=kdu.get("kdu-instance"),
4763 vca_id=vca_id,
4764 ),
4765 timeout=timeout_ns_action,
4766 )
4767 else:
4768 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
4769 kdu["kdu-name"], nsr_id
4770 )
4771 params = self._map_primitive_params(
4772 config_primitive_desc, primitive_params, desc_params
4773 )
4774
4775 detailed_status = await asyncio.wait_for(
4776 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
4777 cluster_uuid=kdu.get("k8scluster-uuid"),
4778 kdu_instance=kdu_instance,
4779 primitive_name=primitive_name,
4780 params=params,
4781 db_dict=db_dict,
4782 timeout=timeout_ns_action,
4783 vca_id=vca_id,
4784 ),
4785 timeout=timeout_ns_action,
4786 )
4787
4788 if detailed_status:
4789 nslcmop_operation_state = "COMPLETED"
4790 else:
4791 detailed_status = ""
4792 nslcmop_operation_state = "FAILED"
4793 else:
4794 ee_id, vca_type = self._look_for_deployed_vca(
4795 nsr_deployed["VCA"],
4796 member_vnf_index=vnf_index,
4797 vdu_id=vdu_id,
4798 vdu_count_index=vdu_count_index,
4799 ee_descriptor_id=ee_descriptor_id,
4800 )
4801 for vca_index, vca_deployed in enumerate(
4802 db_nsr["_admin"]["deployed"]["VCA"]
4803 ):
4804 if vca_deployed.get("member-vnf-index") == vnf_index:
4805 db_dict = {
4806 "collection": "nsrs",
4807 "filter": {"_id": nsr_id},
4808 "path": "_admin.deployed.VCA.{}.".format(vca_index),
4809 }
4810 break
4811 (
4812 nslcmop_operation_state,
4813 detailed_status,
4814 ) = await self._ns_execute_primitive(
4815 ee_id,
4816 primitive=primitive_name,
4817 primitive_params=self._map_primitive_params(
4818 config_primitive_desc, primitive_params, desc_params
4819 ),
4820 timeout=timeout_ns_action,
4821 vca_type=vca_type,
4822 db_dict=db_dict,
4823 vca_id=vca_id,
4824 )
4825
4826 db_nslcmop_update["detailed-status"] = detailed_status
4827 error_description_nslcmop = (
4828 detailed_status if nslcmop_operation_state == "FAILED" else ""
4829 )
4830 self.logger.debug(
4831 logging_text
4832 + " task Done with result {} {}".format(
4833 nslcmop_operation_state, detailed_status
4834 )
4835 )
4836 return # database update is called inside finally
4837
4838 except (DbException, LcmException, N2VCException, K8sException) as e:
4839 self.logger.error(logging_text + "Exit Exception {}".format(e))
4840 exc = e
4841 except asyncio.CancelledError:
4842 self.logger.error(
4843 logging_text + "Cancelled Exception while '{}'".format(step)
4844 )
4845 exc = "Operation was cancelled"
4846 except asyncio.TimeoutError:
4847 self.logger.error(logging_text + "Timeout while '{}'".format(step))
4848 exc = "Timeout"
4849 except Exception as e:
4850 exc = traceback.format_exc()
4851 self.logger.critical(
4852 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
4853 exc_info=True,
4854 )
4855 finally:
4856 if exc:
4857 db_nslcmop_update[
4858 "detailed-status"
4859 ] = (
4860 detailed_status
4861 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4862 nslcmop_operation_state = "FAILED"
4863 if db_nsr:
4864 self._write_ns_status(
4865 nsr_id=nsr_id,
4866 ns_state=db_nsr[
4867 "nsState"
4868 ], # TODO check if degraded. For the moment use previous status
4869 current_operation="IDLE",
4870 current_operation_id=None,
4871 # error_description=error_description_nsr,
4872 # error_detail=error_detail,
4873 other_update=db_nsr_update,
4874 )
4875
4876 self._write_op_status(
4877 op_id=nslcmop_id,
4878 stage="",
4879 error_message=error_description_nslcmop,
4880 operation_state=nslcmop_operation_state,
4881 other_update=db_nslcmop_update,
4882 )
4883
4884 if nslcmop_operation_state:
4885 try:
4886 await self.msg.aiowrite(
4887 "ns",
4888 "actioned",
4889 {
4890 "nsr_id": nsr_id,
4891 "nslcmop_id": nslcmop_id,
4892 "operationState": nslcmop_operation_state,
4893 },
4894 loop=self.loop,
4895 )
4896 except Exception as e:
4897 self.logger.error(
4898 logging_text + "kafka_write notification Exception {}".format(e)
4899 )
4900 self.logger.debug(logging_text + "Exit")
4901 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
4902 return nslcmop_operation_state, detailed_status
4903
4904 async def scale(self, nsr_id, nslcmop_id):
4905 # Try to lock HA task here
4906 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4907 if not task_is_locked_by_me:
4908 return
4909
4910 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
4911 stage = ["", "", ""]
4912 tasks_dict_info = {}
4913 # ^ stage, step, VIM progress
4914 self.logger.debug(logging_text + "Enter")
4915 # get all needed from database
4916 db_nsr = None
4917 db_nslcmop_update = {}
4918 db_nsr_update = {}
4919 exc = None
4920 # in case of error, indicates which part of the scale operation failed, in order to set nsr to error status
4921 scale_process = None
4922 old_operational_status = ""
4923 old_config_status = ""
4924 nsi_id = None
4925 try:
4926 # wait for any previous tasks in process
4927 step = "Waiting for previous operations to terminate"
4928 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4929 self._write_ns_status(
4930 nsr_id=nsr_id,
4931 ns_state=None,
4932 current_operation="SCALING",
4933 current_operation_id=nslcmop_id,
4934 )
4935
4936 step = "Getting nslcmop from database"
4937 self.logger.debug(
4938 step + " after having waited for previous tasks to be completed"
4939 )
4940 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4941
4942 step = "Getting nsr from database"
4943 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4944 old_operational_status = db_nsr["operational-status"]
4945 old_config_status = db_nsr["config-status"]
4946
4947 step = "Parsing scaling parameters"
4948 db_nsr_update["operational-status"] = "scaling"
4949 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4950 nsr_deployed = db_nsr["_admin"].get("deployed")
4951
4952 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
4953 "scaleByStepData"
4954 ]["member-vnf-index"]
4955 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
4956 "scaleByStepData"
4957 ]["scaling-group-descriptor"]
4958 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
4959 # for backward compatibility
4960 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4961 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4962 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4963 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4964
4965 step = "Getting vnfr from database"
4966 db_vnfr = self.db.get_one(
4967 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4968 )
4969
4970 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4971
4972 step = "Getting vnfd from database"
4973 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4974
4975 base_folder = db_vnfd["_admin"]["storage"]
4976
4977 step = "Getting scaling-group-descriptor"
4978 scaling_descriptor = find_in_list(
4979 get_scaling_aspect(db_vnfd),
4980 lambda scale_desc: scale_desc["name"] == scaling_group,
4981 )
4982 if not scaling_descriptor:
4983 raise LcmException(
4984 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
4985 "at vnfd:scaling-group-descriptor".format(scaling_group)
4986 )
4987
4988 step = "Sending scale order to VIM"
4989 # TODO check if ns is in a proper status
4990 nb_scale_op = 0
4991 if not db_nsr["_admin"].get("scaling-group"):
4992 self.update_db_2(
4993 "nsrs",
4994 nsr_id,
4995 {
4996 "_admin.scaling-group": [
4997 {"name": scaling_group, "nb-scale-op": 0}
4998 ]
4999 },
5000 )
5001 admin_scale_index = 0
5002 else:
5003 for admin_scale_index, admin_scale_info in enumerate(
5004 db_nsr["_admin"]["scaling-group"]
5005 ):
5006 if admin_scale_info["name"] == scaling_group:
5007 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
5008 break
5009 else: # not found, set index one plus last element and add new entry with the name
5010 admin_scale_index += 1
5011 db_nsr_update[
5012 "_admin.scaling-group.{}.name".format(admin_scale_index)
5013 ] = scaling_group
5014
5015 vca_scaling_info = []
5016 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
5017 if scaling_type == "SCALE_OUT":
5018 if "aspect-delta-details" not in scaling_descriptor:
5019 raise LcmException(
5020 "Aspect delta details not fount in scaling descriptor {}".format(
5021 scaling_descriptor["name"]
5022 )
5023 )
5024 # check whether max-instance-count is reached
5025 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5026
5027 scaling_info["scaling_direction"] = "OUT"
5028 scaling_info["vdu-create"] = {}
5029 scaling_info["kdu-create"] = {}
5030 for delta in deltas:
5031 for vdu_delta in delta.get("vdu-delta", {}):
5032 vdud = get_vdu(db_vnfd, vdu_delta["id"])
5033 # vdu_index also provides the number of instances of the targeted vdu
5034 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5035 cloud_init_text = self._get_vdu_cloud_init_content(
5036 vdud, db_vnfd
5037 )
5038 if cloud_init_text:
5039 additional_params = (
5040 self._get_vdu_additional_params(db_vnfr, vdud["id"])
5041 or {}
5042 )
5043 cloud_init_list = []
5044
5045 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5046 max_instance_count = 10
5047 if vdu_profile and "max-number-of-instances" in vdu_profile:
5048 max_instance_count = vdu_profile.get(
5049 "max-number-of-instances", 10
5050 )
5051
5052 default_instance_num = get_number_of_instances(
5053 db_vnfd, vdud["id"]
5054 )
5055 instances_number = vdu_delta.get("number-of-instances", 1)
5056 nb_scale_op += instances_number
5057
5058 new_instance_count = nb_scale_op + default_instance_num
5059 # Check whether the new count exceeds the max while the current vdu count is below it;
5060 # if so, adjust the number of instances to create
5061 if new_instance_count > max_instance_count > vdu_count:
5062 instances_number = new_instance_count - max_instance_count
5063 else:
5064 instances_number = instances_number
5065
5066 if new_instance_count > max_instance_count:
5067 raise LcmException(
5068 "reached the limit of {} (max-instance-count) "
5069 "scaling-out operations for the "
5070 "scaling-group-descriptor '{}'".format(
5071 nb_scale_op, scaling_group
5072 )
5073 )
5074 for x in range(vdu_delta.get("number-of-instances", 1)):
5075 if cloud_init_text:
5076 # TODO Information of its own ip is not available because db_vnfr is not updated.
5077 additional_params["OSM"] = get_osm_params(
5078 db_vnfr, vdu_delta["id"], vdu_index + x
5079 )
5080 cloud_init_list.append(
5081 self._parse_cloud_init(
5082 cloud_init_text,
5083 additional_params,
5084 db_vnfd["id"],
5085 vdud["id"],
5086 )
5087 )
5088 vca_scaling_info.append(
5089 {
5090 "osm_vdu_id": vdu_delta["id"],
5091 "member-vnf-index": vnf_index,
5092 "type": "create",
5093 "vdu_index": vdu_index + x,
5094 }
5095 )
5096 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
5097 for kdu_delta in delta.get("kdu-resource-delta", {}):
5098 kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
5099 kdu_name = kdu_profile["kdu-name"]
5100 resource_name = kdu_profile["resource-name"]
5101
5102 # Might have different kdus in the same delta
5103 # Should have list for each kdu
5104 if not scaling_info["kdu-create"].get(kdu_name, None):
5105 scaling_info["kdu-create"][kdu_name] = []
5106
5107 kdur = get_kdur(db_vnfr, kdu_name)
5108 if kdur.get("helm-chart"):
5109 k8s_cluster_type = "helm-chart-v3"
5110 self.logger.debug("kdur: {}".format(kdur))
5111 if (
5112 kdur.get("helm-version")
5113 and kdur.get("helm-version") == "v2"
5114 ):
5115 k8s_cluster_type = "helm-chart"
5116 raise NotImplementedError
5117 elif kdur.get("juju-bundle"):
5118 k8s_cluster_type = "juju-bundle"
5119 else:
5120 raise LcmException(
5121 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5122 "juju-bundle. Maybe an old NBI version is running".format(
5123 db_vnfr["member-vnf-index-ref"], kdu_name
5124 )
5125 )
5126
5127 max_instance_count = 10
5128 if kdu_profile and "max-number-of-instances" in kdu_profile:
5129 max_instance_count = kdu_profile.get(
5130 "max-number-of-instances", 10
5131 )
5132
5133 nb_scale_op += kdu_delta.get("number-of-instances", 1)
5134 deployed_kdu, _ = get_deployed_kdu(
5135 nsr_deployed, kdu_name, vnf_index
5136 )
5137 if deployed_kdu is None:
5138 raise LcmException(
5139 "KDU '{}' for vnf '{}' not deployed".format(
5140 kdu_name, vnf_index
5141 )
5142 )
5143 kdu_instance = deployed_kdu.get("kdu-instance")
5144 instance_num = await self.k8scluster_map[
5145 k8s_cluster_type
5146 ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
5147 kdu_replica_count = instance_num + kdu_delta.get(
5148 "number-of-instances", 1
5149 )
5150
5151 # Check whether the new count exceeds the max while the current instance_num is below it;
5152 # if so, cap the kdu replica count at the maximum instance count
5153 if kdu_replica_count > max_instance_count > instance_num:
5154 kdu_replica_count = max_instance_count
5155 if kdu_replica_count > max_instance_count:
5156 raise LcmException(
5157 "reached the limit of {} (max-instance-count) "
5158 "scaling-out operations for the "
5159 "scaling-group-descriptor '{}'".format(
5160 instance_num, scaling_group
5161 )
5162 )
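# Worked example (added; numbers are assumptions): with instance_num=2 current replicas,
# a delta of 2 and max_instance_count=3, kdu_replica_count starts at 4, is capped to 3 by
# the first check above, and the scale-out proceeds with 3 replicas instead of raising.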
5163
5164 for x in range(kdu_delta.get("number-of-instances", 1)):
5165 vca_scaling_info.append(
5166 {
5167 "osm_kdu_id": kdu_name,
5168 "member-vnf-index": vnf_index,
5169 "type": "create",
5170 "kdu_index": instance_num + x - 1,
5171 }
5172 )
5173 scaling_info["kdu-create"][kdu_name].append(
5174 {
5175 "member-vnf-index": vnf_index,
5176 "type": "create",
5177 "k8s-cluster-type": k8s_cluster_type,
5178 "resource-name": resource_name,
5179 "scale": kdu_replica_count,
5180 }
5181 )
5182 elif scaling_type == "SCALE_IN":
5183 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5184
5185 scaling_info["scaling_direction"] = "IN"
5186 scaling_info["vdu-delete"] = {}
5187 scaling_info["kdu-delete"] = {}
5188
5189 for delta in deltas:
5190 for vdu_delta in delta.get("vdu-delta", {}):
5191 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5192 min_instance_count = 0
5193 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5194 if vdu_profile and "min-number-of-instances" in vdu_profile:
5195 min_instance_count = vdu_profile["min-number-of-instances"]
5196
5197 default_instance_num = get_number_of_instances(
5198 db_vnfd, vdu_delta["id"]
5199 )
5200 instance_num = vdu_delta.get("number-of-instances", 1)
5201 nb_scale_op -= instance_num
5202
5203 new_instance_count = nb_scale_op + default_instance_num
5204
5205 if new_instance_count < min_instance_count < vdu_count:
5206 instances_number = min_instance_count - new_instance_count
5207 else:
5208 instances_number = instance_num
5209
5210 if new_instance_count < min_instance_count:
5211 raise LcmException(
5212 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5213 "scaling-group-descriptor '{}'".format(
5214 nb_scale_op, scaling_group
5215 )
5216 )
5217 for x in range(vdu_delta.get("number-of-instances", 1)):
5218 vca_scaling_info.append(
5219 {
5220 "osm_vdu_id": vdu_delta["id"],
5221 "member-vnf-index": vnf_index,
5222 "type": "delete",
5223 "vdu_index": vdu_index - 1 - x,
5224 }
5225 )
5226 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
5227 for kdu_delta in delta.get("kdu-resource-delta", {}):
5228 kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
5229 kdu_name = kdu_profile["kdu-name"]
5230 resource_name = kdu_profile["resource-name"]
5231
5232 if not scaling_info["kdu-delete"].get(kdu_name, None):
5233 scaling_info["kdu-delete"][kdu_name] = []
5234
5235 kdur = get_kdur(db_vnfr, kdu_name)
5236 if kdur.get("helm-chart"):
5237 k8s_cluster_type = "helm-chart-v3"
5238 self.logger.debug("kdur: {}".format(kdur))
5239 if kdur.get("helm-version") == "v2":
5240 # scaling of Helm v2 charts is not supported by this feature
5241 k8s_cluster_type = "helm-chart"
5242 raise NotImplementedError(
5243 "Scaling of Helm v2 (helm-chart) KDUs is not supported"
5244 )
5245 elif kdur.get("juju-bundle"):
5246 k8s_cluster_type = "juju-bundle"
5247 else:
5248 raise LcmException(
5249 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5250 "juju-bundle. Maybe an old NBI version is running".format(
5251 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
5252 )
5253 )
5254
5255 min_instance_count = 0
5256 if kdu_profile and "min-number-of-instances" in kdu_profile:
5257 min_instance_count = kdu_profile["min-number-of-instances"]
5258
5259 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
5260 deployed_kdu, _ = get_deployed_kdu(
5261 nsr_deployed, kdu_name, vnf_index
5262 )
5263 if deployed_kdu is None:
5264 raise LcmException(
5265 "KDU '{}' for vnf '{}' not deployed".format(
5266 kdu_name, vnf_index
5267 )
5268 )
5269 kdu_instance = deployed_kdu.get("kdu-instance")
5270 instance_num = await self.k8scluster_map[
5271 k8s_cluster_type
5272 ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
5273 kdu_replica_count = instance_num - kdu_delta.get(
5274 "number-of-instances", 1
5275 )
5276
5277 if kdu_replica_count < min_instance_count < instance_num:
5278 kdu_replica_count = min_instance_count
5279 if kdu_replica_count < min_instance_count:
5280 raise LcmException(
5281 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5282 "scaling-group-descriptor '{}'".format(
5283 min_instance_count, scaling_group
5284 )
5285 )
5286
5287 for x in range(kdu_delta.get("number-of-instances", 1)):
5288 vca_scaling_info.append(
5289 {
5290 "osm_kdu_id": kdu_name,
5291 "member-vnf-index": vnf_index,
5292 "type": "delete",
5293 "kdu_index": instance_num - x - 1,
5294 }
5295 )
5296 scaling_info["kdu-delete"][kdu_name].append(
5297 {
5298 "member-vnf-index": vnf_index,
5299 "type": "delete",
5300 "k8s-cluster-type": k8s_cluster_type,
5301 "resource-name": resource_name,
5302 "scale": kdu_replica_count,
5303 }
5304 )
5305
5306 # record in scaling_info (VDU_SCALE_INFO) the interfaces and ip_addresses of the VDUs to be deleted
5307 vdu_delete = copy(scaling_info.get("vdu-delete"))
5308 if scaling_info["scaling_direction"] == "IN":
5309 for vdur in reversed(db_vnfr["vdur"]):
5310 if vdu_delete.get(vdur["vdu-id-ref"]):
5311 vdu_delete[vdur["vdu-id-ref"]] -= 1
5312 scaling_info["vdu"].append(
5313 {
5314 "name": vdur.get("name") or vdur.get("vdu-name"),
5315 "vdu_id": vdur["vdu-id-ref"],
5316 "interface": [],
5317 }
5318 )
5319 for interface in vdur["interfaces"]:
5320 scaling_info["vdu"][-1]["interface"].append(
5321 {
5322 "name": interface["name"],
5323 "ip_address": interface["ip-address"],
5324 "mac_address": interface.get("mac-address"),
5325 }
5326 )
5327 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
5328
5329 # PRE-SCALE BEGIN
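# Execute the vnf-config-primitives bound to the pre-scale-in / pre-scale-out
# triggers of scaling-config-action before any resource is added or removed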
5330 step = "Executing pre-scale vnf-config-primitive"
5331 if scaling_descriptor.get("scaling-config-action"):
5332 for scaling_config_action in scaling_descriptor[
5333 "scaling-config-action"
5334 ]:
5335 if (
5336 scaling_config_action.get("trigger") == "pre-scale-in"
5337 and scaling_type == "SCALE_IN"
5338 ) or (
5339 scaling_config_action.get("trigger") == "pre-scale-out"
5340 and scaling_type == "SCALE_OUT"
5341 ):
5342 vnf_config_primitive = scaling_config_action[
5343 "vnf-config-primitive-name-ref"
5344 ]
5345 step = db_nslcmop_update[
5346 "detailed-status"
5347 ] = "executing pre-scale scaling-config-action '{}'".format(
5348 vnf_config_primitive
5349 )
5350
5351 # look for primitive
5352 for config_primitive in (
5353 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5354 ).get("config-primitive", ()):
5355 if config_primitive["name"] == vnf_config_primitive:
5356 break
5357 else:
5358 raise LcmException(
5359 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
5360 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
5361 "primitive".format(scaling_group, vnf_config_primitive)
5362 )
5363
5364 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5365 if db_vnfr.get("additionalParamsForVnf"):
5366 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5367
5368 scale_process = "VCA"
5369 db_nsr_update["config-status"] = "configuring pre-scaling"
5370 primitive_params = self._map_primitive_params(
5371 config_primitive, {}, vnfr_params
5372 )
5373
5374 # Pre-scale retry check: Check if this sub-operation has been executed before
5375 op_index = self._check_or_add_scale_suboperation(
5376 db_nslcmop,
5377 nslcmop_id,
5378 vnf_index,
5379 vnf_config_primitive,
5380 primitive_params,
5381 "PRE-SCALE",
5382 )
5383 if op_index == self.SUBOPERATION_STATUS_SKIP:
5384 # Skip sub-operation
5385 result = "COMPLETED"
5386 result_detail = "Done"
5387 self.logger.debug(
5388 logging_text
5389 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
5390 vnf_config_primitive, result, result_detail
5391 )
5392 )
5393 else:
5394 if op_index == self.SUBOPERATION_STATUS_NEW:
5395 # New sub-operation: Get index of this sub-operation
5396 op_index = (
5397 len(db_nslcmop.get("_admin", {}).get("operations"))
5398 - 1
5399 )
5400 self.logger.debug(
5401 logging_text
5402 + "vnf_config_primitive={} New sub-operation".format(
5403 vnf_config_primitive
5404 )
5405 )
5406 else:
5407 # retry: Get registered params for this existing sub-operation
5408 op = db_nslcmop.get("_admin", {}).get("operations", [])[
5409 op_index
5410 ]
5411 vnf_index = op.get("member_vnf_index")
5412 vnf_config_primitive = op.get("primitive")
5413 primitive_params = op.get("primitive_params")
5414 self.logger.debug(
5415 logging_text
5416 + "vnf_config_primitive={} Sub-operation retry".format(
5417 vnf_config_primitive
5418 )
5419 )
5420 # Execute the primitive, either with new (first-time) or registered (retry) args
5421 ee_descriptor_id = config_primitive.get(
5422 "execution-environment-ref"
5423 )
5424 primitive_name = config_primitive.get(
5425 "execution-environment-primitive", vnf_config_primitive
5426 )
5427 ee_id, vca_type = self._look_for_deployed_vca(
5428 nsr_deployed["VCA"],
5429 member_vnf_index=vnf_index,
5430 vdu_id=None,
5431 vdu_count_index=None,
5432 ee_descriptor_id=ee_descriptor_id,
5433 )
5434 result, result_detail = await self._ns_execute_primitive(
5435 ee_id,
5436 primitive_name,
5437 primitive_params,
5438 vca_type=vca_type,
5439 vca_id=vca_id,
5440 )
5441 self.logger.debug(
5442 logging_text
5443 + "vnf_config_primitive={} Done with result {} {}".format(
5444 vnf_config_primitive, result, result_detail
5445 )
5446 )
5447 # Update operationState = COMPLETED | FAILED
5448 self._update_suboperation_status(
5449 db_nslcmop, op_index, result, result_detail
5450 )
5451
5452 if result == "FAILED":
5453 raise LcmException(result_detail)
5454 db_nsr_update["config-status"] = old_config_status
5455 scale_process = None
5456 # PRE-SCALE END
5457
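# Persist the accumulated scale-operation counter and timestamp for this scaling group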
5458 db_nsr_update[
5459 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
5460 ] = nb_scale_op
5461 db_nsr_update[
5462 "_admin.scaling-group.{}.time".format(admin_scale_index)
5463 ] = time()
5464
5465 # SCALE-IN VCA - BEGIN
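# Destroy the execution environments of the instances being removed, running their
# terminate primitives first unless skip_terminate_primitives was requested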
5466 if vca_scaling_info:
5467 step = db_nslcmop_update[
5468 "detailed-status"
5469 ] = "Deleting the execution environments"
5470 scale_process = "VCA"
5471 for vca_info in vca_scaling_info:
5472 if vca_info["type"] == "delete":
5473 member_vnf_index = str(vca_info["member-vnf-index"])
5474 self.logger.debug(
5475 logging_text + "vdu info: {}".format(vca_info)
5476 )
5477 if vca_info.get("osm_vdu_id"):
5478 vdu_id = vca_info["osm_vdu_id"]
5479 vdu_index = int(vca_info["vdu_index"])
5480 stage[
5481 1
5482 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5483 member_vnf_index, vdu_id, vdu_index
5484 )
5485 else:
5486 vdu_index = 0
5487 kdu_id = vca_info["osm_kdu_id"]
5488 stage[
5489 1
5490 ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
5491 member_vnf_index, kdu_id, vdu_index
5492 )
5493 stage[2] = step = "Scaling in VCA"
5494 self._write_op_status(op_id=nslcmop_id, stage=stage)
5495 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
5496 config_update = db_nsr["configurationStatus"]
5497 for vca_index, vca in enumerate(vca_update):
5498 if (
5499 (vca and vca.get("ee_id"))
5500 and vca["member-vnf-index"] == member_vnf_index
5501 and vca["vdu_count_index"] == vdu_index
5502 ):
5503 if vca.get("vdu_id"):
5504 config_descriptor = get_configuration(
5505 db_vnfd, vca.get("vdu_id")
5506 )
5507 elif vca.get("kdu_name"):
5508 config_descriptor = get_configuration(
5509 db_vnfd, vca.get("kdu_name")
5510 )
5511 else:
5512 config_descriptor = get_configuration(
5513 db_vnfd, db_vnfd["id"]
5514 )
5515 operation_params = (
5516 db_nslcmop.get("operationParams") or {}
5517 )
5518 exec_terminate_primitives = not operation_params.get(
5519 "skip_terminate_primitives"
5520 ) and vca.get("needed_terminate")
5521 task = asyncio.ensure_future(
5522 asyncio.wait_for(
5523 self.destroy_N2VC(
5524 logging_text,
5525 db_nslcmop,
5526 vca,
5527 config_descriptor,
5528 vca_index,
5529 destroy_ee=True,
5530 exec_primitives=exec_terminate_primitives,
5531 scaling_in=True,
5532 vca_id=vca_id,
5533 ),
5534 timeout=self.timeout_charm_delete,
5535 )
5536 )
5537 tasks_dict_info[task] = "Terminating VCA {}".format(
5538 vca.get("ee_id")
5539 )
5540 del vca_update[vca_index]
5541 del config_update[vca_index]
5542 # wait for pending tasks of terminate primitives
5543 if tasks_dict_info:
5544 self.logger.debug(
5545 logging_text
5546 + "Waiting for tasks {}".format(
5547 list(tasks_dict_info.keys())
5548 )
5549 )
5550 error_list = await self._wait_for_tasks(
5551 logging_text,
5552 tasks_dict_info,
5553 min(
5554 self.timeout_charm_delete, self.timeout_ns_terminate
5555 ),
5556 stage,
5557 nslcmop_id,
5558 )
5559 tasks_dict_info.clear()
5560 if error_list:
5561 raise LcmException("; ".join(error_list))
5562
5563 db_vca_and_config_update = {
5564 "_admin.deployed.VCA": vca_update,
5565 "configurationStatus": config_update,
5566 }
5567 self.update_db_2(
5568 "nsrs", db_nsr["_id"], db_vca_and_config_update
5569 )
5570 scale_process = None
5571 # SCALE-IN VCA - END
5572
5573 # SCALE RO - BEGIN
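# Delegate the actual VDU creation/deletion to NG-RO (only when the RO is configured in NG mode)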
5574 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
5575 scale_process = "RO"
5576 if self.ro_config.get("ng"):
5577 await self._scale_ng_ro(
5578 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
5579 )
5580 scaling_info.pop("vdu-create", None)
5581 scaling_info.pop("vdu-delete", None)
5582
5583 scale_process = None
5584 # SCALE RO - END
5585
5586 # SCALE KDU - BEGIN
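# Apply the new KDU replica counts through the K8s connectors (see _scale_kdu below)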
5587 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
5588 scale_process = "KDU"
5589 await self._scale_kdu(
5590 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
5591 )
5592 scaling_info.pop("kdu-create", None)
5593 scaling_info.pop("kdu-delete", None)
5594
5595 scale_process = None
5596 # SCALE KDU - END
5597
5598 if db_nsr_update:
5599 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5600
5601 # SCALE-UP VCA - BEGIN
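# Deploy new execution environments (charms) for the VDU/KDU instances created by the scale-out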
5602 if vca_scaling_info:
5603 step = db_nslcmop_update[
5604 "detailed-status"
5605 ] = "Creating new execution environments"
5606 scale_process = "VCA"
5607 for vca_info in vca_scaling_info:
5608 if vca_info["type"] == "create":
5609 member_vnf_index = str(vca_info["member-vnf-index"])
5610 self.logger.debug(
5611 logging_text + "vdu info: {}".format(vca_info)
5612 )
5613 vnfd_id = db_vnfr["vnfd-ref"]
5614 if vca_info.get("osm_vdu_id"):
5615 vdu_index = int(vca_info["vdu_index"])
5616 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5617 if db_vnfr.get("additionalParamsForVnf"):
5618 deploy_params.update(
5619 parse_yaml_strings(
5620 db_vnfr["additionalParamsForVnf"].copy()
5621 )
5622 )
5623 descriptor_config = get_configuration(
5624 db_vnfd, db_vnfd["id"]
5625 )
5626 if descriptor_config:
5627 vdu_id = None
5628 vdu_name = None
5629 kdu_name = None
5630 self._deploy_n2vc(
5631 logging_text=logging_text
5632 + "member_vnf_index={} ".format(member_vnf_index),
5633 db_nsr=db_nsr,
5634 db_vnfr=db_vnfr,
5635 nslcmop_id=nslcmop_id,
5636 nsr_id=nsr_id,
5637 nsi_id=nsi_id,
5638 vnfd_id=vnfd_id,
5639 vdu_id=vdu_id,
5640 kdu_name=kdu_name,
5641 member_vnf_index=member_vnf_index,
5642 vdu_index=vdu_index,
5643 vdu_name=vdu_name,
5644 deploy_params=deploy_params,
5645 descriptor_config=descriptor_config,
5646 base_folder=base_folder,
5647 task_instantiation_info=tasks_dict_info,
5648 stage=stage,
5649 )
5650 vdu_id = vca_info["osm_vdu_id"]
5651 vdur = find_in_list(
5652 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
5653 )
5654 descriptor_config = get_configuration(db_vnfd, vdu_id)
5655 if vdur.get("additionalParams"):
5656 deploy_params_vdu = parse_yaml_strings(
5657 vdur["additionalParams"]
5658 )
5659 else:
5660 deploy_params_vdu = deploy_params
5661 deploy_params_vdu["OSM"] = get_osm_params(
5662 db_vnfr, vdu_id, vdu_count_index=vdu_index
5663 )
5664 if descriptor_config:
5665 vdu_name = None
5666 kdu_name = None
5667 stage[
5668 1
5669 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5670 member_vnf_index, vdu_id, vdu_index
5671 )
5672 stage[2] = step = "Scaling out VCA"
5673 self._write_op_status(op_id=nslcmop_id, stage=stage)
5674 self._deploy_n2vc(
5675 logging_text=logging_text
5676 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5677 member_vnf_index, vdu_id, vdu_index
5678 ),
5679 db_nsr=db_nsr,
5680 db_vnfr=db_vnfr,
5681 nslcmop_id=nslcmop_id,
5682 nsr_id=nsr_id,
5683 nsi_id=nsi_id,
5684 vnfd_id=vnfd_id,
5685 vdu_id=vdu_id,
5686 kdu_name=kdu_name,
5687 member_vnf_index=member_vnf_index,
5688 vdu_index=vdu_index,
5689 vdu_name=vdu_name,
5690 deploy_params=deploy_params_vdu,
5691 descriptor_config=descriptor_config,
5692 base_folder=base_folder,
5693 task_instantiation_info=tasks_dict_info,
5694 stage=stage,
5695 )
5696 else:
5697 kdu_name = vca_info["osm_kdu_id"]
5698 descriptor_config = get_configuration(db_vnfd, kdu_name)
5699 if descriptor_config:
5700 vdu_id = None
5701 kdu_index = int(vca_info["kdu_index"])
5702 vdu_name = None
5703 kdur = next(
5704 x
5705 for x in db_vnfr["kdur"]
5706 if x["kdu-name"] == kdu_name
5707 )
5708 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
5709 if kdur.get("additionalParams"):
5710 deploy_params_kdu = parse_yaml_strings(
5711 kdur["additionalParams"]
5712 )
5713
5714 self._deploy_n2vc(
5715 logging_text=logging_text,
5716 db_nsr=db_nsr,
5717 db_vnfr=db_vnfr,
5718 nslcmop_id=nslcmop_id,
5719 nsr_id=nsr_id,
5720 nsi_id=nsi_id,
5721 vnfd_id=vnfd_id,
5722 vdu_id=vdu_id,
5723 kdu_name=kdu_name,
5724 member_vnf_index=member_vnf_index,
5725 vdu_index=kdu_index,
5726 vdu_name=vdu_name,
5727 deploy_params=deploy_params_kdu,
5728 descriptor_config=descriptor_config,
5729 base_folder=base_folder,
5730 task_instantiation_info=tasks_dict_info,
5731 stage=stage,
5732 )
5733 # SCALE-UP VCA - END
5734 scale_process = None
5735
5736 # POST-SCALE BEGIN
5737 # execute the post-scale vnf-config-primitives
5738 step = "Executing post-scale vnf-config-primitive"
5739 if scaling_descriptor.get("scaling-config-action"):
5740 for scaling_config_action in scaling_descriptor[
5741 "scaling-config-action"
5742 ]:
5743 if (
5744 scaling_config_action.get("trigger") == "post-scale-in"
5745 and scaling_type == "SCALE_IN"
5746 ) or (
5747 scaling_config_action.get("trigger") == "post-scale-out"
5748 and scaling_type == "SCALE_OUT"
5749 ):
5750 vnf_config_primitive = scaling_config_action[
5751 "vnf-config-primitive-name-ref"
5752 ]
5753 step = db_nslcmop_update[
5754 "detailed-status"
5755 ] = "executing post-scale scaling-config-action '{}'".format(
5756 vnf_config_primitive
5757 )
5758
5759 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5760 if db_vnfr.get("additionalParamsForVnf"):
5761 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5762
5763 # look for primitive
5764 for config_primitive in (
5765 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5766 ).get("config-primitive", ()):
5767 if config_primitive["name"] == vnf_config_primitive:
5768 break
5769 else:
5770 raise LcmException(
5771 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
5772 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
5773 "config-primitive".format(
5774 scaling_group, vnf_config_primitive
5775 )
5776 )
5777 scale_process = "VCA"
5778 db_nsr_update["config-status"] = "configuring post-scaling"
5779 primitive_params = self._map_primitive_params(
5780 config_primitive, {}, vnfr_params
5781 )
5782
5783 # Post-scale retry check: Check if this sub-operation has been executed before
5784 op_index = self._check_or_add_scale_suboperation(
5785 db_nslcmop,
5786 nslcmop_id,
5787 vnf_index,
5788 vnf_config_primitive,
5789 primitive_params,
5790 "POST-SCALE",
5791 )
5792 if op_index == self.SUBOPERATION_STATUS_SKIP:
5793 # Skip sub-operation
5794 result = "COMPLETED"
5795 result_detail = "Done"
5796 self.logger.debug(
5797 logging_text
5798 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
5799 vnf_config_primitive, result, result_detail
5800 )
5801 )
5802 else:
5803 if op_index == self.SUBOPERATION_STATUS_NEW:
5804 # New sub-operation: Get index of this sub-operation
5805 op_index = (
5806 len(db_nslcmop.get("_admin", {}).get("operations"))
5807 - 1
5808 )
5809 self.logger.debug(
5810 logging_text
5811 + "vnf_config_primitive={} New sub-operation".format(
5812 vnf_config_primitive
5813 )
5814 )
5815 else:
5816 # retry: Get registered params for this existing sub-operation
5817 op = db_nslcmop.get("_admin", {}).get("operations", [])[
5818 op_index
5819 ]
5820 vnf_index = op.get("member_vnf_index")
5821 vnf_config_primitive = op.get("primitive")
5822 primitive_params = op.get("primitive_params")
5823 self.logger.debug(
5824 logging_text
5825 + "vnf_config_primitive={} Sub-operation retry".format(
5826 vnf_config_primitive
5827 )
5828 )
5829 # Execute the primitive, either with new (first-time) or registered (retry) args
5830 ee_descriptor_id = config_primitive.get(
5831 "execution-environment-ref"
5832 )
5833 primitive_name = config_primitive.get(
5834 "execution-environment-primitive", vnf_config_primitive
5835 )
5836 ee_id, vca_type = self._look_for_deployed_vca(
5837 nsr_deployed["VCA"],
5838 member_vnf_index=vnf_index,
5839 vdu_id=None,
5840 vdu_count_index=None,
5841 ee_descriptor_id=ee_descriptor_id,
5842 )
5843 result, result_detail = await self._ns_execute_primitive(
5844 ee_id,
5845 primitive_name,
5846 primitive_params,
5847 vca_type=vca_type,
5848 vca_id=vca_id,
5849 )
5850 self.logger.debug(
5851 logging_text
5852 + "vnf_config_primitive={} Done with result {} {}".format(
5853 vnf_config_primitive, result, result_detail
5854 )
5855 )
5856 # Update operationState = COMPLETED | FAILED
5857 self._update_suboperation_status(
5858 db_nslcmop, op_index, result, result_detail
5859 )
5860
5861 if result == "FAILED":
5862 raise LcmException(result_detail)
5863 db_nsr_update["config-status"] = old_config_status
5864 scale_process = None
5865 # POST-SCALE END
5866
5867 db_nsr_update[
5868 "detailed-status"
5869 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
5870 db_nsr_update["operational-status"] = (
5871 "running"
5872 if old_operational_status == "failed"
5873 else old_operational_status
5874 )
5875 db_nsr_update["config-status"] = old_config_status
5876 return
5877 except (
5878 ROclient.ROClientException,
5879 DbException,
5880 LcmException,
5881 NgRoException,
5882 ) as e:
5883 self.logger.error(logging_text + "Exit Exception {}".format(e))
5884 exc = e
5885 except asyncio.CancelledError:
5886 self.logger.error(
5887 logging_text + "Cancelled Exception while '{}'".format(step)
5888 )
5889 exc = "Operation was cancelled"
5890 except Exception as e:
5891 exc = traceback.format_exc()
5892 self.logger.critical(
5893 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5894 exc_info=True,
5895 )
5896 finally:
5897 self._write_ns_status(
5898 nsr_id=nsr_id,
5899 ns_state=None,
5900 current_operation="IDLE",
5901 current_operation_id=None,
5902 )
5903 if tasks_dict_info:
5904 stage[1] = "Waiting for pending instantiation tasks."
5905 self.logger.debug(logging_text + stage[1])
5906 exc = await self._wait_for_tasks(
5907 logging_text,
5908 tasks_dict_info,
5909 self.timeout_ns_deploy,
5910 stage,
5911 nslcmop_id,
5912 nsr_id=nsr_id,
5913 )
5914 if exc:
5915 db_nslcmop_update[
5916 "detailed-status"
5917 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5918 nslcmop_operation_state = "FAILED"
5919 if db_nsr:
5920 db_nsr_update["operational-status"] = old_operational_status
5921 db_nsr_update["config-status"] = old_config_status
5922 db_nsr_update["detailed-status"] = ""
5923 if scale_process:
5924 if "VCA" in scale_process:
5925 db_nsr_update["config-status"] = "failed"
5926 if "RO" in scale_process:
5927 db_nsr_update["operational-status"] = "failed"
5928 db_nsr_update[
5929 "detailed-status"
5930 ] = "FAILED scaling nslcmop={} {}: {}".format(
5931 nslcmop_id, step, exc
5932 )
5933 else:
5934 error_description_nslcmop = None
5935 nslcmop_operation_state = "COMPLETED"
5936 db_nslcmop_update["detailed-status"] = "Done"
5937
5938 self._write_op_status(
5939 op_id=nslcmop_id,
5940 stage="",
5941 error_message=error_description_nslcmop,
5942 operation_state=nslcmop_operation_state,
5943 other_update=db_nslcmop_update,
5944 )
5945 if db_nsr:
5946 self._write_ns_status(
5947 nsr_id=nsr_id,
5948 ns_state=None,
5949 current_operation="IDLE",
5950 current_operation_id=None,
5951 other_update=db_nsr_update,
5952 )
5953
5954 if nslcmop_operation_state:
5955 try:
5956 msg = {
5957 "nsr_id": nsr_id,
5958 "nslcmop_id": nslcmop_id,
5959 "operationState": nslcmop_operation_state,
5960 }
5961 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
5962 except Exception as e:
5963 self.logger.error(
5964 logging_text + "kafka_write notification Exception {}".format(e)
5965 )
5966 self.logger.debug(logging_text + "Exit")
5967 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
5968
5969 async def _scale_kdu(
5970 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
5971 ):
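"""
Scale in/out the KDUs listed in scaling_info ("kdu-create" / "kdu-delete").
For each KDU, the matching terminate/initial config primitives are executed
around the K8s scale operation when the KDU defines them and does not use a
Juju execution environment.
"""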
5972 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
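# Each entry was built in the scaling deltas above; illustrative shape (values are examples only):
# {"member-vnf-index": "1", "type": "create", "k8s-cluster-type": "helm-chart-v3",
#  "resource-name": "my-deployment", "scale": 3}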
5973 for kdu_name in _scaling_info:
5974 for kdu_scaling_info in _scaling_info[kdu_name]:
5975 deployed_kdu, index = get_deployed_kdu(
5976 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
5977 )
5978 cluster_uuid = deployed_kdu["k8scluster-uuid"]
5979 kdu_instance = deployed_kdu["kdu-instance"]
5980 scale = int(kdu_scaling_info["scale"])
5981 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
5982
5983 db_dict = {
5984 "collection": "nsrs",
5985 "filter": {"_id": nsr_id},
5986 "path": "_admin.deployed.K8s.{}".format(index),
5987 }
5988
5989 step = "scaling application {}".format(
5990 kdu_scaling_info["resource-name"]
5991 )
5992 self.logger.debug(logging_text + step)
5993
5994 if kdu_scaling_info["type"] == "delete":
5995 kdu_config = get_configuration(db_vnfd, kdu_name)
5996 if (
5997 kdu_config
5998 and kdu_config.get("terminate-config-primitive")
5999 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6000 ):
6001 terminate_config_primitive_list = kdu_config.get(
6002 "terminate-config-primitive"
6003 )
6004 terminate_config_primitive_list.sort(
6005 key=lambda val: int(val["seq"])
6006 )
6007
6008 for (
6009 terminate_config_primitive
6010 ) in terminate_config_primitive_list:
6011 primitive_params_ = self._map_primitive_params(
6012 terminate_config_primitive, {}, {}
6013 )
6014 step = "execute terminate config primitive"
6015 self.logger.debug(logging_text + step)
6016 await asyncio.wait_for(
6017 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6018 cluster_uuid=cluster_uuid,
6019 kdu_instance=kdu_instance,
6020 primitive_name=terminate_config_primitive["name"],
6021 params=primitive_params_,
6022 db_dict=db_dict,
6023 vca_id=vca_id,
6024 ),
6025 timeout=600,
6026 )
6027
6028 await asyncio.wait_for(
6029 self.k8scluster_map[k8s_cluster_type].scale(
6030 kdu_instance,
6031 scale,
6032 kdu_scaling_info["resource-name"],
6033 vca_id=vca_id,
6034 ),
6035 timeout=self.timeout_vca_on_error,
6036 )
6037
6038 if kdu_scaling_info["type"] == "create":
6039 kdu_config = get_configuration(db_vnfd, kdu_name)
6040 if (
6041 kdu_config
6042 and kdu_config.get("initial-config-primitive")
6043 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6044 ):
6045 initial_config_primitive_list = kdu_config.get(
6046 "initial-config-primitive"
6047 )
6048 initial_config_primitive_list.sort(
6049 key=lambda val: int(val["seq"])
6050 )
6051
6052 for initial_config_primitive in initial_config_primitive_list:
6053 primitive_params_ = self._map_primitive_params(
6054 initial_config_primitive, {}, {}
6055 )
6056 step = "execute initial config primitive"
6057 self.logger.debug(logging_text + step)
6058 await asyncio.wait_for(
6059 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6060 cluster_uuid=cluster_uuid,
6061 kdu_instance=kdu_instance,
6062 primitive_name=initial_config_primitive["name"],
6063 params=primitive_params_,
6064 db_dict=db_dict,
6065 vca_id=vca_id,
6066 ),
6067 timeout=600,
6068 )
6069
6070 async def _scale_ng_ro(
6071 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6072 ):
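"""
Update the VNF record with the requested VDU additions/removals and ask NG-RO
to apply them, reusing the NG-RO instantiation flow.
"""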
6073 nsr_id = db_nslcmop["nsInstanceId"]
6074 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6075 db_vnfrs = {}
6076
6077 # read from db: vnfds for every vnf
6078 db_vnfds = []
6079
6080 # for each vnf in ns, read vnfd
6081 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6082 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6083 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6084 # if we do not have this vnfd yet, read it from db
6085 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6086 # read from db
6087 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6088 db_vnfds.append(vnfd)
6089 n2vc_key = self.n2vc.get_public_key()
6090 n2vc_key_list = [n2vc_key]
6091 self.scale_vnfr(
6092 db_vnfr,
6093 vdu_scaling_info.get("vdu-create"),
6094 vdu_scaling_info.get("vdu-delete"),
6095 mark_delete=True,
6096 )
6097 # db_vnfr has been updated, update db_vnfrs to use it
6098 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6099 await self._instantiate_ng_ro(
6100 logging_text,
6101 nsr_id,
6102 db_nsd,
6103 db_nsr,
6104 db_nslcmop,
6105 db_vnfrs,
6106 db_vnfds,
6107 n2vc_key_list,
6108 stage=stage,
6109 start_deploy=time(),
6110 timeout_ns_deploy=self.timeout_ns_deploy,
6111 )
6112 if vdu_scaling_info.get("vdu-delete"):
6113 self.scale_vnfr(
6114 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6115 )
6116
6117 async def add_prometheus_metrics(
6118 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6119 ):
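"""
Render the prometheus*.j2 job template found in the charm artifacts and register
the resulting scrape job(s) with Prometheus; returns the list of job names added,
or None when there is nothing to do.
"""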
6120 if not self.prometheus:
6121 return
6122 # look for a file called 'prometheus*.j2' among the charm artifacts
6123 artifact_content = self.fs.dir_ls(artifact_path)
6124 job_file = next(
6125 (
6126 f
6127 for f in artifact_content
6128 if f.startswith("prometheus") and f.endswith(".j2")
6129 ),
6130 None,
6131 )
6132 if not job_file:
6133 return
6134 with self.fs.file_open((artifact_path, job_file), "r") as f:
6135 job_data = f.read()
6136
6137 # TODO get_service
6138 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6139 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6140 host_port = "80"
6141 vnfr_id = vnfr_id.replace("-", "")
6142 variables = {
6143 "JOB_NAME": vnfr_id,
6144 "TARGET_IP": target_ip,
6145 "EXPORTER_POD_IP": host_name,
6146 "EXPORTER_POD_PORT": host_port,
6147 }
6148 job_list = self.prometheus.parse_job(job_data, variables)
6149 # ensure job_name uses the vnfr_id, and add nsr_id as metadata
6150 for job in job_list:
6151 if (
6152 not isinstance(job.get("job_name"), str)
6153 or vnfr_id not in job["job_name"]
6154 ):
6155 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6156 job["nsr_id"] = nsr_id
6157 job_dict = {jl["job_name"]: jl for jl in job_list}
6158 if await self.prometheus.update(job_dict):
6159 return list(job_dict.keys())
6160
6161 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6162 """
6163 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6164
6165 :param: vim_account_id: VIM Account ID
6166
6167 :return: (cloud_name, cloud_credential)
6168 """
6169 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6170 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6171
6172 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6173 """
6174 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6175
6176 :param: vim_account_id: VIM Account ID
6177
6178 :return: (cloud_name, cloud_credential)
6179 """
6180 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6181 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")