1c18ec751aeb8ec738c77e2c6dddd1e1f4a5a95f
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import (
26 Environment,
27 TemplateError,
28 TemplateNotFound,
29 StrictUndefined,
30 UndefinedError,
31 )
32
33 from osm_lcm import ROclient
34 from osm_lcm.data_utils.nsr import get_deployed_kdu
35 from osm_lcm.ng_ro import NgRoClient, NgRoException
36 from osm_lcm.lcm_utils import (
37 LcmException,
38 LcmExceptionNoMgmtIP,
39 LcmBase,
40 deep_get,
41 get_iterable,
42 populate_dict,
43 )
44 from osm_lcm.data_utils.nsd import get_vnf_profiles
45 from osm_lcm.data_utils.vnfd import (
46 get_vdu_list,
47 get_vdu_profile,
48 get_ee_sorted_initial_config_primitive_list,
49 get_ee_sorted_terminate_config_primitive_list,
50 get_kdu_list,
51 get_virtual_link_profiles,
52 get_vdu,
53 get_configuration,
54 get_vdu_index,
55 get_scaling_aspect,
56 get_number_of_instances,
57 get_juju_ee_ref,
58 get_kdu_profile,
59 )
60 from osm_lcm.data_utils.list_utils import find_in_list
61 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
62 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
63 from osm_lcm.data_utils.database.vim_account import VimAccountDB
64 from n2vc.k8s_helm_conn import K8sHelmConnector
65 from n2vc.k8s_helm3_conn import K8sHelm3Connector
66 from n2vc.k8s_juju_conn import K8sJujuConnector
67
68 from osm_common.dbbase import DbException
69 from osm_common.fsbase import FsException
70
71 from osm_lcm.data_utils.database.database import Database
72 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
73
74 from n2vc.n2vc_juju_conn import N2VCJujuConnector
75 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
76
77 from osm_lcm.lcm_helm_conn import LCMHelmConn
78
79 from copy import copy, deepcopy
80 from time import time
81 from uuid import uuid4
82
83 from random import randint
84
85 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
86
87
class NsLcm(LcmBase):
    """NS lifecycle manager.

    Drives Network Service instantiation/termination through the RO client,
    the N2VC (juju) connector and the K8s (helm2/helm3/juju-bundle)
    connectors, keeping the "nsrs"/"vnfrs" database records in sync via
    the callbacks defined below.
    """

    # Time (s) allowed for a charm from first blocked/error status until it
    # is marked as failed.
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    # Time (s) allowed for charm deletion during NS termination.
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    # Time (s) without any progress report before a primitive is given up.
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution

    # Sentinel results for sub-operation lookups (presumably used by helper
    # methods defined later in this class — TODO confirm); negative so they
    # can never collide with a real sub-operation list index.
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Human-readable label used when registering the VCA deployment task.
    task_name_deploy_vca = "Deploying VCA"
104
105 def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
106 """
107 Init, Connect to database, filesystem storage, and messaging
108 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
109 :return: None
110 """
111 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
112
113 self.db = Database().instance.db
114 self.fs = Filesystem().instance.fs
115 self.loop = loop
116 self.lcm_tasks = lcm_tasks
117 self.timeout = config["timeout"]
118 self.ro_config = config["ro_config"]
119 self.ng_ro = config["ro_config"].get("ng")
120 self.vca_config = config["VCA"].copy()
121
122 # create N2VC connector
123 self.n2vc = N2VCJujuConnector(
124 log=self.logger,
125 loop=self.loop,
126 on_update_db=self._on_update_n2vc_db,
127 fs=self.fs,
128 db=self.db,
129 )
130
131 self.conn_helm_ee = LCMHelmConn(
132 log=self.logger,
133 loop=self.loop,
134 vca_config=self.vca_config,
135 on_update_db=self._on_update_n2vc_db,
136 )
137
138 self.k8sclusterhelm2 = K8sHelmConnector(
139 kubectl_command=self.vca_config.get("kubectlpath"),
140 helm_command=self.vca_config.get("helmpath"),
141 log=self.logger,
142 on_update_db=None,
143 fs=self.fs,
144 db=self.db,
145 )
146
147 self.k8sclusterhelm3 = K8sHelm3Connector(
148 kubectl_command=self.vca_config.get("kubectlpath"),
149 helm_command=self.vca_config.get("helm3path"),
150 fs=self.fs,
151 log=self.logger,
152 db=self.db,
153 on_update_db=None,
154 )
155
156 self.k8sclusterjuju = K8sJujuConnector(
157 kubectl_command=self.vca_config.get("kubectlpath"),
158 juju_command=self.vca_config.get("jujupath"),
159 log=self.logger,
160 loop=self.loop,
161 on_update_db=self._on_update_k8s_db,
162 fs=self.fs,
163 db=self.db,
164 )
165
166 self.k8scluster_map = {
167 "helm-chart": self.k8sclusterhelm2,
168 "helm-chart-v3": self.k8sclusterhelm3,
169 "chart": self.k8sclusterhelm3,
170 "juju-bundle": self.k8sclusterjuju,
171 "juju": self.k8sclusterjuju,
172 }
173
174 self.vca_map = {
175 "lxc_proxy_charm": self.n2vc,
176 "native_charm": self.n2vc,
177 "k8s_proxy_charm": self.n2vc,
178 "helm": self.conn_helm_ee,
179 "helm-v3": self.conn_helm_ee,
180 }
181
182 self.prometheus = prometheus
183
184 # create RO client
185 self.RO = NgRoClient(self.loop, **self.ro_config)
186
187 @staticmethod
188 def increment_ip_mac(ip_mac, vm_index=1):
189 if not isinstance(ip_mac, str):
190 return ip_mac
191 try:
192 # try with ipv4 look for last dot
193 i = ip_mac.rfind(".")
194 if i > 0:
195 i += 1
196 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
197 # try with ipv6 or mac look for last colon. Operate in hex
198 i = ip_mac.rfind(":")
199 if i > 0:
200 i += 1
201 # format in hex, len can be 2 for mac or 4 for ipv6
202 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
203 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
204 )
205 except Exception:
206 pass
207 return None
208
209 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
210
211 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
212
213 try:
214 # TODO filter RO descriptor fields...
215
216 # write to database
217 db_dict = dict()
218 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
219 db_dict["deploymentStatus"] = ro_descriptor
220 self.update_db_2("nsrs", nsrs_id, db_dict)
221
222 except Exception as e:
223 self.logger.warn(
224 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
225 )
226
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback fired by N2VC when juju model data changes.

        Re-reads the VCA status for the whole NS, pushes it into the nsrs
        record ('vcaStatus'), and recomputes nsState READY <-> DEGRADED
        from juju machine/application statuses.

        :param table: source DB table of the change (unused beyond logging)
        :param filter: nsrs query filter; its '_id' is the nsr id
        :param path: dotted path of the changed data; its last numeric
            component is the VCA index
        :param updated_data: changed data (unused here; status is re-fetched)
        :param vca_id: optional VCA identifier
        :return: None; errors are logged and swallowed except cancellations
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS (full status of the juju namespace)
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus: push the fresh status into the record being built
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of *path*
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so both assignments below appear to raise KeyError,
                # which is swallowed by the except just after — the
                # READY/BROKEN transition seemingly never reaches the DB.
                # TODO confirm and initialize db_dict["configurationStatus"].
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines: any agent not 'started' or instance not
                # 'running' degrades the NS
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications: any status other than 'active' degrades
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # only transition between READY and DEGRADED; other states
                # are left untouched
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
329
330 async def _on_update_k8s_db(
331 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
332 ):
333 """
334 Updating vca status in NSR record
335 :param cluster_uuid: UUID of a k8s cluster
336 :param kdu_instance: The unique name of the KDU instance
337 :param filter: To get nsr_id
338 :return: none
339 """
340
341 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
342 # .format(cluster_uuid, kdu_instance, filter))
343
344 try:
345 nsr_id = filter.get("_id")
346
347 # get vca status for NS
348 vca_status = await self.k8sclusterjuju.status_kdu(
349 cluster_uuid,
350 kdu_instance,
351 complete_status=True,
352 yaml_format=False,
353 vca_id=vca_id,
354 )
355 # vcaStatus
356 db_dict = dict()
357 db_dict["vcaStatus"] = {nsr_id: vca_status}
358
359 await self.k8sclusterjuju.update_vca_status(
360 db_dict["vcaStatus"],
361 kdu_instance,
362 vca_id=vca_id,
363 )
364
365 # write to database
366 self.update_db_2("nsrs", nsr_id, db_dict)
367
368 except (asyncio.CancelledError, asyncio.TimeoutError):
369 raise
370 except Exception as e:
371 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
372
373 @staticmethod
374 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
375 try:
376 env = Environment(undefined=StrictUndefined)
377 template = env.from_string(cloud_init_text)
378 return template.render(additional_params or {})
379 except UndefinedError as e:
380 raise LcmException(
381 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
382 "file, must be provided in the instantiation parameters inside the "
383 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
384 )
385 except (TemplateError, TemplateNotFound) as e:
386 raise LcmException(
387 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
388 vnfd_id, vdu_id, e
389 )
390 )
391
392 def _get_vdu_cloud_init_content(self, vdu, vnfd):
393 cloud_init_content = cloud_init_file = None
394 try:
395 if vdu.get("cloud-init-file"):
396 base_folder = vnfd["_admin"]["storage"]
397 cloud_init_file = "{}/{}/cloud_init/{}".format(
398 base_folder["folder"],
399 base_folder["pkg-dir"],
400 vdu["cloud-init-file"],
401 )
402 with self.fs.file_open(cloud_init_file, "r") as ci_file:
403 cloud_init_content = ci_file.read()
404 elif vdu.get("cloud-init"):
405 cloud_init_content = vdu["cloud-init"]
406
407 return cloud_init_content
408 except FsException as e:
409 raise LcmException(
410 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
411 vnfd["id"], vdu["id"], cloud_init_file, e
412 )
413 )
414
415 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
416 vdur = next(
417 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
418 )
419 additional_params = vdur.get("additionalParams")
420 return parse_yaml_strings(additional_params)
421
422 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
423 """
424 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
425 :param vnfd: input vnfd
426 :param new_id: overrides vnf id if provided
427 :param additionalParams: Instantiation params for VNFs provided
428 :param nsrId: Id of the NSR
429 :return: copy of vnfd
430 """
431 vnfd_RO = deepcopy(vnfd)
432 # remove unused by RO configuration, monitoring, scaling and internal keys
433 vnfd_RO.pop("_id", None)
434 vnfd_RO.pop("_admin", None)
435 vnfd_RO.pop("monitoring-param", None)
436 vnfd_RO.pop("scaling-group-descriptor", None)
437 vnfd_RO.pop("kdu", None)
438 vnfd_RO.pop("k8s-cluster", None)
439 if new_id:
440 vnfd_RO["id"] = new_id
441
442 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
443 for vdu in get_iterable(vnfd_RO, "vdu"):
444 vdu.pop("cloud-init-file", None)
445 vdu.pop("cloud-init", None)
446 return vnfd_RO
447
448 @staticmethod
449 def ip_profile_2_RO(ip_profile):
450 RO_ip_profile = deepcopy(ip_profile)
451 if "dns-server" in RO_ip_profile:
452 if isinstance(RO_ip_profile["dns-server"], list):
453 RO_ip_profile["dns-address"] = []
454 for ds in RO_ip_profile.pop("dns-server"):
455 RO_ip_profile["dns-address"].append(ds["address"])
456 else:
457 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
458 if RO_ip_profile.get("ip-version") == "ipv4":
459 RO_ip_profile["ip-version"] = "IPv4"
460 if RO_ip_profile.get("ip-version") == "ipv6":
461 RO_ip_profile["ip-version"] = "IPv6"
462 if "dhcp-params" in RO_ip_profile:
463 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
464 return RO_ip_profile
465
466 def _get_ro_vim_id_for_vim_account(self, vim_account):
467 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
468 if db_vim["_admin"]["operationalState"] != "ENABLED":
469 raise LcmException(
470 "VIM={} is not available. operationalState={}".format(
471 vim_account, db_vim["_admin"]["operationalState"]
472 )
473 )
474 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
475 return RO_vim_id
476
477 def get_ro_wim_id_for_wim_account(self, wim_account):
478 if isinstance(wim_account, str):
479 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
480 if db_wim["_admin"]["operationalState"] != "ENABLED":
481 raise LcmException(
482 "WIM={} is not available. operationalState={}".format(
483 wim_account, db_wim["_admin"]["operationalState"]
484 )
485 )
486 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
487 return RO_wim_id
488 else:
489 return wim_account
490
491 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
492
493 db_vdu_push_list = []
494 db_update = {"_admin.modified": time()}
495 if vdu_create:
496 for vdu_id, vdu_count in vdu_create.items():
497 vdur = next(
498 (
499 vdur
500 for vdur in reversed(db_vnfr["vdur"])
501 if vdur["vdu-id-ref"] == vdu_id
502 ),
503 None,
504 )
505 if not vdur:
506 raise LcmException(
507 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
508 vdu_id
509 )
510 )
511
512 for count in range(vdu_count):
513 vdur_copy = deepcopy(vdur)
514 vdur_copy["status"] = "BUILD"
515 vdur_copy["status-detailed"] = None
516 vdur_copy["ip-address"]: None
517 vdur_copy["_id"] = str(uuid4())
518 vdur_copy["count-index"] += count + 1
519 vdur_copy["id"] = "{}-{}".format(
520 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
521 )
522 vdur_copy.pop("vim_info", None)
523 for iface in vdur_copy["interfaces"]:
524 if iface.get("fixed-ip"):
525 iface["ip-address"] = self.increment_ip_mac(
526 iface["ip-address"], count + 1
527 )
528 else:
529 iface.pop("ip-address", None)
530 if iface.get("fixed-mac"):
531 iface["mac-address"] = self.increment_ip_mac(
532 iface["mac-address"], count + 1
533 )
534 else:
535 iface.pop("mac-address", None)
536 iface.pop(
537 "mgmt_vnf", None
538 ) # only first vdu can be managment of vnf
539 db_vdu_push_list.append(vdur_copy)
540 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
541 if vdu_delete:
542 for vdu_id, vdu_count in vdu_delete.items():
543 if mark_delete:
544 indexes_to_delete = [
545 iv[0]
546 for iv in enumerate(db_vnfr["vdur"])
547 if iv[1]["vdu-id-ref"] == vdu_id
548 ]
549 db_update.update(
550 {
551 "vdur.{}.status".format(i): "DELETING"
552 for i in indexes_to_delete[-vdu_count:]
553 }
554 )
555 else:
556 # it must be deleted one by one because common.db does not allow otherwise
557 vdus_to_delete = [
558 v
559 for v in reversed(db_vnfr["vdur"])
560 if v["vdu-id-ref"] == vdu_id
561 ]
562 for vdu in vdus_to_delete[:vdu_count]:
563 self.db.set_one(
564 "vnfrs",
565 {"_id": db_vnfr["_id"]},
566 None,
567 pull={"vdur": {"_id": vdu["_id"]}},
568 )
569 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
570 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
571 # modify passed dictionary db_vnfr
572 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
573 db_vnfr["vdur"] = db_vnfr_["vdur"]
574
575 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
576 """
577 Updates database nsr with the RO info for the created vld
578 :param ns_update_nsr: dictionary to be filled with the updated info
579 :param db_nsr: content of db_nsr. This is also modified
580 :param nsr_desc_RO: nsr descriptor from RO
581 :return: Nothing, LcmException is raised on errors
582 """
583
584 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
585 for net_RO in get_iterable(nsr_desc_RO, "nets"):
586 if vld["id"] != net_RO.get("ns_net_osm_id"):
587 continue
588 vld["vim-id"] = net_RO.get("vim_net_id")
589 vld["name"] = net_RO.get("vim_name")
590 vld["status"] = net_RO.get("status")
591 vld["status-detailed"] = net_RO.get("error_msg")
592 ns_update_nsr["vld.{}".format(vld_index)] = vld
593 break
594 else:
595 raise LcmException(
596 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
597 )
598
599 def set_vnfr_at_error(self, db_vnfrs, error_text):
600 try:
601 for db_vnfr in db_vnfrs.values():
602 vnfr_update = {"status": "ERROR"}
603 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
604 if "status" not in vdur:
605 vdur["status"] = "ERROR"
606 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
607 if error_text:
608 vdur["status-detailed"] = str(error_text)
609 vnfr_update[
610 "vdur.{}.status-detailed".format(vdu_index)
611 ] = "ERROR"
612 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
613 except DbException as e:
614 self.logger.error("Cannot update vnf. {}".format(e))
615
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry matching this member-vnf-index; the
            # for/else raises when none matches
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                # management IP: keep only the first address if RO reports
                # several separated by ';'
                if vnf_RO.get("ip_address"):
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM, nothing to update
                        continue
                    # match RO vm by vdu id AND replica (count-index): RO
                    # lists replicas in order, so the counter advances once
                    # per same-id vm until the wanted replica is reached
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses; every vnfr interface
                        # must exist in the RO info (for/else raises)
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        # no RO vm matched this vdu id + replica
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update internal vlds from the RO nets (for/else raises when
                # a vld has no RO counterpart)
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
712
713 def _get_ns_config_info(self, nsr_id):
714 """
715 Generates a mapping between vnf,vdu elements and the N2VC id
716 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
717 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
718 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
719 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
720 """
721 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
722 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
723 mapping = {}
724 ns_config_info = {"osm-config-mapping": mapping}
725 for vca in vca_deployed_list:
726 if not vca["member-vnf-index"]:
727 continue
728 if not vca["vdu_id"]:
729 mapping[vca["member-vnf-index"]] = vca["application"]
730 else:
731 mapping[
732 "{}.{}.{}".format(
733 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
734 )
735 ] = vca["application"]
736 return ns_config_info
737
738 async def _instantiate_ng_ro(
739 self,
740 logging_text,
741 nsr_id,
742 nsd,
743 db_nsr,
744 db_nslcmop,
745 db_vnfrs,
746 db_vnfds,
747 n2vc_key_list,
748 stage,
749 start_deploy,
750 timeout_ns_deploy,
751 ):
752
753 db_vims = {}
754
755 def get_vim_account(vim_account_id):
756 nonlocal db_vims
757 if vim_account_id in db_vims:
758 return db_vims[vim_account_id]
759 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
760 db_vims[vim_account_id] = db_vim
761 return db_vim
762
763 # modify target_vld info with instantiation parameters
764 def parse_vld_instantiation_params(
765 target_vim, target_vld, vld_params, target_sdn
766 ):
767 if vld_params.get("ip-profile"):
768 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
769 "ip-profile"
770 ]
771 if vld_params.get("provider-network"):
772 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
773 "provider-network"
774 ]
775 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
776 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
777 "provider-network"
778 ]["sdn-ports"]
779 if vld_params.get("wimAccountId"):
780 target_wim = "wim:{}".format(vld_params["wimAccountId"])
781 target_vld["vim_info"][target_wim] = {}
782 for param in ("vim-network-name", "vim-network-id"):
783 if vld_params.get(param):
784 if isinstance(vld_params[param], dict):
785 for vim, vim_net in vld_params[param].items():
786 other_target_vim = "vim:" + vim
787 populate_dict(
788 target_vld["vim_info"],
789 (other_target_vim, param.replace("-", "_")),
790 vim_net,
791 )
792 else: # isinstance str
793 target_vld["vim_info"][target_vim][
794 param.replace("-", "_")
795 ] = vld_params[param]
796 if vld_params.get("common_id"):
797 target_vld["common_id"] = vld_params.get("common_id")
798
799 nslcmop_id = db_nslcmop["_id"]
800 target = {
801 "name": db_nsr["name"],
802 "ns": {"vld": []},
803 "vnf": [],
804 "image": deepcopy(db_nsr["image"]),
805 "flavor": deepcopy(db_nsr["flavor"]),
806 "action_id": nslcmop_id,
807 "cloud_init_content": {},
808 }
809 for image in target["image"]:
810 image["vim_info"] = {}
811 for flavor in target["flavor"]:
812 flavor["vim_info"] = {}
813
814 if db_nslcmop.get("lcmOperationType") != "instantiate":
815 # get parameters of instantiation:
816 db_nslcmop_instantiate = self.db.get_list(
817 "nslcmops",
818 {
819 "nsInstanceId": db_nslcmop["nsInstanceId"],
820 "lcmOperationType": "instantiate",
821 },
822 )[-1]
823 ns_params = db_nslcmop_instantiate.get("operationParams")
824 else:
825 ns_params = db_nslcmop.get("operationParams")
826 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
827 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
828
829 cp2target = {}
830 for vld_index, vld in enumerate(db_nsr.get("vld")):
831 target_vim = "vim:{}".format(ns_params["vimAccountId"])
832 target_vld = {
833 "id": vld["id"],
834 "name": vld["name"],
835 "mgmt-network": vld.get("mgmt-network", False),
836 "type": vld.get("type"),
837 "vim_info": {
838 target_vim: {
839 "vim_network_name": vld.get("vim-network-name"),
840 "vim_account_id": ns_params["vimAccountId"],
841 }
842 },
843 }
844 # check if this network needs SDN assist
845 if vld.get("pci-interfaces"):
846 db_vim = get_vim_account(ns_params["vimAccountId"])
847 sdnc_id = db_vim["config"].get("sdn-controller")
848 if sdnc_id:
849 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
850 target_sdn = "sdn:{}".format(sdnc_id)
851 target_vld["vim_info"][target_sdn] = {
852 "sdn": True,
853 "target_vim": target_vim,
854 "vlds": [sdn_vld],
855 "type": vld.get("type"),
856 }
857
858 nsd_vnf_profiles = get_vnf_profiles(nsd)
859 for nsd_vnf_profile in nsd_vnf_profiles:
860 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
861 if cp["virtual-link-profile-id"] == vld["id"]:
862 cp2target[
863 "member_vnf:{}.{}".format(
864 cp["constituent-cpd-id"][0][
865 "constituent-base-element-id"
866 ],
867 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
868 )
869 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
870
871 # check at nsd descriptor, if there is an ip-profile
872 vld_params = {}
873 nsd_vlp = find_in_list(
874 get_virtual_link_profiles(nsd),
875 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
876 == vld["id"],
877 )
878 if (
879 nsd_vlp
880 and nsd_vlp.get("virtual-link-protocol-data")
881 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
882 ):
883 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
884 "l3-protocol-data"
885 ]
886 ip_profile_dest_data = {}
887 if "ip-version" in ip_profile_source_data:
888 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
889 "ip-version"
890 ]
891 if "cidr" in ip_profile_source_data:
892 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
893 "cidr"
894 ]
895 if "gateway-ip" in ip_profile_source_data:
896 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
897 "gateway-ip"
898 ]
899 if "dhcp-enabled" in ip_profile_source_data:
900 ip_profile_dest_data["dhcp-params"] = {
901 "enabled": ip_profile_source_data["dhcp-enabled"]
902 }
903 vld_params["ip-profile"] = ip_profile_dest_data
904
905 # update vld_params with instantiation params
906 vld_instantiation_params = find_in_list(
907 get_iterable(ns_params, "vld"),
908 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
909 )
910 if vld_instantiation_params:
911 vld_params.update(vld_instantiation_params)
912 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
913 target["ns"]["vld"].append(target_vld)
914
915 for vnfr in db_vnfrs.values():
916 vnfd = find_in_list(
917 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
918 )
919 vnf_params = find_in_list(
920 get_iterable(ns_params, "vnf"),
921 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
922 )
923 target_vnf = deepcopy(vnfr)
924 target_vim = "vim:{}".format(vnfr["vim-account-id"])
925 for vld in target_vnf.get("vld", ()):
926 # check if connected to a ns.vld, to fill target'
927 vnf_cp = find_in_list(
928 vnfd.get("int-virtual-link-desc", ()),
929 lambda cpd: cpd.get("id") == vld["id"],
930 )
931 if vnf_cp:
932 ns_cp = "member_vnf:{}.{}".format(
933 vnfr["member-vnf-index-ref"], vnf_cp["id"]
934 )
935 if cp2target.get(ns_cp):
936 vld["target"] = cp2target[ns_cp]
937
938 vld["vim_info"] = {
939 target_vim: {"vim_network_name": vld.get("vim-network-name")}
940 }
941 # check if this network needs SDN assist
942 target_sdn = None
943 if vld.get("pci-interfaces"):
944 db_vim = get_vim_account(vnfr["vim-account-id"])
945 sdnc_id = db_vim["config"].get("sdn-controller")
946 if sdnc_id:
947 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
948 target_sdn = "sdn:{}".format(sdnc_id)
949 vld["vim_info"][target_sdn] = {
950 "sdn": True,
951 "target_vim": target_vim,
952 "vlds": [sdn_vld],
953 "type": vld.get("type"),
954 }
955
956 # check at vnfd descriptor, if there is an ip-profile
957 vld_params = {}
958 vnfd_vlp = find_in_list(
959 get_virtual_link_profiles(vnfd),
960 lambda a_link_profile: a_link_profile["id"] == vld["id"],
961 )
962 if (
963 vnfd_vlp
964 and vnfd_vlp.get("virtual-link-protocol-data")
965 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
966 ):
967 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
968 "l3-protocol-data"
969 ]
970 ip_profile_dest_data = {}
971 if "ip-version" in ip_profile_source_data:
972 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
973 "ip-version"
974 ]
975 if "cidr" in ip_profile_source_data:
976 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
977 "cidr"
978 ]
979 if "gateway-ip" in ip_profile_source_data:
980 ip_profile_dest_data[
981 "gateway-address"
982 ] = ip_profile_source_data["gateway-ip"]
983 if "dhcp-enabled" in ip_profile_source_data:
984 ip_profile_dest_data["dhcp-params"] = {
985 "enabled": ip_profile_source_data["dhcp-enabled"]
986 }
987
988 vld_params["ip-profile"] = ip_profile_dest_data
989 # update vld_params with instantiation params
990 if vnf_params:
991 vld_instantiation_params = find_in_list(
992 get_iterable(vnf_params, "internal-vld"),
993 lambda i_vld: i_vld["name"] == vld["id"],
994 )
995 if vld_instantiation_params:
996 vld_params.update(vld_instantiation_params)
997 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
998
999 vdur_list = []
1000 for vdur in target_vnf.get("vdur", ()):
1001 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1002 continue # This vdu must not be created
1003 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1004
1005 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1006
1007 if ssh_keys_all:
1008 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1009 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1010 if (
1011 vdu_configuration
1012 and vdu_configuration.get("config-access")
1013 and vdu_configuration.get("config-access").get("ssh-access")
1014 ):
1015 vdur["ssh-keys"] = ssh_keys_all
1016 vdur["ssh-access-required"] = vdu_configuration[
1017 "config-access"
1018 ]["ssh-access"]["required"]
1019 elif (
1020 vnf_configuration
1021 and vnf_configuration.get("config-access")
1022 and vnf_configuration.get("config-access").get("ssh-access")
1023 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1024 ):
1025 vdur["ssh-keys"] = ssh_keys_all
1026 vdur["ssh-access-required"] = vnf_configuration[
1027 "config-access"
1028 ]["ssh-access"]["required"]
1029 elif ssh_keys_instantiation and find_in_list(
1030 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1031 ):
1032 vdur["ssh-keys"] = ssh_keys_instantiation
1033
1034 self.logger.debug("NS > vdur > {}".format(vdur))
1035
1036 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1037 # cloud-init
1038 if vdud.get("cloud-init-file"):
1039 vdur["cloud-init"] = "{}:file:{}".format(
1040 vnfd["_id"], vdud.get("cloud-init-file")
1041 )
1042 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1043 if vdur["cloud-init"] not in target["cloud_init_content"]:
1044 base_folder = vnfd["_admin"]["storage"]
1045 cloud_init_file = "{}/{}/cloud_init/{}".format(
1046 base_folder["folder"],
1047 base_folder["pkg-dir"],
1048 vdud.get("cloud-init-file"),
1049 )
1050 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1051 target["cloud_init_content"][
1052 vdur["cloud-init"]
1053 ] = ci_file.read()
1054 elif vdud.get("cloud-init"):
1055 vdur["cloud-init"] = "{}:vdu:{}".format(
1056 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1057 )
1058 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1059 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1060 "cloud-init"
1061 ]
1062 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1063 deploy_params_vdu = self._format_additional_params(
1064 vdur.get("additionalParams") or {}
1065 )
1066 deploy_params_vdu["OSM"] = get_osm_params(
1067 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1068 )
1069 vdur["additionalParams"] = deploy_params_vdu
1070
1071 # flavor
1072 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1073 if target_vim not in ns_flavor["vim_info"]:
1074 ns_flavor["vim_info"][target_vim] = {}
1075
1076 # deal with images
1077 # in case alternative images are provided we must check if they should be applied
1078 # for the vim_type, modify the vim_type taking into account
1079 ns_image_id = int(vdur["ns-image-id"])
1080 if vdur.get("alt-image-ids"):
1081 db_vim = get_vim_account(vnfr["vim-account-id"])
1082 vim_type = db_vim["vim_type"]
1083 for alt_image_id in vdur.get("alt-image-ids"):
1084 ns_alt_image = target["image"][int(alt_image_id)]
1085 if vim_type == ns_alt_image.get("vim-type"):
1086 # must use alternative image
1087 self.logger.debug(
1088 "use alternative image id: {}".format(alt_image_id)
1089 )
1090 ns_image_id = alt_image_id
1091 vdur["ns-image-id"] = ns_image_id
1092 break
1093 ns_image = target["image"][int(ns_image_id)]
1094 if target_vim not in ns_image["vim_info"]:
1095 ns_image["vim_info"][target_vim] = {}
1096
1097 vdur["vim_info"] = {target_vim: {}}
1098 # instantiation parameters
1099 # if vnf_params:
1100 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1101 # vdud["id"]), None)
1102 vdur_list.append(vdur)
1103 target_vnf["vdur"] = vdur_list
1104 target["vnf"].append(target_vnf)
1105
1106 desc = await self.RO.deploy(nsr_id, target)
1107 self.logger.debug("RO return > {}".format(desc))
1108 action_id = desc["action_id"]
1109 await self._wait_ng_ro(
1110 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1111 )
1112
1113 # Updating NSR
1114 db_nsr_update = {
1115 "_admin.deployed.RO.operational-status": "running",
1116 "detailed-status": " ".join(stage),
1117 }
1118 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
1121 self.logger.debug(
1122 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1123 )
1124 return
1125
1126 async def _wait_ng_ro(
1127 self,
1128 nsr_id,
1129 action_id,
1130 nslcmop_id=None,
1131 start_time=None,
1132 timeout=600,
1133 stage=None,
1134 ):
1135 detailed_status_old = None
1136 db_nsr_update = {}
1137 start_time = start_time or time()
1138 while time() <= start_time + timeout:
1139 desc_status = await self.RO.status(nsr_id, action_id)
1140 self.logger.debug("Wait NG RO > {}".format(desc_status))
1141 if desc_status["status"] == "FAILED":
1142 raise NgRoException(desc_status["details"])
1143 elif desc_status["status"] == "BUILD":
1144 if stage:
1145 stage[2] = "VIM: ({})".format(desc_status["details"])
1146 elif desc_status["status"] == "DONE":
1147 if stage:
1148 stage[2] = "Deployed at VIM"
1149 break
1150 else:
1151 assert False, "ROclient.check_ns_status returns unknown {}".format(
1152 desc_status["status"]
1153 )
1154 if stage and nslcmop_id and stage[2] != detailed_status_old:
1155 detailed_status_old = stage[2]
1156 db_nsr_update["detailed-status"] = " ".join(stage)
1157 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1158 self._write_op_status(nslcmop_id, stage)
1159 await asyncio.sleep(15, loop=self.loop)
1160 else: # timeout_ns_deploy
1161 raise NgRoException("Timeout waiting ns to deploy")
1162
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Remove the NS deployment at NG-RO by deploying an empty target, then
        deleting the NS record at RO.

        :param logging_text: prefix to use at logging
        :param nsr_deployed: content of db_nsr "_admin.deployed" (not read in this body)
        :param nsr_id: NS record id to terminate
        :param nslcmop_id: id of the terminate operation, also sent as RO action_id
        :param stage: list with 3 items: [general stage, tasks, vim_specific];
            item 2 is overwritten with the VIM deletion result
        :return: None. Raises LcmException if deletion at RO fails.
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # deploying an empty target makes NG-RO remove every resource of this NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                # already gone at RO: treat as success and clear the RO references
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # always persist the outcome, even on failure, before raising
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1229
1230 async def instantiate_RO(
1231 self,
1232 logging_text,
1233 nsr_id,
1234 nsd,
1235 db_nsr,
1236 db_nslcmop,
1237 db_vnfrs,
1238 db_vnfds,
1239 n2vc_key_list,
1240 stage,
1241 ):
1242 """
1243 Instantiate at RO
1244 :param logging_text: preffix text to use at logging
1245 :param nsr_id: nsr identity
1246 :param nsd: database content of ns descriptor
1247 :param db_nsr: database content of ns record
1248 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1249 :param db_vnfrs:
1250 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1251 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1252 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1253 :return: None or exception
1254 """
1255 try:
1256 start_deploy = time()
1257 ns_params = db_nslcmop.get("operationParams")
1258 if ns_params and ns_params.get("timeout_ns_deploy"):
1259 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1260 else:
1261 timeout_ns_deploy = self.timeout.get(
1262 "ns_deploy", self.timeout_ns_deploy
1263 )
1264
1265 # Check for and optionally request placement optimization. Database will be updated if placement activated
1266 stage[2] = "Waiting for Placement."
1267 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1268 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1269 for vnfr in db_vnfrs.values():
1270 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1271 break
1272 else:
1273 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1274
1275 return await self._instantiate_ng_ro(
1276 logging_text,
1277 nsr_id,
1278 nsd,
1279 db_nsr,
1280 db_nslcmop,
1281 db_vnfrs,
1282 db_vnfds,
1283 n2vc_key_list,
1284 stage,
1285 start_deploy,
1286 timeout_ns_deploy,
1287 )
1288 except Exception as e:
1289 stage[2] = "ERROR deploying at VIM"
1290 self.set_vnfr_at_error(db_vnfrs, str(e))
1291 self.logger.error(
1292 "Error deploying at VIM {}".format(e),
1293 exc_info=not isinstance(
1294 e,
1295 (
1296 ROclient.ROClientException,
1297 LcmException,
1298 DbException,
1299 NgRoException,
1300 ),
1301 ),
1302 )
1303 raise
1304
1305 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1306 """
1307 Wait for kdu to be up, get ip address
1308 :param logging_text: prefix use for logging
1309 :param nsr_id:
1310 :param vnfr_id:
1311 :param kdu_name:
1312 :return: IP address
1313 """
1314
1315 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1316 nb_tries = 0
1317
1318 while nb_tries < 360:
1319 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1320 kdur = next(
1321 (
1322 x
1323 for x in get_iterable(db_vnfr, "kdur")
1324 if x.get("kdu-name") == kdu_name
1325 ),
1326 None,
1327 )
1328 if not kdur:
1329 raise LcmException(
1330 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1331 )
1332 if kdur.get("status"):
1333 if kdur["status"] in ("READY", "ENABLED"):
1334 return kdur.get("ip-address")
1335 else:
1336 raise LcmException(
1337 "target KDU={} is in error state".format(kdu_name)
1338 )
1339
1340 await asyncio.sleep(10, loop=self.loop)
1341 nb_tries += 1
1342 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1343
1344 async def wait_vm_up_insert_key_ro(
1345 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1346 ):
1347 """
1348 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1349 :param logging_text: prefix use for logging
1350 :param nsr_id:
1351 :param vnfr_id:
1352 :param vdu_id:
1353 :param vdu_index:
1354 :param pub_key: public ssh key to inject, None to skip
1355 :param user: user to apply the public ssh key
1356 :return: IP address
1357 """
1358
1359 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1360 ro_nsr_id = None
1361 ip_address = None
1362 nb_tries = 0
1363 target_vdu_id = None
1364 ro_retries = 0
1365
1366 while True:
1367
1368 ro_retries += 1
1369 if ro_retries >= 360: # 1 hour
1370 raise LcmException(
1371 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1372 )
1373
1374 await asyncio.sleep(10, loop=self.loop)
1375
1376 # get ip address
1377 if not target_vdu_id:
1378 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1379
1380 if not vdu_id: # for the VNF case
1381 if db_vnfr.get("status") == "ERROR":
1382 raise LcmException(
1383 "Cannot inject ssh-key because target VNF is in error state"
1384 )
1385 ip_address = db_vnfr.get("ip-address")
1386 if not ip_address:
1387 continue
1388 vdur = next(
1389 (
1390 x
1391 for x in get_iterable(db_vnfr, "vdur")
1392 if x.get("ip-address") == ip_address
1393 ),
1394 None,
1395 )
1396 else: # VDU case
1397 vdur = next(
1398 (
1399 x
1400 for x in get_iterable(db_vnfr, "vdur")
1401 if x.get("vdu-id-ref") == vdu_id
1402 and x.get("count-index") == vdu_index
1403 ),
1404 None,
1405 )
1406
1407 if (
1408 not vdur and len(db_vnfr.get("vdur", ())) == 1
1409 ): # If only one, this should be the target vdu
1410 vdur = db_vnfr["vdur"][0]
1411 if not vdur:
1412 raise LcmException(
1413 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1414 vnfr_id, vdu_id, vdu_index
1415 )
1416 )
1417 # New generation RO stores information at "vim_info"
1418 ng_ro_status = None
1419 target_vim = None
1420 if vdur.get("vim_info"):
1421 target_vim = next(
1422 t for t in vdur["vim_info"]
1423 ) # there should be only one key
1424 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1425 if (
1426 vdur.get("pdu-type")
1427 or vdur.get("status") == "ACTIVE"
1428 or ng_ro_status == "ACTIVE"
1429 ):
1430 ip_address = vdur.get("ip-address")
1431 if not ip_address:
1432 continue
1433 target_vdu_id = vdur["vdu-id-ref"]
1434 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1435 raise LcmException(
1436 "Cannot inject ssh-key because target VM is in error state"
1437 )
1438
1439 if not target_vdu_id:
1440 continue
1441
1442 # inject public key into machine
1443 if pub_key and user:
1444 self.logger.debug(logging_text + "Inserting RO key")
1445 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1446 if vdur.get("pdu-type"):
1447 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1448 return ip_address
1449 try:
1450 ro_vm_id = "{}-{}".format(
1451 db_vnfr["member-vnf-index-ref"], target_vdu_id
1452 ) # TODO add vdu_index
1453 if self.ng_ro:
1454 target = {
1455 "action": {
1456 "action": "inject_ssh_key",
1457 "key": pub_key,
1458 "user": user,
1459 },
1460 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1461 }
1462 desc = await self.RO.deploy(nsr_id, target)
1463 action_id = desc["action_id"]
1464 await self._wait_ng_ro(nsr_id, action_id, timeout=600)
1465 break
1466 else:
1467 # wait until NS is deployed at RO
1468 if not ro_nsr_id:
1469 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1470 ro_nsr_id = deep_get(
1471 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1472 )
1473 if not ro_nsr_id:
1474 continue
1475 result_dict = await self.RO.create_action(
1476 item="ns",
1477 item_id_name=ro_nsr_id,
1478 descriptor={
1479 "add_public_key": pub_key,
1480 "vms": [ro_vm_id],
1481 "user": user,
1482 },
1483 )
1484 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1485 if not result_dict or not isinstance(result_dict, dict):
1486 raise LcmException(
1487 "Unknown response from RO when injecting key"
1488 )
1489 for result in result_dict.values():
1490 if result.get("vim_result") == 200:
1491 break
1492 else:
1493 raise ROclient.ROClientException(
1494 "error injecting key: {}".format(
1495 result.get("description")
1496 )
1497 )
1498 break
1499 except NgRoException as e:
1500 raise LcmException(
1501 "Reaching max tries injecting key. Error: {}".format(e)
1502 )
1503 except ROclient.ROClientException as e:
1504 if not nb_tries:
1505 self.logger.debug(
1506 logging_text
1507 + "error injecting key: {}. Retrying until {} seconds".format(
1508 e, 20 * 10
1509 )
1510 )
1511 nb_tries += 1
1512 if nb_tries >= 20:
1513 raise LcmException(
1514 "Reaching max tries injecting key. Error: {}".format(e)
1515 )
1516 else:
1517 break
1518
1519 return ip_address
1520
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id whose configurationStatus is polled
        :param vca_deployed_list: list of deployed VCAs; entry at vca_index is "us"
        :param vca_index: index of the VCA whose dependencies must be READY
        :return: None when all dependencies are READY. Raises LcmException when a
            dependency is BROKEN or on timeout.
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): this counts 300 iterations of a 10 s sleep, so the real
        # wall-clock limit is ~3000 s, not 300 s — confirm intended units.
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # dependency filter: NS-level VCAs (no member-vnf-index) wait on
                # everything; VNF-level VCAs wait only on peers of the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # some dependency still in progress: re-poll after a sleep
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1558
1559 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1560 return deep_get(db_vnfr, ("vca-id",)) or deep_get(
1561 db_nsr, ("instantiate_params", "vcaId")
1562 )
1563
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Deploy one VCA execution environment (proxy/native charm or helm chart)
        for a NS, VNF, VDU or KDU element and run its Day-1 initial-config
        primitives.

        :param logging_text: prefix to use at logging
        :param vca_index: index of this VCA inside db_nsr "_admin.deployed.VCA"
        :param nsi_id: network slice instance id, or None
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnf record, or None for NS-level
        :param vdu_id: target vdu id, or None
        :param kdu_name: target kdu name, or None
        :param vdu_index: count-index of the target vdu
        :param config_descriptor: configuration section of the descriptor
        :param deploy_params: additional params for primitives; rw_mgmt_ip is added here
        :param base_folder: vnfd/nsd "_admin.storage" dict (folder, pkg-dir)
        :param nslcmop_id: id of the operation, for status reporting
        :param stage: 3-item status list written as the deployment progresses
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/helm/helm-v3
        :param vca_name: charm or chart name inside the package
        :param ee_config_descriptor: execution-environment descriptor entry
        :raises LcmException: wrapping any failure, with the failing step in the message
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms"
                if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                else "helm-charts",
                vca_name,
            )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive, when present, becomes the charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_index=vca_index,
                vca_id=vca_id,
                vca_type=vca_type,
            )

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.add_prometheus_metrics(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
1979
1980 def _write_ns_status(
1981 self,
1982 nsr_id: str,
1983 ns_state: str,
1984 current_operation: str,
1985 current_operation_id: str,
1986 error_description: str = None,
1987 error_detail: str = None,
1988 other_update: dict = None,
1989 ):
1990 """
1991 Update db_nsr fields.
1992 :param nsr_id:
1993 :param ns_state:
1994 :param current_operation:
1995 :param current_operation_id:
1996 :param error_description:
1997 :param error_detail:
1998 :param other_update: Other required changes at database if provided, will be cleared
1999 :return:
2000 """
2001 try:
2002 db_dict = other_update or {}
2003 db_dict[
2004 "_admin.nslcmop"
2005 ] = current_operation_id # for backward compatibility
2006 db_dict["_admin.current-operation"] = current_operation_id
2007 db_dict["_admin.operation-type"] = (
2008 current_operation if current_operation != "IDLE" else None
2009 )
2010 db_dict["currentOperation"] = current_operation
2011 db_dict["currentOperationID"] = current_operation_id
2012 db_dict["errorDescription"] = error_description
2013 db_dict["errorDetail"] = error_detail
2014
2015 if ns_state:
2016 db_dict["nsState"] = ns_state
2017 self.update_db_2("nsrs", nsr_id, db_dict)
2018 except DbException as e:
2019 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2020
2021 def _write_op_status(
2022 self,
2023 op_id: str,
2024 stage: list = None,
2025 error_message: str = None,
2026 queuePosition: int = 0,
2027 operation_state: str = None,
2028 other_update: dict = None,
2029 ):
2030 try:
2031 db_dict = other_update or {}
2032 db_dict["queuePosition"] = queuePosition
2033 if isinstance(stage, list):
2034 db_dict["stage"] = stage[0]
2035 db_dict["detailed-status"] = " ".join(stage)
2036 elif stage is not None:
2037 db_dict["stage"] = str(stage)
2038
2039 if error_message is not None:
2040 db_dict["errorMessage"] = error_message
2041 if operation_state is not None:
2042 db_dict["operationState"] = operation_state
2043 db_dict["statusEnteredTime"] = time()
2044 self.update_db_2("nslcmops", op_id, db_dict)
2045 except DbException as e:
2046 self.logger.warn(
2047 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2048 )
2049
2050 def _write_all_config_status(self, db_nsr: dict, status: str):
2051 try:
2052 nsr_id = db_nsr["_id"]
2053 # configurationStatus
2054 config_status = db_nsr.get("configurationStatus")
2055 if config_status:
2056 db_nsr_update = {
2057 "configurationStatus.{}.status".format(index): status
2058 for index, v in enumerate(config_status)
2059 if v
2060 }
2061 # update status
2062 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2063
2064 except DbException as e:
2065 self.logger.warn(
2066 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2067 )
2068
2069 def _write_configuration_status(
2070 self,
2071 nsr_id: str,
2072 vca_index: int,
2073 status: str = None,
2074 element_under_configuration: str = None,
2075 element_type: str = None,
2076 other_update: dict = None,
2077 ):
2078
2079 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2080 # .format(vca_index, status))
2081
2082 try:
2083 db_path = "configurationStatus.{}.".format(vca_index)
2084 db_dict = other_update or {}
2085 if status:
2086 db_dict[db_path + "status"] = status
2087 if element_under_configuration:
2088 db_dict[
2089 db_path + "elementUnderConfiguration"
2090 ] = element_under_configuration
2091 if element_type:
2092 db_dict[db_path + "elementType"] = element_type
2093 self.update_db_2("nsrs", nsr_id, db_dict)
2094 except DbException as e:
2095 self.logger.warn(
2096 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2097 status, nsr_id, vca_index, e
2098 )
2099 )
2100
2101 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2102 """
2103 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2104 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2105 Database is used because the result can be obtained from a different LCM worker in case of HA.
2106 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2107 :param db_nslcmop: database content of nslcmop
2108 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2109 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2110 computed 'vim-account-id'
2111 """
2112 modified = False
2113 nslcmop_id = db_nslcmop["_id"]
2114 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2115 if placement_engine == "PLA":
2116 self.logger.debug(
2117 logging_text + "Invoke and wait for placement optimization"
2118 )
2119 await self.msg.aiowrite(
2120 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2121 )
2122 db_poll_interval = 5
2123 wait = db_poll_interval * 10
2124 pla_result = None
2125 while not pla_result and wait >= 0:
2126 await asyncio.sleep(db_poll_interval)
2127 wait -= db_poll_interval
2128 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2129 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2130
2131 if not pla_result:
2132 raise LcmException(
2133 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2134 )
2135
2136 for pla_vnf in pla_result["vnf"]:
2137 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2138 if not pla_vnf.get("vimAccountId") or not vnfr:
2139 continue
2140 modified = True
2141 self.db.set_one(
2142 "vnfrs",
2143 {"_id": vnfr["_id"]},
2144 {"vim-account-id": pla_vnf["vimAccountId"]},
2145 )
2146 # Modifies db_vnfrs
2147 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2148 return modified
2149
2150 def update_nsrs_with_pla_result(self, params):
2151 try:
2152 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2153 self.update_db_2(
2154 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2155 )
2156 except Exception as e:
2157 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2158
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a NS: read descriptors/records from database, deploy KDUs,
        start the RO (VIM) deployment task and deploy execution environments
        (N2VC) per VNF/VDU/KDU and at NS level; finally consolidate the result
        into the nsr/nslcmop records and notify via kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. All results are reported through the database and kafka.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do in this one
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds full vnfd dicts while vnfd_id is a
                # str, so this membership test is always True and the same vnfd
                # may be fetched more than once — confirm intended behavior
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs as a background task; completion is awaited at
            # the finally block together with the N2VC deployment tasks
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm (if the vnfd declares a configuration)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # Deploy charms for each KDU that supports one.
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu = parse_yaml_strings(
                                kdur["additionalParams"]
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    # collects per-task errors; does not raise on task failure
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify NBI/subscribers about the final operation state
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2628
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_index: int,
        timeout: int = 3600,
        vca_type: str = None,
        vca_id: str = None,
    ) -> bool:
        """
        Add all juju relations (NS level and VNF level) in which the VCA at
        vca_index participates, waiting until the peer VCAs are ready.

        :param logging_text: prefix for logging
        :param nsr_id: id of the nsr record
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param timeout: maximum seconds to wait for the peers to be ready
        :param vca_type: VCA connector type; defaults to "lxc_proxy_charm"
        :param vca_id: id of the VCA, passed through to the connector
        :return: True when all applicable relations were added (or none apply),
            False on timeout or error
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            vca_type = vca_type or "lxc_proxy_charm"

            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # this VCA data
            my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]

            # read all ns-configuration relations
            ns_relations = list()
            db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
            if db_ns_relations:
                for r in db_ns_relations:
                    # check if this VCA is in the relation
                    if my_vca.get("member-vnf-index") in (
                        r.get("entities")[0].get("id"),
                        r.get("entities")[1].get("id"),
                    ):
                        ns_relations.append(r)

            # read all vnf-configuration relations
            vnf_relations = list()
            db_vnfd_list = db_nsr.get("vnfd-id")
            if db_vnfd_list:
                for vnfd in db_vnfd_list:
                    db_vnf_relations = None
                    db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                    db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
                    if db_vnf_configuration:
                        db_vnf_relations = db_vnf_configuration.get("relation", [])
                    if db_vnf_relations:
                        for r in db_vnf_relations:
                            # check if this VCA is in the relation
                            if my_vca.get("vdu_id") in (
                                r.get("entities")[0].get("id"),
                                r.get("entities")[1].get("id"),
                            ):
                                vnf_relations.append(r)

            # if no relations, terminate
            if not ns_relations and not vnf_relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(
                logging_text
                + " adding relations\n    {}\n    {}".format(
                    ns_relations, vnf_relations
                )
            )

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deloyed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each defined NS relation, find the VCA's related
                # (iterate over a copy because relations are removed while looping)
                for r in ns_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        if vca.get("member-vnf-index") == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get("member-vnf-index") == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        ns_relations.remove(r)
                    else:
                        # check failed peers: a BROKEN peer will never become
                        # ready, so its relations are dropped instead of waiting
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        0
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        1
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                        except Exception:
                            # ignore
                            pass

                # for each defined VNF relation, find the VCA's related
                for r in vnf_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        # VNF-level VCAs have no vdu_id; match on vnfd_id instead
                        key_to_check = "vdu_id"
                        if vca.get("vdu_id") is None:
                            key_to_check = "vnfd_id"
                        if vca.get(key_to_check) == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get(key_to_check) == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        vnf_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("vdu_id") == r.get("entities")[0].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                                    if vca.get("vdu_id") == r.get("entities")[1].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                        except Exception:
                            # ignore
                            pass

                # wait for next try
                await asyncio.sleep(5.0)

                if not ns_relations and not vnf_relations:
                    self.logger.debug("Relations added")
                    break

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
2832
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """
        Install a KDU in its K8s cluster, record services/mgmt ip and status at
        the vnfr, and run its non-juju initial-config-primitives.

        :param nsr_id: id of the nsr record
        :param nsr_db_path: path inside the nsr record where kdu status is written
        :param vnfr_data: vnfr database record of the owning VNF
        :param kdu_index: index of this kdu inside the vnfr "kdur" list
        :param kdud: kdu descriptor
        :param vnfd: vnf descriptor
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace, etc.
        :param k8params: instantiation parameters for the kdu
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: id of the VCA, passed through to the k8s connector
        :return: the kdu_instance name
        :raises: re-raises any error after recording it at nsr/vnfr records
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honour an explicit deployment name from the descriptor; otherwise
            # let the k8s connector generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )
            # record the instance name before installing, so it can be found for
            # cleanup even if the install fails half-way
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )
            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            # external_ip preferred over cluster_ip when present
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial-config-primitives here only when they are not handled
            # by a juju execution environment
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
2997
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """
        Launch one _install_kdu task per KDU record (kdur) found in the VNF records.

        For each kdur it resolves the target k8s cluster (initializing helm-v3 on
        legacy clusters when needed), synchronizes the helm repos once per cluster,
        pre-fills _admin.deployed.K8s.<index> in the nsr, and registers an
        asyncio task that performs the actual installation.

        :param logging_text: prefix for all log messages
        :param nsr_id: nsr _id being instantiated
        :param nslcmop_id: current operation id, used to register tasks
        :param db_vnfrs: dict of vnfr documents (values are iterated)
        :param db_vnfds: list of vnfd documents, looked up by _id
        :param task_instantiation_info: dict task -> human readable description,
            filled here for each launched deployment task
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # cache of already-resolved cluster internal ids: cluster_type -> {cluster_id: k8s_id}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal id of a k8s cluster for a given type."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage and storage.get(
                            "pkg-dir"
                        ):  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            filename = "{}/{}/{}s/{}".format(
                                storage["folder"],
                                storage["pkg-dir"],
                                k8sclustertype,
                                kdumodel,
                            )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per cluster and per helm flavour)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=600,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever was collected, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3264
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Launch one instantiate_N2VC asyncio task per execution environment
        found in descriptor_config, registering each task.

        Existing deployments are looked up at db_nsr._admin.deployed.VCA; when a
        matching entry is not found, a new VCA entry is created and stored both
        in the database and in the in-memory db_nsr, and its index is used for
        the task. task_instantiation_info is filled with a description per task.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                # juju EE: decide between proxy/native/k8s charm
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                # helm EE: v2 -> "helm", anything else -> "helm-v3"
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # scan existing VCA entries; the for/else creates a new entry when no match
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                # after the loop vca_index is the last used index; new entry goes next
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3417
3418 @staticmethod
3419 def _create_nslcmop(nsr_id, operation, params):
3420 """
3421 Creates a ns-lcm-opp content to be stored at database.
3422 :param nsr_id: internal id of the instance
3423 :param operation: instantiate, terminate, scale, action, ...
3424 :param params: user parameters for the operation
3425 :return: dictionary following SOL005 format
3426 """
3427 # Raise exception if invalid arguments
3428 if not (nsr_id and operation and params):
3429 raise LcmException(
3430 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3431 )
3432 now = time()
3433 _id = str(uuid4())
3434 nslcmop = {
3435 "id": _id,
3436 "_id": _id,
3437 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3438 "operationState": "PROCESSING",
3439 "statusEnteredTime": now,
3440 "nsInstanceId": nsr_id,
3441 "lcmOperationType": operation,
3442 "startTime": now,
3443 "isAutomaticInvocation": False,
3444 "operationParams": params,
3445 "isCancelPending": False,
3446 "links": {
3447 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3448 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3449 },
3450 }
3451 return nslcmop
3452
3453 def _format_additional_params(self, params):
3454 params = params or {}
3455 for key, value in params.items():
3456 if str(value).startswith("!!yaml "):
3457 params[key] = yaml.safe_load(value[7:])
3458 return params
3459
3460 def _get_terminate_primitive_params(self, seq, vnf_index):
3461 primitive = seq.get("name")
3462 primitive_params = {}
3463 params = {
3464 "member_vnf_index": vnf_index,
3465 "primitive": primitive,
3466 "primitive_params": primitive_params,
3467 }
3468 desc_params = {}
3469 return self._map_primitive_params(seq, params, desc_params)
3470
3471 # sub-operations
3472
3473 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3474 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3475 if op.get("operationState") == "COMPLETED":
3476 # b. Skip sub-operation
3477 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3478 return self.SUBOPERATION_STATUS_SKIP
3479 else:
3480 # c. retry executing sub-operation
3481 # The sub-operation exists, and operationState != 'COMPLETED'
3482 # Update operationState = 'PROCESSING' to indicate a retry.
3483 operationState = "PROCESSING"
3484 detailed_status = "In progress"
3485 self._update_suboperation_status(
3486 db_nslcmop, op_index, operationState, detailed_status
3487 )
3488 # Return the sub-operation index
3489 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3490 # with arguments extracted from the sub-operation
3491 return op_index
3492
3493 # Find a sub-operation where all keys in a matching dictionary must match
3494 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3495 def _find_suboperation(self, db_nslcmop, match):
3496 if db_nslcmop and match:
3497 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3498 for i, op in enumerate(op_list):
3499 if all(op.get(k) == match[k] for k in match):
3500 return i
3501 return self.SUBOPERATION_STATUS_NOT_FOUND
3502
3503 # Update status for a sub-operation given its index
3504 def _update_suboperation_status(
3505 self, db_nslcmop, op_index, operationState, detailed_status
3506 ):
3507 # Update DB for HA tasks
3508 q_filter = {"_id": db_nslcmop["_id"]}
3509 update_dict = {
3510 "_admin.operations.{}.operationState".format(op_index): operationState,
3511 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3512 }
3513 self.db.set_one(
3514 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3515 )
3516
3517 # Add sub-operation, return the index of the added sub-operation
3518 # Optionally, set operationState, detailed-status, and operationType
3519 # Status and type are currently set for 'scale' sub-operations:
3520 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3521 # 'detailed-status' : status message
3522 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3523 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3524 def _add_suboperation(
3525 self,
3526 db_nslcmop,
3527 vnf_index,
3528 vdu_id,
3529 vdu_count_index,
3530 vdu_name,
3531 primitive,
3532 mapped_primitive_params,
3533 operationState=None,
3534 detailed_status=None,
3535 operationType=None,
3536 RO_nsr_id=None,
3537 RO_scaling_info=None,
3538 ):
3539 if not db_nslcmop:
3540 return self.SUBOPERATION_STATUS_NOT_FOUND
3541 # Get the "_admin.operations" list, if it exists
3542 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3543 op_list = db_nslcmop_admin.get("operations")
3544 # Create or append to the "_admin.operations" list
3545 new_op = {
3546 "member_vnf_index": vnf_index,
3547 "vdu_id": vdu_id,
3548 "vdu_count_index": vdu_count_index,
3549 "primitive": primitive,
3550 "primitive_params": mapped_primitive_params,
3551 }
3552 if operationState:
3553 new_op["operationState"] = operationState
3554 if detailed_status:
3555 new_op["detailed-status"] = detailed_status
3556 if operationType:
3557 new_op["lcmOperationType"] = operationType
3558 if RO_nsr_id:
3559 new_op["RO_nsr_id"] = RO_nsr_id
3560 if RO_scaling_info:
3561 new_op["RO_scaling_info"] = RO_scaling_info
3562 if not op_list:
3563 # No existing operations, create key 'operations' with current operation as first list element
3564 db_nslcmop_admin.update({"operations": [new_op]})
3565 op_list = db_nslcmop_admin.get("operations")
3566 else:
3567 # Existing operations, append operation to list
3568 op_list.append(new_op)
3569
3570 db_nslcmop_update = {"_admin.operations": op_list}
3571 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3572 op_index = len(op_list) - 1
3573 return op_index
3574
3575 # Helper methods for scale() sub-operations
3576
3577 # pre-scale/post-scale:
3578 # Check for 3 different cases:
3579 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3580 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3581 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3582 def _check_or_add_scale_suboperation(
3583 self,
3584 db_nslcmop,
3585 vnf_index,
3586 vnf_config_primitive,
3587 primitive_params,
3588 operationType,
3589 RO_nsr_id=None,
3590 RO_scaling_info=None,
3591 ):
3592 # Find this sub-operation
3593 if RO_nsr_id and RO_scaling_info:
3594 operationType = "SCALE-RO"
3595 match = {
3596 "member_vnf_index": vnf_index,
3597 "RO_nsr_id": RO_nsr_id,
3598 "RO_scaling_info": RO_scaling_info,
3599 }
3600 else:
3601 match = {
3602 "member_vnf_index": vnf_index,
3603 "primitive": vnf_config_primitive,
3604 "primitive_params": primitive_params,
3605 "lcmOperationType": operationType,
3606 }
3607 op_index = self._find_suboperation(db_nslcmop, match)
3608 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3609 # a. New sub-operation
3610 # The sub-operation does not exist, add it.
3611 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3612 # The following parameters are set to None for all kind of scaling:
3613 vdu_id = None
3614 vdu_count_index = None
3615 vdu_name = None
3616 if RO_nsr_id and RO_scaling_info:
3617 vnf_config_primitive = None
3618 primitive_params = None
3619 else:
3620 RO_nsr_id = None
3621 RO_scaling_info = None
3622 # Initial status for sub-operation
3623 operationState = "PROCESSING"
3624 detailed_status = "In progress"
3625 # Add sub-operation for pre/post-scaling (zero or more operations)
3626 self._add_suboperation(
3627 db_nslcmop,
3628 vnf_index,
3629 vdu_id,
3630 vdu_count_index,
3631 vdu_name,
3632 vnf_config_primitive,
3633 primitive_params,
3634 operationState,
3635 detailed_status,
3636 operationType,
3637 RO_nsr_id,
3638 RO_scaling_info,
3639 )
3640 return self.SUBOPERATION_STATUS_NEW
3641 else:
3642 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3643 # or op_index (operationState != 'COMPLETED')
3644 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3645
3646 # Function to return execution_environment id
3647
3648 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3649 # TODO vdu_index_count
3650 for vca in vca_deployed_list:
3651 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3652 return vca["ee_id"]
3653
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and optionally destroy the execution
        environment (when destroy_ee=True).
        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database document (sub-operations are added to it)
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) identifier, passed through to n2vc calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {}  for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # remove the prometheus scrape jobs registered by this VCA, if any
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
3758
3759 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3760 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3761 namespace = "." + db_nsr["_id"]
3762 try:
3763 await self.n2vc.delete_namespace(
3764 namespace=namespace,
3765 total_timeout=self.timeout_charm_delete,
3766 vca_id=vca_id,
3767 )
3768 except N2VCNotFound: # already deleted. Skip
3769 pass
3770 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3771
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO: deletes the ns from the VIM (waiting
        for completion), then the nsd and every vnfd registered at RO.
        :param logging_text: prefix for log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: nsr database _id
        :param nslcmop_id: current operation id, used to report progress
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: with the concatenated failure details when any delete fails
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates error strings; non-empty aborts later steps
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                # record the pending delete action so it can be resumed on restart
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # still deleting: keep polling and report progress
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write status to db only when it changed, to avoid db churn
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only if the ns deletion above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete every vnfd registered at RO (only if previous steps succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
3971
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a network service.

        Three stages, mirrored in the persisted ``stage`` list:
          1. Prepare: wait for related HA operations, load nslcmop/nsr records.
          2. Execute terminating primitives on each deployed VCA that needs them.
          3. Delete everything at once: execution environments, KDU instances
             and the VIM deployment (via NG-RO or classic RO).

        Progress and the final result are written to the "nsrs"/"nslcmops"
        records and notified on kafka topic "ns"/"terminated"; nothing is
        returned to the caller.

        :param nsr_id: _id of the nsrs record to terminate
        :param nslcmop_id: _id of the nslcmops record driving this operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human-readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # user-provided timeout overrides the LCM default
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy so that in-function mutations do not leak into db_nsr
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; final status is written at finally
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            # cache each VNFD once even if several members share it
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching the VCA scope:
                # ns-level, vdu-level, kdu-level, or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # abort stage 3; final status is written at finally
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep full traceback as the reported detail
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            # NOTE(review): "exc" here shadows the outer exc variable; harmless
            # because the outer value was already appended above, but confusing.
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                # propagate the final state to all member VNFRs (best effort)
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                # notify the outcome on kafka; NBI may delete the NS on autoremove
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4299
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, reporting progress as they finish.

        Updates ``stage[1]`` with a "done/total" counter (plus error summary)
        and persists it after each completion via _write_op_status. On overall
        timeout, the still-pending tasks are recorded as "Timeout" errors and
        the wait is abandoned (pending tasks are NOT cancelled here).

        :param logging_text: prefix for every log line
        :param created_tasks_info: dict mapping task -> human-readable description
        :param timeout: overall deadline in seconds for the whole set
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is mutated
        :param nslcmop_id: operation id for progress persistence
        :param nsr_id: if given, errors are also written to the nsrs record
        :return: list of error-detail strings (empty if all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget of the overall deadline
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled failures get a one-line log;
                    # anything else gets a full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4376
4377 @staticmethod
4378 def _map_primitive_params(primitive_desc, params, instantiation_params):
4379 """
4380 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4381 The default-value is used. If it is between < > it look for a value at instantiation_params
4382 :param primitive_desc: portion of VNFD/NSD that describes primitive
4383 :param params: Params provided by user
4384 :param instantiation_params: Instantiation params provided by user
4385 :return: a dictionary with the calculated params
4386 """
4387 calculated_params = {}
4388 for parameter in primitive_desc.get("parameter", ()):
4389 param_name = parameter["name"]
4390 if param_name in params:
4391 calculated_params[param_name] = params[param_name]
4392 elif "default-value" in parameter or "value" in parameter:
4393 if "value" in parameter:
4394 calculated_params[param_name] = parameter["value"]
4395 else:
4396 calculated_params[param_name] = parameter["default-value"]
4397 if (
4398 isinstance(calculated_params[param_name], str)
4399 and calculated_params[param_name].startswith("<")
4400 and calculated_params[param_name].endswith(">")
4401 ):
4402 if calculated_params[param_name][1:-1] in instantiation_params:
4403 calculated_params[param_name] = instantiation_params[
4404 calculated_params[param_name][1:-1]
4405 ]
4406 else:
4407 raise LcmException(
4408 "Parameter {} needed to execute primitive {} not provided".format(
4409 calculated_params[param_name], primitive_desc["name"]
4410 )
4411 )
4412 else:
4413 raise LcmException(
4414 "Parameter {} needed to execute primitive {} not provided".format(
4415 param_name, primitive_desc["name"]
4416 )
4417 )
4418
4419 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4420 calculated_params[param_name] = yaml.safe_dump(
4421 calculated_params[param_name], default_flow_style=True, width=256
4422 )
4423 elif isinstance(calculated_params[param_name], str) and calculated_params[
4424 param_name
4425 ].startswith("!!yaml "):
4426 calculated_params[param_name] = calculated_params[param_name][7:]
4427 if parameter.get("data-type") == "INTEGER":
4428 try:
4429 calculated_params[param_name] = int(calculated_params[param_name])
4430 except ValueError: # error converting string to int
4431 raise LcmException(
4432 "Parameter {} of primitive {} must be integer".format(
4433 param_name, primitive_desc["name"]
4434 )
4435 )
4436 elif parameter.get("data-type") == "BOOLEAN":
4437 calculated_params[param_name] = not (
4438 (str(calculated_params[param_name])).lower() == "false"
4439 )
4440
4441 # add always ns_config_info if primitive name is config
4442 if primitive_desc["name"] == "config":
4443 if "ns_config_info" in instantiation_params:
4444 calculated_params["ns_config_info"] = instantiation_params[
4445 "ns_config_info"
4446 ]
4447 return calculated_params
4448
4449 def _look_for_deployed_vca(
4450 self,
4451 deployed_vca,
4452 member_vnf_index,
4453 vdu_id,
4454 vdu_count_index,
4455 kdu_name=None,
4456 ee_descriptor_id=None,
4457 ):
4458 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4459 for vca in deployed_vca:
4460 if not vca:
4461 continue
4462 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4463 continue
4464 if (
4465 vdu_count_index is not None
4466 and vdu_count_index != vca["vdu_count_index"]
4467 ):
4468 continue
4469 if kdu_name and kdu_name != vca["kdu_name"]:
4470 continue
4471 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4472 continue
4473 break
4474 else:
4475 # vca_deployed not found
4476 raise LcmException(
4477 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4478 " is not deployed".format(
4479 member_vnf_index,
4480 vdu_id,
4481 vdu_count_index,
4482 kdu_name,
4483 ee_descriptor_id,
4484 )
4485 )
4486 # get ee_id
4487 ee_id = vca.get("ee_id")
4488 vca_type = vca.get(
4489 "type", "lxc_proxy_charm"
4490 ) # default value for backward compatibility - proxy charm
4491 if not ee_id:
4492 raise LcmException(
4493 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4494 "execution environment".format(
4495 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4496 )
4497 )
4498 return ee_id, vca_type
4499
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on an execution environment, with retries.

        :param ee_id: execution environment id
        :param primitive: primitive name; "config" gets its params wrapped
            as {"params": primitive_params} before execution
        :param primitive_params: params dict for the primitive
        :param retries: number of additional attempts after a failure
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: VCA connector key; defaults to "lxc_proxy_charm"
        :param db_dict: where to write primitive progress (passed through)
        :param vca_id: VCA account id (passed through)
        :return: tuple (operation_state, detail): ("COMPLETED", output) on
            success, ("FAILED", error) when retries are exhausted, or
            ("FAIL", error) on unexpected errors.
            NOTE(review): "FAIL" vs "FAILED" inconsistency — confirm callers
            only compare against "FAILED" before unifying.
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            # attempt loop: first try plus `retries` retries
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # retries exhausted
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4558
4559 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4560 """
4561 Updating the vca_status with latest juju information in nsrs record
4562 :param: nsr_id: Id of the nsr
4563 :param: nslcmop_id: Id of the nslcmop
4564 :return: None
4565 """
4566
4567 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4568 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4569 vca_id = self.get_vca_id({}, db_nsr)
4570 if db_nsr["_admin"]["deployed"]["K8s"]:
4571 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4572 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4573 await self._on_update_k8s_db(
4574 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4575 )
4576 else:
4577 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4578 table, filter = "nsrs", {"_id": nsr_id}
4579 path = "_admin.deployed.VCA.{}.".format(vca_index)
4580 await self._on_update_n2vc_db(table, filter, path, {})
4581
4582 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4583 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4584
    async def action(self, nsr_id, nslcmop_id):
        """Run a (day-2) primitive on a NS, VNF, VDU or KDU.

        Resolves the primitive descriptor from the NSD/VNFD, maps its params,
        and dispatches either to the K8s connector (kdu upgrade/rollback/
        status/actions) or to the deployed VCA execution environment. Final
        state is persisted in "nslcmops"/"nsrs" and notified on kafka topic
        "ns"/"actioned".

        :param nsr_id: _id of the nsrs record
        :param nslcmop_id: _id of the nslcmops record describing the action
        :return: tuple (nslcmop_operation_state, detailed_status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            nsr_deployed = db_nsr["_admin"].get("deployed")
            # target selectors: absence of vnf_index means ns-level primitive
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound in the vnf_index branch above;
            # for a pure ns-level action this line raises NameError (caught by
            # the generic except below) — confirm and guard.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only the built-in kdu verbs may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect additionalParams at the narrowest matching scope
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): the loop variable below clobbers the outer
                # `primitive` name; harmless today because only primitive_name
                # is used afterwards, but fragile — confirm before refactoring.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu_action = True if primitive_name in actions else False

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is unbound when kdu_name is set but the
            # VNFD has no configuration for it — potential NameError; verify.
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model from params wins over the deployed one;
                    # the deployed one may carry a ":version" suffix to strip
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # custom kdu action declared in the descriptor
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm-based primitive: find the target execution environment
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                # single chained assignment keeps the three error fields equal
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                # best-effort notification of the final result on kafka
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
4922
    async def scale(self, nsr_id, nslcmop_id):
        """Scale a VNF of a NS in or out (VDUs via RO, KDUs via K8s, charms via VCA).

        Reads the scaling request from the nslcmop's
        operationParams["scaleVnfData"] (member-vnf-index, scaling-group
        descriptor name and SCALE_OUT/SCALE_IN type), computes per-VDU and
        per-KDU deltas from the VNFD scaling-aspect, then executes in order:
        pre-scale config primitives, VCA scale-in (charm removal), RO scaling
        (VDU create/delete), KDU scaling, VCA scale-out (charm deployment) and
        post-scale config primitives.  Progress and final status are persisted
        to the nslcmops/nsrs collections and a kafka "scaled" message is sent.

        :param nsr_id: _id of the NS record being scaled
        :param nslcmop_id: _id of the nslcmop that triggered this task
        :return: None (results are written to the database / kafka)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation; nothing to do here
            return

        logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        scale_process = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="SCALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # remember previous statuses so they can be restored at the end
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            step = "Parsing scaling parameters"
            db_nsr_update["operational-status"] = "scaling"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["member-vnf-index"]
            scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["scaling-group-descriptor"]
            scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                # old records stored VCA as a dict; normalize to a list in place
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
            )

            vca_id = self.get_vca_id(db_vnfr, db_nsr)

            step = "Getting vnfd from database"
            db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

            base_folder = db_vnfd["_admin"]["storage"]

            step = "Getting scaling-group-descriptor"
            # locate the scaling-aspect in the VNFD whose name matches the request
            scaling_descriptor = find_in_list(
                get_scaling_aspect(db_vnfd),
                lambda scale_desc: scale_desc["name"] == scaling_group,
            )
            if not scaling_descriptor:
                raise LcmException(
                    "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
                    "at vnfd:scaling-group-descriptor".format(scaling_group)
                )

            step = "Sending scale order to VIM"
            # TODO check if ns is in a proper status
            # nb_scale_op: cumulative number of scale operations already applied
            # to this scaling-group (persisted in nsr _admin.scaling-group)
            nb_scale_op = 0
            if not db_nsr["_admin"].get("scaling-group"):
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {
                        "_admin.scaling-group": [
                            {"name": scaling_group, "nb-scale-op": 0}
                        ]
                    },
                )
                admin_scale_index = 0
            else:
                for admin_scale_index, admin_scale_info in enumerate(
                    db_nsr["_admin"]["scaling-group"]
                ):
                    if admin_scale_info["name"] == scaling_group:
                        nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                        break
                else:  # not found, set index one plus last element and add new entry with the name
                    admin_scale_index += 1
                    db_nsr_update[
                        "_admin.scaling-group.{}.name".format(admin_scale_index)
                    ] = scaling_group

            # vca_scaling_info: per-instance "create"/"delete" actions for the VCA
            # scaling_info: aggregated counts consumed by RO and K8s scaling
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
            if scaling_type == "SCALE_OUT":
                if "aspect-delta-details" not in scaling_descriptor:
                    raise LcmException(
                        "Aspect delta details not fount in scaling descriptor {}".format(
                            scaling_descriptor["name"]
                        )
                    )
                # count if max-instance-count is reached
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "OUT"
                scaling_info["vdu-create"] = {}
                scaling_info["kdu-create"] = {}
                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        vdud = get_vdu(db_vnfd, vdu_delta["id"])
                        # vdu_index also provides the number of instance of the targeted vdu
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        cloud_init_text = self._get_vdu_cloud_init_content(
                            vdud, db_vnfd
                        )
                        if cloud_init_text:
                            additional_params = (
                                self._get_vdu_additional_params(db_vnfr, vdud["id"])
                                or {}
                            )
                        cloud_init_list = []

                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        # default ceiling when the VNFD does not declare one
                        max_instance_count = 10
                        if vdu_profile and "max-number-of-instances" in vdu_profile:
                            max_instance_count = vdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdud["id"]
                        )
                        instances_number = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op += instances_number

                        new_instance_count = nb_scale_op + default_instance_num
                        # Control if new count is over max and vdu count is less than max.
                        # Then assign new instance count
                        # NOTE(review): this clamp looks dead — whenever the
                        # first branch is taken, the check right below raises
                        # anyway because new_instance_count > max_instance_count.
                        # The else branch is a no-op. Confirm intended semantics.
                        if new_instance_count > max_instance_count > vdu_count:
                            instances_number = new_instance_count - max_instance_count
                        else:
                            instances_number = instances_number

                        if new_instance_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            if cloud_init_text:
                                # TODO Information of its own ip is not available because db_vnfr is not updated.
                                additional_params["OSM"] = get_osm_params(
                                    db_vnfr, vdu_delta["id"], vdu_index + x
                                )
                                cloud_init_list.append(
                                    self._parse_cloud_init(
                                        cloud_init_text,
                                        additional_params,
                                        db_vnfd["id"],
                                        vdud["id"],
                                    )
                                )
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "vdu_index": vdu_index + x,
                                }
                            )
                        scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        # Might have different kdus in the same delta
                        # Should have list for each kdu
                        if not scaling_info["kdu-create"].get(kdu_name, None):
                            scaling_info["kdu-create"][kdu_name] = []

                        # determine the k8s backend type from the kdur record
                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                # helm v2 scaling is not supported
                                k8s_cluster_type = "helm-chart"
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdu_name
                                )
                            )

                        max_instance_count = 10
                        if kdu_profile and "max-number-of-instances" in kdu_profile:
                            max_instance_count = kdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        nb_scale_op += kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # ask the k8s connector for the current replica count
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num + kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # Control if new count is over max and instance_num is less than max.
                        # Then assign max instance number to kdu replica count
                        if kdu_replica_count > max_instance_count > instance_num:
                            kdu_replica_count = max_instance_count
                        if kdu_replica_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "kdu_index": instance_num + x - 1,
                                }
                            )
                        scaling_info["kdu-create"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "create",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )
            elif scaling_type == "SCALE_IN":
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "IN"
                scaling_info["vdu-delete"] = {}
                scaling_info["kdu-delete"] = {}

                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        # vdu_count: current number of instances of this vdu
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        min_instance_count = 0
                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        if vdu_profile and "min-number-of-instances" in vdu_profile:
                            min_instance_count = vdu_profile["min-number-of-instances"]

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdu_delta["id"]
                        )
                        instance_num = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op -= instance_num

                        new_instance_count = nb_scale_op + default_instance_num

                        # clamp the number of deletions so we never go below the
                        # declared minimum (mirrors the SCALE_OUT clamp above)
                        if new_instance_count < min_instance_count < vdu_count:
                            instances_number = min_instance_count - new_instance_count
                        else:
                            instances_number = instance_num

                        if new_instance_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    # delete highest-indexed instances first
                                    "vdu_index": vdu_index - 1 - x,
                                }
                            )
                        scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        if not scaling_info["kdu-delete"].get(kdu_name, None):
                            scaling_info["kdu-delete"][kdu_name] = []

                        # determine the k8s backend type from the kdur record
                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                # helm v2 scaling is not supported
                                k8s_cluster_type = "helm-chart"
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
                                )
                            )

                        min_instance_count = 0
                        if kdu_profile and "min-number-of-instances" in kdu_profile:
                            min_instance_count = kdu_profile["min-number-of-instances"]

                        nb_scale_op -= kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # ask the k8s connector for the current replica count
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num - kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # never scale below the declared minimum replica count
                        if kdu_replica_count < min_instance_count < instance_num:
                            kdu_replica_count = min_instance_count
                        if kdu_replica_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    "kdu_index": instance_num - x - 1,
                                }
                            )
                        scaling_info["kdu-delete"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "delete",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )

            # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
            vdu_delete = copy(scaling_info.get("vdu-delete"))
            if scaling_info["scaling_direction"] == "IN":
                # walk vdur records from the end so the highest-indexed
                # instances (the ones being removed) are collected first
                for vdur in reversed(db_vnfr["vdur"]):
                    if vdu_delete.get(vdur["vdu-id-ref"]):
                        vdu_delete[vdur["vdu-id-ref"]] -= 1
                        scaling_info["vdu"].append(
                            {
                                "name": vdur.get("name") or vdur.get("vdu-name"),
                                "vdu_id": vdur["vdu-id-ref"],
                                "interface": [],
                            }
                        )
                        for interface in vdur["interfaces"]:
                            scaling_info["vdu"][-1]["interface"].append(
                                {
                                    "name": interface["name"],
                                    "ip_address": interface["ip-address"],
                                    "mac_address": interface.get("mac-address"),
                                }
                            )
            # vdu_delete = vdu_scaling_info.pop("vdu-delete")

            # PRE-SCALE BEGIN
            step = "Executing pre-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    # run only the triggers that match the scaling direction
                    if (
                        scaling_config_action.get("trigger") == "pre-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "pre-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing pre-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
                                "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
                                "primitive".format(scaling_group, vnf_config_primitive)
                            )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring pre-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Pre-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "PRE-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
                db_nsr_update["config-status"] = old_config_status
                scale_process = None
            # PRE-SCALE END

            # persist the new cumulative op counter and timestamp for this group
            db_nsr_update[
                "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
            ] = nb_scale_op
            db_nsr_update[
                "_admin.scaling-group.{}.time".format(admin_scale_index)
            ] = time()

            # SCALE-IN VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Deleting the execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "delete":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        if vca_info.get("osm_vdu_id"):
                            vdu_id = vca_info["osm_vdu_id"]
                            vdu_index = int(vca_info["vdu_index"])
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index
                            )
                        else:
                            vdu_index = 0
                            kdu_id = vca_info["osm_kdu_id"]
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
                                member_vnf_index, kdu_id, vdu_index
                            )
                        stage[2] = step = "Scaling in VCA"
                        self._write_op_status(op_id=nslcmop_id, stage=stage)
                        vca_update = db_nsr["_admin"]["deployed"]["VCA"]
                        config_update = db_nsr["configurationStatus"]
                        for vca_index, vca in enumerate(vca_update):
                            # NOTE(review): `vca or vca.get("ee_id")` is truthy
                            # for ANY non-empty vca dict and raises
                            # AttributeError when vca is None — presumably
                            # `vca and vca.get("ee_id")` was intended; confirm.
                            if (
                                (vca or vca.get("ee_id"))
                                and vca["member-vnf-index"] == member_vnf_index
                                and vca["vdu_count_index"] == vdu_index
                            ):
                                if vca.get("vdu_id"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("vdu_id")
                                    )
                                elif vca.get("kdu_name"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("kdu_name")
                                    )
                                else:
                                    config_descriptor = get_configuration(
                                        db_vnfd, db_vnfd["id"]
                                    )
                                operation_params = (
                                    db_nslcmop.get("operationParams") or {}
                                )
                                exec_terminate_primitives = not operation_params.get(
                                    "skip_terminate_primitives"
                                ) and vca.get("needed_terminate")
                                task = asyncio.ensure_future(
                                    asyncio.wait_for(
                                        self.destroy_N2VC(
                                            logging_text,
                                            db_nslcmop,
                                            vca,
                                            config_descriptor,
                                            vca_index,
                                            destroy_ee=True,
                                            exec_primitives=exec_terminate_primitives,
                                            scaling_in=True,
                                            vca_id=vca_id,
                                        ),
                                        timeout=self.timeout_charm_delete,
                                    )
                                )
                                tasks_dict_info[task] = "Terminating VCA {}".format(
                                    vca.get("ee_id")
                                )
                                # NOTE(review): deleting from vca_update /
                                # config_update while enumerate() iterates them
                                # skips the element after each deletion —
                                # verify whether multi-match removal is needed.
                                del vca_update[vca_index]
                                del config_update[vca_index]
                        # wait for pending tasks of terminate primitives
                        if tasks_dict_info:
                            self.logger.debug(
                                logging_text
                                + "Waiting for tasks {}".format(
                                    list(tasks_dict_info.keys())
                                )
                            )
                            error_list = await self._wait_for_tasks(
                                logging_text,
                                tasks_dict_info,
                                min(
                                    self.timeout_charm_delete, self.timeout_ns_terminate
                                ),
                                stage,
                                nslcmop_id,
                            )
                            tasks_dict_info.clear()
                            if error_list:
                                raise LcmException("; ".join(error_list))

                        db_vca_and_config_update = {
                            "_admin.deployed.VCA": vca_update,
                            "configurationStatus": config_update,
                        }
                        self.update_db_2(
                            "nsrs", db_nsr["_id"], db_vca_and_config_update
                        )
                scale_process = None
            # SCALE-IN VCA - END

            # SCALE RO - BEGIN
            if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                scale_process = "RO"
                if self.ro_config.get("ng"):
                    # delegate VDU creation/deletion to the (new-generation) RO
                    await self._scale_ng_ro(
                        logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                    )
                scaling_info.pop("vdu-create", None)
                scaling_info.pop("vdu-delete", None)

                scale_process = None
            # SCALE RO - END

            # SCALE KDU - BEGIN
            if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
                scale_process = "KDU"
                await self._scale_kdu(
                    logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
                )
                scaling_info.pop("kdu-create", None)
                scaling_info.pop("kdu-delete", None)

                scale_process = None
            # SCALE KDU - END

            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # SCALE-UP VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Creating new execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "create":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        vnfd_id = db_vnfr["vnfd-ref"]
                        if vca_info.get("osm_vdu_id"):
                            vdu_index = int(vca_info["vdu_index"])
                            deploy_params = {"OSM": get_osm_params(db_vnfr)}
                            if db_vnfr.get("additionalParamsForVnf"):
                                deploy_params.update(
                                    parse_yaml_strings(
                                        db_vnfr["additionalParamsForVnf"].copy()
                                    )
                                )
                            # vnf-level configuration charm (if any)
                            descriptor_config = get_configuration(
                                db_vnfd, db_vnfd["id"]
                            )
                            if descriptor_config:
                                vdu_id = None
                                vdu_name = None
                                kdu_name = None
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={} ".format(member_vnf_index),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                            # vdu-level configuration charm (if any)
                            vdu_id = vca_info["osm_vdu_id"]
                            vdur = find_in_list(
                                db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                            )
                            descriptor_config = get_configuration(db_vnfd, vdu_id)
                            if vdur.get("additionalParams"):
                                deploy_params_vdu = parse_yaml_strings(
                                    vdur["additionalParams"]
                                )
                            else:
                                deploy_params_vdu = deploy_params
                            deploy_params_vdu["OSM"] = get_osm_params(
                                db_vnfr, vdu_id, vdu_count_index=vdu_index
                            )
                            if descriptor_config:
                                vdu_name = None
                                kdu_name = None
                                stage[
                                    1
                                ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                )
                                stage[2] = step = "Scaling out VCA"
                                self._write_op_status(op_id=nslcmop_id, stage=stage)
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_id, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                        else:
                            # kdu-level configuration charm (if any)
                            kdu_name = vca_info["osm_kdu_id"]
                            descriptor_config = get_configuration(db_vnfd, kdu_name)
                            if descriptor_config:
                                vdu_id = None
                                kdu_index = int(vca_info["kdu_index"])
                                vdu_name = None
                                kdur = next(
                                    x
                                    for x in db_vnfr["kdur"]
                                    if x["kdu-name"] == kdu_name
                                )
                                deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                                if kdur.get("additionalParams"):
                                    deploy_params_kdu = parse_yaml_strings(
                                        kdur["additionalParams"]
                                    )

                                self._deploy_n2vc(
                                    logging_text=logging_text,
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=kdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_kdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
            # SCALE-UP VCA - END
            scale_process = None

            # POST-SCALE BEGIN
            # execute primitive service POST-SCALING
            step = "Executing post-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    # run only the triggers that match the scaling direction
                    if (
                        scaling_config_action.get("trigger") == "post-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "post-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing post-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
                                "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
                                "config-primitive".format(
                                    scaling_group, vnf_config_primitive
                                )
                            )
                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring post-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Post-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "POST-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
                db_nsr_update["config-status"] = old_config_status
                scale_process = None
            # POST-SCALE END

            db_nsr_update[
                "detailed-status"
            ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
            db_nsr_update["operational-status"] = (
                "running"
                if old_operational_status == "failed"
                else old_operational_status
            )
            db_nsr_update["config-status"] = old_config_status
            return
        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # always runs: flush status, await pending tasks, notify kafka
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update["detailed-status"] = ""
                    # scale_process records which phase was in flight when the
                    # failure happened, to mark only the affected status field
                    if scale_process:
                        if "VCA" in scale_process:
                            db_nsr_update["config-status"] = "failed"
                        if "RO" in scale_process:
                            db_nsr_update["operational-status"] = "failed"
                        db_nsr_update[
                            "detailed-status"
                        ] = "FAILED scaling nslcmop={} {}: {}".format(
                            nslcmop_id, step, exc
                        )
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # best-effort kafka notification; failure is only logged
                    await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
5985
5986 async def _scale_kdu(
5987 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
5988 ):
5989 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
5990 for kdu_name in _scaling_info:
5991 for kdu_scaling_info in _scaling_info[kdu_name]:
5992 deployed_kdu, index = get_deployed_kdu(
5993 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
5994 )
5995 cluster_uuid = deployed_kdu["k8scluster-uuid"]
5996 kdu_instance = deployed_kdu["kdu-instance"]
5997 scale = int(kdu_scaling_info["scale"])
5998 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
5999
6000 db_dict = {
6001 "collection": "nsrs",
6002 "filter": {"_id": nsr_id},
6003 "path": "_admin.deployed.K8s.{}".format(index),
6004 }
6005
6006 step = "scaling application {}".format(
6007 kdu_scaling_info["resource-name"]
6008 )
6009 self.logger.debug(logging_text + step)
6010
6011 if kdu_scaling_info["type"] == "delete":
6012 kdu_config = get_configuration(db_vnfd, kdu_name)
6013 if (
6014 kdu_config
6015 and kdu_config.get("terminate-config-primitive")
6016 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6017 ):
6018 terminate_config_primitive_list = kdu_config.get(
6019 "terminate-config-primitive"
6020 )
6021 terminate_config_primitive_list.sort(
6022 key=lambda val: int(val["seq"])
6023 )
6024
6025 for (
6026 terminate_config_primitive
6027 ) in terminate_config_primitive_list:
6028 primitive_params_ = self._map_primitive_params(
6029 terminate_config_primitive, {}, {}
6030 )
6031 step = "execute terminate config primitive"
6032 self.logger.debug(logging_text + step)
6033 await asyncio.wait_for(
6034 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6035 cluster_uuid=cluster_uuid,
6036 kdu_instance=kdu_instance,
6037 primitive_name=terminate_config_primitive["name"],
6038 params=primitive_params_,
6039 db_dict=db_dict,
6040 vca_id=vca_id,
6041 ),
6042 timeout=600,
6043 )
6044
6045 await asyncio.wait_for(
6046 self.k8scluster_map[k8s_cluster_type].scale(
6047 kdu_instance,
6048 scale,
6049 kdu_scaling_info["resource-name"],
6050 vca_id=vca_id,
6051 ),
6052 timeout=self.timeout_vca_on_error,
6053 )
6054
6055 if kdu_scaling_info["type"] == "create":
6056 kdu_config = get_configuration(db_vnfd, kdu_name)
6057 if (
6058 kdu_config
6059 and kdu_config.get("initial-config-primitive")
6060 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6061 ):
6062 initial_config_primitive_list = kdu_config.get(
6063 "initial-config-primitive"
6064 )
6065 initial_config_primitive_list.sort(
6066 key=lambda val: int(val["seq"])
6067 )
6068
6069 for initial_config_primitive in initial_config_primitive_list:
6070 primitive_params_ = self._map_primitive_params(
6071 initial_config_primitive, {}, {}
6072 )
6073 step = "execute initial config primitive"
6074 self.logger.debug(logging_text + step)
6075 await asyncio.wait_for(
6076 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6077 cluster_uuid=cluster_uuid,
6078 kdu_instance=kdu_instance,
6079 primitive_name=initial_config_primitive["name"],
6080 params=primitive_params_,
6081 db_dict=db_dict,
6082 vca_id=vca_id,
6083 ),
6084 timeout=600,
6085 )
6086
6087 async def _scale_ng_ro(
6088 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6089 ):
6090 nsr_id = db_nslcmop["nsInstanceId"]
6091 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6092 db_vnfrs = {}
6093
6094 # read from db: vnfd's for every vnf
6095 db_vnfds = []
6096
6097 # for each vnf in ns, read vnfd
6098 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6099 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6100 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6101 # if we haven't this vnfd, read it from db
6102 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6103 # read from db
6104 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6105 db_vnfds.append(vnfd)
6106 n2vc_key = self.n2vc.get_public_key()
6107 n2vc_key_list = [n2vc_key]
6108 self.scale_vnfr(
6109 db_vnfr,
6110 vdu_scaling_info.get("vdu-create"),
6111 vdu_scaling_info.get("vdu-delete"),
6112 mark_delete=True,
6113 )
6114 # db_vnfr has been updated, update db_vnfrs to use it
6115 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6116 await self._instantiate_ng_ro(
6117 logging_text,
6118 nsr_id,
6119 db_nsd,
6120 db_nsr,
6121 db_nslcmop,
6122 db_vnfrs,
6123 db_vnfds,
6124 n2vc_key_list,
6125 stage=stage,
6126 start_deploy=time(),
6127 timeout_ns_deploy=self.timeout_ns_deploy,
6128 )
6129 if vdu_scaling_info.get("vdu-delete"):
6130 self.scale_vnfr(
6131 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6132 )
6133
6134 async def add_prometheus_metrics(
6135 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6136 ):
6137 if not self.prometheus:
6138 return
6139 # look if exist a file called 'prometheus*.j2' and
6140 artifact_content = self.fs.dir_ls(artifact_path)
6141 job_file = next(
6142 (
6143 f
6144 for f in artifact_content
6145 if f.startswith("prometheus") and f.endswith(".j2")
6146 ),
6147 None,
6148 )
6149 if not job_file:
6150 return
6151 with self.fs.file_open((artifact_path, job_file), "r") as f:
6152 job_data = f.read()
6153
6154 # TODO get_service
6155 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6156 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6157 host_port = "80"
6158 vnfr_id = vnfr_id.replace("-", "")
6159 variables = {
6160 "JOB_NAME": vnfr_id,
6161 "TARGET_IP": target_ip,
6162 "EXPORTER_POD_IP": host_name,
6163 "EXPORTER_POD_PORT": host_port,
6164 }
6165 job_list = self.prometheus.parse_job(job_data, variables)
6166 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
6167 for job in job_list:
6168 if (
6169 not isinstance(job.get("job_name"), str)
6170 or vnfr_id not in job["job_name"]
6171 ):
6172 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6173 job["nsr_id"] = nsr_id
6174 job_dict = {jl["job_name"]: jl for jl in job_list}
6175 if await self.prometheus.update(job_dict):
6176 return list(job_dict.keys())
6177
6178 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6179 """
6180 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6181
6182 :param: vim_account_id: VIM Account ID
6183
6184 :return: (cloud_name, cloud_credential)
6185 """
6186 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6187 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6188
6189 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6190 """
6191 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6192
6193 :param: vim_account_id: VIM Account ID
6194
6195 :return: (cloud_name, cloud_credential)
6196 """
6197 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6198 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")