Fix 1582: deploy juju units = number-of-instances
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import (
26 Environment,
27 TemplateError,
28 TemplateNotFound,
29 StrictUndefined,
30 UndefinedError,
31 )
32
33 from osm_lcm import ROclient
34 from osm_lcm.data_utils.nsr import get_deployed_kdu
35 from osm_lcm.ng_ro import NgRoClient, NgRoException
36 from osm_lcm.lcm_utils import (
37 LcmException,
38 LcmExceptionNoMgmtIP,
39 LcmBase,
40 deep_get,
41 get_iterable,
42 populate_dict,
43 )
44 from osm_lcm.data_utils.nsd import get_vnf_profiles
45 from osm_lcm.data_utils.vnfd import (
46 get_vdu_list,
47 get_vdu_profile,
48 get_ee_sorted_initial_config_primitive_list,
49 get_ee_sorted_terminate_config_primitive_list,
50 get_kdu_list,
51 get_virtual_link_profiles,
52 get_vdu,
53 get_configuration,
54 get_vdu_index,
55 get_scaling_aspect,
56 get_number_of_instances,
57 get_juju_ee_ref,
58 get_kdu_profile,
59 )
60 from osm_lcm.data_utils.list_utils import find_in_list
61 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
62 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
63 from osm_lcm.data_utils.database.vim_account import VimAccountDB
64 from n2vc.k8s_helm_conn import K8sHelmConnector
65 from n2vc.k8s_helm3_conn import K8sHelm3Connector
66 from n2vc.k8s_juju_conn import K8sJujuConnector
67
68 from osm_common.dbbase import DbException
69 from osm_common.fsbase import FsException
70
71 from osm_lcm.data_utils.database.database import Database
72 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
73
74 from n2vc.n2vc_juju_conn import N2VCJujuConnector
75 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
76
77 from osm_lcm.lcm_helm_conn import LCMHelmConn
78
79 from copy import copy, deepcopy
80 from time import time
81 from uuid import uuid4
82
83 from random import randint
84
85 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
86
87
88 class NsLcm(LcmBase):
    # --- default timeouts (seconds) for NS lifecycle operations ---
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution

    # Sentinel result codes for sub-operation lookups (negative so they can
    # never collide with a real list index); used elsewhere in this class.
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Display name used when registering the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
104
    def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus client (used by LcmBase)
        :param lcm_tasks: task registry shared with the LCM main module
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all connectors
        :param prometheus: optional prometheus client used to push metrics
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        # sub-sections of the LCM configuration used by this module
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")  # "new generation" RO settings, if any
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju charms); DB updates flow back through
        # self._on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environment connector
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # k8s connectors: helm v2, helm v3 and juju-bundle
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # deploy-type -> k8s connector. NOTE: plain "chart" maps to helm v3
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # vca-type -> connector that manages that kind of execution environment
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        self.prometheus = prometheus

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)
186
187 @staticmethod
188 def increment_ip_mac(ip_mac, vm_index=1):
189 if not isinstance(ip_mac, str):
190 return ip_mac
191 try:
192 # try with ipv4 look for last dot
193 i = ip_mac.rfind(".")
194 if i > 0:
195 i += 1
196 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
197 # try with ipv6 or mac look for last colon. Operate in hex
198 i = ip_mac.rfind(":")
199 if i > 0:
200 i += 1
201 # format in hex, len can be 2 for mac or 4 for ipv6
202 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
203 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
204 )
205 except Exception:
206 pass
207 return None
208
209 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
210
211 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
212
213 try:
214 # TODO filter RO descriptor fields...
215
216 # write to database
217 db_dict = dict()
218 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
219 db_dict["deploymentStatus"] = ro_descriptor
220 self.update_db_2("nsrs", nsrs_id, db_dict)
221
222 except Exception as e:
223 self.logger.warn(
224 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
225 )
226
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """N2VC callback: refresh vcaStatus / nsState of an NS record.

        Reads the nsr, asks N2VC for the current status, updates the per-VCA
        configurationStatus, and promotes/demotes nsState between READY and
        DEGRADED based on juju machine/application health.

        :param table: db table reported by N2VC (nsrs expected)
        :param filter: db filter; its "_id" is the nsr id
        :param path: dotted db path that changed; its last element is the VCA index
        :param updated_data: data reported by N2VC (not used directly here)
        :param vca_id: optional VCA identifier
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted component of the changed path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict["configurationStatus"] is never
                # initialized, so both assignments below raise KeyError and
                # are swallowed by the except clause — looks like the intended
                # DB write never happens; TODO confirm intended write path.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # promote/demote nsState based on the degradation check above
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
329
330 async def _on_update_k8s_db(
331 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
332 ):
333 """
334 Updating vca status in NSR record
335 :param cluster_uuid: UUID of a k8s cluster
336 :param kdu_instance: The unique name of the KDU instance
337 :param filter: To get nsr_id
338 :return: none
339 """
340
341 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
342 # .format(cluster_uuid, kdu_instance, filter))
343
344 try:
345 nsr_id = filter.get("_id")
346
347 # get vca status for NS
348 vca_status = await self.k8sclusterjuju.status_kdu(
349 cluster_uuid,
350 kdu_instance,
351 complete_status=True,
352 yaml_format=False,
353 vca_id=vca_id,
354 )
355 # vcaStatus
356 db_dict = dict()
357 db_dict["vcaStatus"] = {nsr_id: vca_status}
358
359 await self.k8sclusterjuju.update_vca_status(
360 db_dict["vcaStatus"],
361 kdu_instance,
362 vca_id=vca_id,
363 )
364
365 # write to database
366 self.update_db_2("nsrs", nsr_id, db_dict)
367
368 except (asyncio.CancelledError, asyncio.TimeoutError):
369 raise
370 except Exception as e:
371 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
372
373 @staticmethod
374 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
375 try:
376 env = Environment(undefined=StrictUndefined)
377 template = env.from_string(cloud_init_text)
378 return template.render(additional_params or {})
379 except UndefinedError as e:
380 raise LcmException(
381 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
382 "file, must be provided in the instantiation parameters inside the "
383 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
384 )
385 except (TemplateError, TemplateNotFound) as e:
386 raise LcmException(
387 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
388 vnfd_id, vdu_id, e
389 )
390 )
391
392 def _get_vdu_cloud_init_content(self, vdu, vnfd):
393 cloud_init_content = cloud_init_file = None
394 try:
395 if vdu.get("cloud-init-file"):
396 base_folder = vnfd["_admin"]["storage"]
397 cloud_init_file = "{}/{}/cloud_init/{}".format(
398 base_folder["folder"],
399 base_folder["pkg-dir"],
400 vdu["cloud-init-file"],
401 )
402 with self.fs.file_open(cloud_init_file, "r") as ci_file:
403 cloud_init_content = ci_file.read()
404 elif vdu.get("cloud-init"):
405 cloud_init_content = vdu["cloud-init"]
406
407 return cloud_init_content
408 except FsException as e:
409 raise LcmException(
410 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
411 vnfd["id"], vdu["id"], cloud_init_file, e
412 )
413 )
414
415 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
416 vdur = next(
417 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
418 )
419 additional_params = vdur.get("additionalParams")
420 return parse_yaml_strings(additional_params)
421
422 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
423 """
424 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
425 :param vnfd: input vnfd
426 :param new_id: overrides vnf id if provided
427 :param additionalParams: Instantiation params for VNFs provided
428 :param nsrId: Id of the NSR
429 :return: copy of vnfd
430 """
431 vnfd_RO = deepcopy(vnfd)
432 # remove unused by RO configuration, monitoring, scaling and internal keys
433 vnfd_RO.pop("_id", None)
434 vnfd_RO.pop("_admin", None)
435 vnfd_RO.pop("monitoring-param", None)
436 vnfd_RO.pop("scaling-group-descriptor", None)
437 vnfd_RO.pop("kdu", None)
438 vnfd_RO.pop("k8s-cluster", None)
439 if new_id:
440 vnfd_RO["id"] = new_id
441
442 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
443 for vdu in get_iterable(vnfd_RO, "vdu"):
444 vdu.pop("cloud-init-file", None)
445 vdu.pop("cloud-init", None)
446 return vnfd_RO
447
448 @staticmethod
449 def ip_profile_2_RO(ip_profile):
450 RO_ip_profile = deepcopy(ip_profile)
451 if "dns-server" in RO_ip_profile:
452 if isinstance(RO_ip_profile["dns-server"], list):
453 RO_ip_profile["dns-address"] = []
454 for ds in RO_ip_profile.pop("dns-server"):
455 RO_ip_profile["dns-address"].append(ds["address"])
456 else:
457 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
458 if RO_ip_profile.get("ip-version") == "ipv4":
459 RO_ip_profile["ip-version"] = "IPv4"
460 if RO_ip_profile.get("ip-version") == "ipv6":
461 RO_ip_profile["ip-version"] = "IPv6"
462 if "dhcp-params" in RO_ip_profile:
463 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
464 return RO_ip_profile
465
466 def _get_ro_vim_id_for_vim_account(self, vim_account):
467 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
468 if db_vim["_admin"]["operationalState"] != "ENABLED":
469 raise LcmException(
470 "VIM={} is not available. operationalState={}".format(
471 vim_account, db_vim["_admin"]["operationalState"]
472 )
473 )
474 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
475 return RO_vim_id
476
477 def get_ro_wim_id_for_wim_account(self, wim_account):
478 if isinstance(wim_account, str):
479 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
480 if db_wim["_admin"]["operationalState"] != "ENABLED":
481 raise LcmException(
482 "WIM={} is not available. operationalState={}".format(
483 wim_account, db_wim["_admin"]["operationalState"]
484 )
485 )
486 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
487 return RO_wim_id
488 else:
489 return wim_account
490
491 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
492
493 db_vdu_push_list = []
494 db_update = {"_admin.modified": time()}
495 if vdu_create:
496 for vdu_id, vdu_count in vdu_create.items():
497 vdur = next(
498 (
499 vdur
500 for vdur in reversed(db_vnfr["vdur"])
501 if vdur["vdu-id-ref"] == vdu_id
502 ),
503 None,
504 )
505 if not vdur:
506 raise LcmException(
507 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
508 vdu_id
509 )
510 )
511
512 for count in range(vdu_count):
513 vdur_copy = deepcopy(vdur)
514 vdur_copy["status"] = "BUILD"
515 vdur_copy["status-detailed"] = None
516 vdur_copy["ip-address"]: None
517 vdur_copy["_id"] = str(uuid4())
518 vdur_copy["count-index"] += count + 1
519 vdur_copy["id"] = "{}-{}".format(
520 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
521 )
522 vdur_copy.pop("vim_info", None)
523 for iface in vdur_copy["interfaces"]:
524 if iface.get("fixed-ip"):
525 iface["ip-address"] = self.increment_ip_mac(
526 iface["ip-address"], count + 1
527 )
528 else:
529 iface.pop("ip-address", None)
530 if iface.get("fixed-mac"):
531 iface["mac-address"] = self.increment_ip_mac(
532 iface["mac-address"], count + 1
533 )
534 else:
535 iface.pop("mac-address", None)
536 iface.pop(
537 "mgmt_vnf", None
538 ) # only first vdu can be managment of vnf
539 db_vdu_push_list.append(vdur_copy)
540 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
541 if vdu_delete:
542 for vdu_id, vdu_count in vdu_delete.items():
543 if mark_delete:
544 indexes_to_delete = [
545 iv[0]
546 for iv in enumerate(db_vnfr["vdur"])
547 if iv[1]["vdu-id-ref"] == vdu_id
548 ]
549 db_update.update(
550 {
551 "vdur.{}.status".format(i): "DELETING"
552 for i in indexes_to_delete[-vdu_count:]
553 }
554 )
555 else:
556 # it must be deleted one by one because common.db does not allow otherwise
557 vdus_to_delete = [
558 v
559 for v in reversed(db_vnfr["vdur"])
560 if v["vdu-id-ref"] == vdu_id
561 ]
562 for vdu in vdus_to_delete[:vdu_count]:
563 self.db.set_one(
564 "vnfrs",
565 {"_id": db_vnfr["_id"]},
566 None,
567 pull={"vdur": {"_id": vdu["_id"]}},
568 )
569 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
570 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
571 # modify passed dictionary db_vnfr
572 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
573 db_vnfr["vdur"] = db_vnfr_["vdur"]
574
575 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
576 """
577 Updates database nsr with the RO info for the created vld
578 :param ns_update_nsr: dictionary to be filled with the updated info
579 :param db_nsr: content of db_nsr. This is also modified
580 :param nsr_desc_RO: nsr descriptor from RO
581 :return: Nothing, LcmException is raised on errors
582 """
583
584 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
585 for net_RO in get_iterable(nsr_desc_RO, "nets"):
586 if vld["id"] != net_RO.get("ns_net_osm_id"):
587 continue
588 vld["vim-id"] = net_RO.get("vim_net_id")
589 vld["name"] = net_RO.get("vim_name")
590 vld["status"] = net_RO.get("status")
591 vld["status-detailed"] = net_RO.get("error_msg")
592 ns_update_nsr["vld.{}".format(vld_index)] = vld
593 break
594 else:
595 raise LcmException(
596 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
597 )
598
599 def set_vnfr_at_error(self, db_vnfrs, error_text):
600 try:
601 for db_vnfr in db_vnfrs.values():
602 vnfr_update = {"status": "ERROR"}
603 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
604 if "status" not in vdur:
605 vdur["status"] = "ERROR"
606 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
607 if error_text:
608 vdur["status-detailed"] = str(error_text)
609 vnfr_update[
610 "vdur.{}.status-detailed".format(vdu_index)
611 ] = "ERROR"
612 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
613 except DbException as e:
614 self.logger.error("Cannot update vnf. {}".format(e))
615
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry matching this member-vnf-index;
            # the for/else below raises if none matches
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ";"; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM, nothing to update
                        continue
                    # match by vdu id AND replica count-index; the for/else
                    # raises if no RO vm corresponds to this vdur
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but an earlier replica: advance and retry
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface ip/mac, matching by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # copy the VIM view of each internal vld; for/else raises on miss
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
712
713 def _get_ns_config_info(self, nsr_id):
714 """
715 Generates a mapping between vnf,vdu elements and the N2VC id
716 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
717 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
718 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
719 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
720 """
721 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
722 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
723 mapping = {}
724 ns_config_info = {"osm-config-mapping": mapping}
725 for vca in vca_deployed_list:
726 if not vca["member-vnf-index"]:
727 continue
728 if not vca["vdu_id"]:
729 mapping[vca["member-vnf-index"]] = vca["application"]
730 else:
731 mapping[
732 "{}.{}.{}".format(
733 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
734 )
735 ] = vca["application"]
736 return ns_config_info
737
738 async def _instantiate_ng_ro(
739 self,
740 logging_text,
741 nsr_id,
742 nsd,
743 db_nsr,
744 db_nslcmop,
745 db_vnfrs,
746 db_vnfds,
747 n2vc_key_list,
748 stage,
749 start_deploy,
750 timeout_ns_deploy,
751 ):
752
753 db_vims = {}
754
755 def get_vim_account(vim_account_id):
756 nonlocal db_vims
757 if vim_account_id in db_vims:
758 return db_vims[vim_account_id]
759 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
760 db_vims[vim_account_id] = db_vim
761 return db_vim
762
763 # modify target_vld info with instantiation parameters
764 def parse_vld_instantiation_params(
765 target_vim, target_vld, vld_params, target_sdn
766 ):
767 if vld_params.get("ip-profile"):
768 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
769 "ip-profile"
770 ]
771 if vld_params.get("provider-network"):
772 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
773 "provider-network"
774 ]
775 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
776 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
777 "provider-network"
778 ]["sdn-ports"]
779 if vld_params.get("wimAccountId"):
780 target_wim = "wim:{}".format(vld_params["wimAccountId"])
781 target_vld["vim_info"][target_wim] = {}
782 for param in ("vim-network-name", "vim-network-id"):
783 if vld_params.get(param):
784 if isinstance(vld_params[param], dict):
785 for vim, vim_net in vld_params[param].items():
786 other_target_vim = "vim:" + vim
787 populate_dict(
788 target_vld["vim_info"],
789 (other_target_vim, param.replace("-", "_")),
790 vim_net,
791 )
792 else: # isinstance str
793 target_vld["vim_info"][target_vim][
794 param.replace("-", "_")
795 ] = vld_params[param]
796 if vld_params.get("common_id"):
797 target_vld["common_id"] = vld_params.get("common_id")
798
799 nslcmop_id = db_nslcmop["_id"]
800 target = {
801 "name": db_nsr["name"],
802 "ns": {"vld": []},
803 "vnf": [],
804 "image": deepcopy(db_nsr["image"]),
805 "flavor": deepcopy(db_nsr["flavor"]),
806 "action_id": nslcmop_id,
807 "cloud_init_content": {},
808 }
809 for image in target["image"]:
810 image["vim_info"] = {}
811 for flavor in target["flavor"]:
812 flavor["vim_info"] = {}
813
814 if db_nslcmop.get("lcmOperationType") != "instantiate":
815 # get parameters of instantiation:
816 db_nslcmop_instantiate = self.db.get_list(
817 "nslcmops",
818 {
819 "nsInstanceId": db_nslcmop["nsInstanceId"],
820 "lcmOperationType": "instantiate",
821 },
822 )[-1]
823 ns_params = db_nslcmop_instantiate.get("operationParams")
824 else:
825 ns_params = db_nslcmop.get("operationParams")
826 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
827 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
828
829 cp2target = {}
830 for vld_index, vld in enumerate(db_nsr.get("vld")):
831 target_vim = "vim:{}".format(ns_params["vimAccountId"])
832 target_vld = {
833 "id": vld["id"],
834 "name": vld["name"],
835 "mgmt-network": vld.get("mgmt-network", False),
836 "type": vld.get("type"),
837 "vim_info": {
838 target_vim: {
839 "vim_network_name": vld.get("vim-network-name"),
840 "vim_account_id": ns_params["vimAccountId"],
841 }
842 },
843 }
844 # check if this network needs SDN assist
845 if vld.get("pci-interfaces"):
846 db_vim = get_vim_account(ns_params["vimAccountId"])
847 sdnc_id = db_vim["config"].get("sdn-controller")
848 if sdnc_id:
849 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
850 target_sdn = "sdn:{}".format(sdnc_id)
851 target_vld["vim_info"][target_sdn] = {
852 "sdn": True,
853 "target_vim": target_vim,
854 "vlds": [sdn_vld],
855 "type": vld.get("type"),
856 }
857
858 nsd_vnf_profiles = get_vnf_profiles(nsd)
859 for nsd_vnf_profile in nsd_vnf_profiles:
860 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
861 if cp["virtual-link-profile-id"] == vld["id"]:
862 cp2target[
863 "member_vnf:{}.{}".format(
864 cp["constituent-cpd-id"][0][
865 "constituent-base-element-id"
866 ],
867 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
868 )
869 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
870
871 # check at nsd descriptor, if there is an ip-profile
872 vld_params = {}
873 nsd_vlp = find_in_list(
874 get_virtual_link_profiles(nsd),
875 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
876 == vld["id"],
877 )
878 if (
879 nsd_vlp
880 and nsd_vlp.get("virtual-link-protocol-data")
881 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
882 ):
883 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
884 "l3-protocol-data"
885 ]
886 ip_profile_dest_data = {}
887 if "ip-version" in ip_profile_source_data:
888 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
889 "ip-version"
890 ]
891 if "cidr" in ip_profile_source_data:
892 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
893 "cidr"
894 ]
895 if "gateway-ip" in ip_profile_source_data:
896 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
897 "gateway-ip"
898 ]
899 if "dhcp-enabled" in ip_profile_source_data:
900 ip_profile_dest_data["dhcp-params"] = {
901 "enabled": ip_profile_source_data["dhcp-enabled"]
902 }
903 vld_params["ip-profile"] = ip_profile_dest_data
904
905 # update vld_params with instantiation params
906 vld_instantiation_params = find_in_list(
907 get_iterable(ns_params, "vld"),
908 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
909 )
910 if vld_instantiation_params:
911 vld_params.update(vld_instantiation_params)
912 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
913 target["ns"]["vld"].append(target_vld)
914
915 for vnfr in db_vnfrs.values():
916 vnfd = find_in_list(
917 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
918 )
919 vnf_params = find_in_list(
920 get_iterable(ns_params, "vnf"),
921 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
922 )
923 target_vnf = deepcopy(vnfr)
924 target_vim = "vim:{}".format(vnfr["vim-account-id"])
925 for vld in target_vnf.get("vld", ()):
926 # check if connected to a ns.vld, to fill target'
927 vnf_cp = find_in_list(
928 vnfd.get("int-virtual-link-desc", ()),
929 lambda cpd: cpd.get("id") == vld["id"],
930 )
931 if vnf_cp:
932 ns_cp = "member_vnf:{}.{}".format(
933 vnfr["member-vnf-index-ref"], vnf_cp["id"]
934 )
935 if cp2target.get(ns_cp):
936 vld["target"] = cp2target[ns_cp]
937
938 vld["vim_info"] = {
939 target_vim: {"vim_network_name": vld.get("vim-network-name")}
940 }
941 # check if this network needs SDN assist
942 target_sdn = None
943 if vld.get("pci-interfaces"):
944 db_vim = get_vim_account(vnfr["vim-account-id"])
945 sdnc_id = db_vim["config"].get("sdn-controller")
946 if sdnc_id:
947 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
948 target_sdn = "sdn:{}".format(sdnc_id)
949 vld["vim_info"][target_sdn] = {
950 "sdn": True,
951 "target_vim": target_vim,
952 "vlds": [sdn_vld],
953 "type": vld.get("type"),
954 }
955
956 # check at vnfd descriptor, if there is an ip-profile
957 vld_params = {}
958 vnfd_vlp = find_in_list(
959 get_virtual_link_profiles(vnfd),
960 lambda a_link_profile: a_link_profile["id"] == vld["id"],
961 )
962 if (
963 vnfd_vlp
964 and vnfd_vlp.get("virtual-link-protocol-data")
965 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
966 ):
967 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
968 "l3-protocol-data"
969 ]
970 ip_profile_dest_data = {}
971 if "ip-version" in ip_profile_source_data:
972 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
973 "ip-version"
974 ]
975 if "cidr" in ip_profile_source_data:
976 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
977 "cidr"
978 ]
979 if "gateway-ip" in ip_profile_source_data:
980 ip_profile_dest_data[
981 "gateway-address"
982 ] = ip_profile_source_data["gateway-ip"]
983 if "dhcp-enabled" in ip_profile_source_data:
984 ip_profile_dest_data["dhcp-params"] = {
985 "enabled": ip_profile_source_data["dhcp-enabled"]
986 }
987
988 vld_params["ip-profile"] = ip_profile_dest_data
989 # update vld_params with instantiation params
990 if vnf_params:
991 vld_instantiation_params = find_in_list(
992 get_iterable(vnf_params, "internal-vld"),
993 lambda i_vld: i_vld["name"] == vld["id"],
994 )
995 if vld_instantiation_params:
996 vld_params.update(vld_instantiation_params)
997 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
998
999 vdur_list = []
1000 for vdur in target_vnf.get("vdur", ()):
1001 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1002 continue # This vdu must not be created
1003 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1004
1005 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1006
1007 if ssh_keys_all:
1008 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1009 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1010 if (
1011 vdu_configuration
1012 and vdu_configuration.get("config-access")
1013 and vdu_configuration.get("config-access").get("ssh-access")
1014 ):
1015 vdur["ssh-keys"] = ssh_keys_all
1016 vdur["ssh-access-required"] = vdu_configuration[
1017 "config-access"
1018 ]["ssh-access"]["required"]
1019 elif (
1020 vnf_configuration
1021 and vnf_configuration.get("config-access")
1022 and vnf_configuration.get("config-access").get("ssh-access")
1023 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1024 ):
1025 vdur["ssh-keys"] = ssh_keys_all
1026 vdur["ssh-access-required"] = vnf_configuration[
1027 "config-access"
1028 ]["ssh-access"]["required"]
1029 elif ssh_keys_instantiation and find_in_list(
1030 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1031 ):
1032 vdur["ssh-keys"] = ssh_keys_instantiation
1033
1034 self.logger.debug("NS > vdur > {}".format(vdur))
1035
1036 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1037 # cloud-init
1038 if vdud.get("cloud-init-file"):
1039 vdur["cloud-init"] = "{}:file:{}".format(
1040 vnfd["_id"], vdud.get("cloud-init-file")
1041 )
1042 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1043 if vdur["cloud-init"] not in target["cloud_init_content"]:
1044 base_folder = vnfd["_admin"]["storage"]
1045 cloud_init_file = "{}/{}/cloud_init/{}".format(
1046 base_folder["folder"],
1047 base_folder["pkg-dir"],
1048 vdud.get("cloud-init-file"),
1049 )
1050 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1051 target["cloud_init_content"][
1052 vdur["cloud-init"]
1053 ] = ci_file.read()
1054 elif vdud.get("cloud-init"):
1055 vdur["cloud-init"] = "{}:vdu:{}".format(
1056 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1057 )
1058 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1059 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1060 "cloud-init"
1061 ]
1062 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1063 deploy_params_vdu = self._format_additional_params(
1064 vdur.get("additionalParams") or {}
1065 )
1066 deploy_params_vdu["OSM"] = get_osm_params(
1067 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1068 )
1069 vdur["additionalParams"] = deploy_params_vdu
1070
1071 # flavor
1072 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1073 if target_vim not in ns_flavor["vim_info"]:
1074 ns_flavor["vim_info"][target_vim] = {}
1075
1076 # deal with images
1077 # in case alternative images are provided we must check if they should be applied
1078 # for the vim_type, modify the vim_type taking into account
1079 ns_image_id = int(vdur["ns-image-id"])
1080 if vdur.get("alt-image-ids"):
1081 db_vim = get_vim_account(vnfr["vim-account-id"])
1082 vim_type = db_vim["vim_type"]
1083 for alt_image_id in vdur.get("alt-image-ids"):
1084 ns_alt_image = target["image"][int(alt_image_id)]
1085 if vim_type == ns_alt_image.get("vim-type"):
1086 # must use alternative image
1087 self.logger.debug(
1088 "use alternative image id: {}".format(alt_image_id)
1089 )
1090 ns_image_id = alt_image_id
1091 vdur["ns-image-id"] = ns_image_id
1092 break
1093 ns_image = target["image"][int(ns_image_id)]
1094 if target_vim not in ns_image["vim_info"]:
1095 ns_image["vim_info"][target_vim] = {}
1096
1097 vdur["vim_info"] = {target_vim: {}}
1098 # instantiation parameters
1099 # if vnf_params:
1100 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1101 # vdud["id"]), None)
1102 vdur_list.append(vdur)
1103 target_vnf["vdur"] = vdur_list
1104 target["vnf"].append(target_vnf)
1105
1106 desc = await self.RO.deploy(nsr_id, target)
1107 self.logger.debug("RO return > {}".format(desc))
1108 action_id = desc["action_id"]
1109 await self._wait_ng_ro(
1110 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1111 )
1112
1113 # Updating NSR
1114 db_nsr_update = {
1115 "_admin.deployed.RO.operational-status": "running",
1116 "detailed-status": " ".join(stage),
1117 }
1118 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
1121 self.logger.debug(
1122 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1123 )
1124 return
1125
1126 async def _wait_ng_ro(
1127 self,
1128 nsr_id,
1129 action_id,
1130 nslcmop_id=None,
1131 start_time=None,
1132 timeout=600,
1133 stage=None,
1134 ):
1135 detailed_status_old = None
1136 db_nsr_update = {}
1137 start_time = start_time or time()
1138 while time() <= start_time + timeout:
1139 desc_status = await self.RO.status(nsr_id, action_id)
1140 self.logger.debug("Wait NG RO > {}".format(desc_status))
1141 if desc_status["status"] == "FAILED":
1142 raise NgRoException(desc_status["details"])
1143 elif desc_status["status"] == "BUILD":
1144 if stage:
1145 stage[2] = "VIM: ({})".format(desc_status["details"])
1146 elif desc_status["status"] == "DONE":
1147 if stage:
1148 stage[2] = "Deployed at VIM"
1149 break
1150 else:
1151 assert False, "ROclient.check_ns_status returns unknown {}".format(
1152 desc_status["status"]
1153 )
1154 if stage and nslcmop_id and stage[2] != detailed_status_old:
1155 detailed_status_old = stage[2]
1156 db_nsr_update["detailed-status"] = " ".join(stage)
1157 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1158 self._write_op_status(nslcmop_id, stage)
1159 await asyncio.sleep(15, loop=self.loop)
1160 else: # timeout_ns_deploy
1161 raise NgRoException("Timeout waiting ns to deploy")
1162
1163 async def _terminate_ng_ro(
1164 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1165 ):
1166 db_nsr_update = {}
1167 failed_detail = []
1168 action_id = None
1169 start_deploy = time()
1170 try:
1171 target = {
1172 "ns": {"vld": []},
1173 "vnf": [],
1174 "image": [],
1175 "flavor": [],
1176 "action_id": nslcmop_id,
1177 }
1178 desc = await self.RO.deploy(nsr_id, target)
1179 action_id = desc["action_id"]
1180 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1181 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1182 self.logger.debug(
1183 logging_text
1184 + "ns terminate action at RO. action_id={}".format(action_id)
1185 )
1186
1187 # wait until done
1188 delete_timeout = 20 * 60 # 20 minutes
1189 await self._wait_ng_ro(
1190 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
1191 )
1192
1193 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1194 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1195 # delete all nsr
1196 await self.RO.delete(nsr_id)
1197 except Exception as e:
1198 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1199 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1200 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1201 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1202 self.logger.debug(
1203 logging_text + "RO_action_id={} already deleted".format(action_id)
1204 )
1205 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1206 failed_detail.append("delete conflict: {}".format(e))
1207 self.logger.debug(
1208 logging_text
1209 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1210 )
1211 else:
1212 failed_detail.append("delete error: {}".format(e))
1213 self.logger.error(
1214 logging_text
1215 + "RO_action_id={} delete error: {}".format(action_id, e)
1216 )
1217
1218 if failed_detail:
1219 stage[2] = "Error deleting from VIM"
1220 else:
1221 stage[2] = "Deleted from VIM"
1222 db_nsr_update["detailed-status"] = " ".join(stage)
1223 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1224 self._write_op_status(nslcmop_id, stage)
1225
1226 if failed_detail:
1227 raise LcmException("; ".join(failed_detail))
1228 return
1229
1230 async def instantiate_RO(
1231 self,
1232 logging_text,
1233 nsr_id,
1234 nsd,
1235 db_nsr,
1236 db_nslcmop,
1237 db_vnfrs,
1238 db_vnfds,
1239 n2vc_key_list,
1240 stage,
1241 ):
1242 """
1243 Instantiate at RO
1244 :param logging_text: preffix text to use at logging
1245 :param nsr_id: nsr identity
1246 :param nsd: database content of ns descriptor
1247 :param db_nsr: database content of ns record
1248 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1249 :param db_vnfrs:
1250 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1251 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1252 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1253 :return: None or exception
1254 """
1255 try:
1256 start_deploy = time()
1257 ns_params = db_nslcmop.get("operationParams")
1258 if ns_params and ns_params.get("timeout_ns_deploy"):
1259 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1260 else:
1261 timeout_ns_deploy = self.timeout.get(
1262 "ns_deploy", self.timeout_ns_deploy
1263 )
1264
1265 # Check for and optionally request placement optimization. Database will be updated if placement activated
1266 stage[2] = "Waiting for Placement."
1267 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1268 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1269 for vnfr in db_vnfrs.values():
1270 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1271 break
1272 else:
1273 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1274
1275 return await self._instantiate_ng_ro(
1276 logging_text,
1277 nsr_id,
1278 nsd,
1279 db_nsr,
1280 db_nslcmop,
1281 db_vnfrs,
1282 db_vnfds,
1283 n2vc_key_list,
1284 stage,
1285 start_deploy,
1286 timeout_ns_deploy,
1287 )
1288 except Exception as e:
1289 stage[2] = "ERROR deploying at VIM"
1290 self.set_vnfr_at_error(db_vnfrs, str(e))
1291 self.logger.error(
1292 "Error deploying at VIM {}".format(e),
1293 exc_info=not isinstance(
1294 e,
1295 (
1296 ROclient.ROClientException,
1297 LcmException,
1298 DbException,
1299 NgRoException,
1300 ),
1301 ),
1302 )
1303 raise
1304
1305 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1306 """
1307 Wait for kdu to be up, get ip address
1308 :param logging_text: prefix use for logging
1309 :param nsr_id:
1310 :param vnfr_id:
1311 :param kdu_name:
1312 :return: IP address
1313 """
1314
1315 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1316 nb_tries = 0
1317
1318 while nb_tries < 360:
1319 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1320 kdur = next(
1321 (
1322 x
1323 for x in get_iterable(db_vnfr, "kdur")
1324 if x.get("kdu-name") == kdu_name
1325 ),
1326 None,
1327 )
1328 if not kdur:
1329 raise LcmException(
1330 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1331 )
1332 if kdur.get("status"):
1333 if kdur["status"] in ("READY", "ENABLED"):
1334 return kdur.get("ip-address")
1335 else:
1336 raise LcmException(
1337 "target KDU={} is in error state".format(kdu_name)
1338 )
1339
1340 await asyncio.sleep(10, loop=self.loop)
1341 nb_tries += 1
1342 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1343
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id:
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # retries for key injection against the legacy RO only
        target_vdu_id = None
        ro_retries = 0  # overall polling attempts (10s apart)

        # Retry loop: first resolve the target vdur and its ip-address from the
        # vnfr record, then (optionally) inject the ssh key via NG-RO or legacy RO.
        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # consider the VM ready when it is a PDU or either status field is ACTIVE
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # a PDU is not managed by the VIM: key injection is not possible
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: key injection is requested as a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # legacy RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                            else:
                                raise ROclient.ROClientException(
                                    "error injecting key: {}".format(
                                        result.get("description")
                                    )
                                )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # legacy RO may fail transiently: retry up to 20 times (10s apart)
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                break

        return ip_address
1520
1521 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1522 """
1523 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1524 """
1525 my_vca = vca_deployed_list[vca_index]
1526 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1527 # vdu or kdu: no dependencies
1528 return
1529 timeout = 300
1530 while timeout >= 0:
1531 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1532 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1533 configuration_status_list = db_nsr["configurationStatus"]
1534 for index, vca_deployed in enumerate(configuration_status_list):
1535 if index == vca_index:
1536 # myself
1537 continue
1538 if not my_vca.get("member-vnf-index") or (
1539 vca_deployed.get("member-vnf-index")
1540 == my_vca.get("member-vnf-index")
1541 ):
1542 internal_status = configuration_status_list[index].get("status")
1543 if internal_status == "READY":
1544 continue
1545 elif internal_status == "BROKEN":
1546 raise LcmException(
1547 "Configuration aborted because dependent charm/s has failed"
1548 )
1549 else:
1550 break
1551 else:
1552 # no dependencies, return
1553 return
1554 await asyncio.sleep(10)
1555 timeout -= 1
1556
1557 raise LcmException("Configuration aborted because dependent charm/s timeout")
1558
1559 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1560 return deep_get(db_vnfr, ("vca-id",)) or deep_get(
1561 db_nsr, ("instantiate_params", "vcaId")
1562 )
1563
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Create (or reuse) an execution environment at VCA for one element (NS, VNF,
        VDU or KDU), install its configuration software, wait for the target to be
        reachable, and run the Day-1 initial config primitives.

        :param logging_text: prefix text to use at logging
        :param vca_index: index of this VCA inside db_nsr["_admin"]["deployed"]["VCA"]
        :param nsi_id: network slice instance id, or None/empty when not part of a slice
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnf record; None for an NS-level charm
        :param vdu_id: vdu id when configuring a VDU, else None
        :param kdu_name: kdu name when configuring a KDU, else None
        :param vdu_index: count-index of the vdu instance (0 when not applicable)
        :param config_descriptor: configuration section of the descriptor for this element
        :param deploy_params: additional params for primitives; "rw_mgmt_ip" is added here
        :param base_folder: vnfd/nsd package storage info ("folder", "pkg-dir")
        :param nslcmop_id: operation id, used to report stage progress
        :param stage: 3-item status list; item 0 is overwritten with the Day-1 stage
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/helm/helm-v3
        :param vca_name: charm or helm-chart name inside the package
        :param ee_config_descriptor: execution-environment entry of the descriptor
        :raises LcmException: wrapping any failure, with the failing step in the message
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms"
                if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                else "helm-charts",
                vca_name,
            )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    # charm name is the last path component of the artifact path
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        vca_type=vca_type,
                    )
                else:
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                # native charm runs inside the VM itself: wait for the VM and ssh in
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            # deploy as many juju units as instances configured for the element
            # ("config-units"), defaulting to a single unit
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_index=vca_index,
                vca_id=vca_id,
                vca_type=vca_type,
            )

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6 Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.add_prometheus_metrics(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
1972
1973 def _write_ns_status(
1974 self,
1975 nsr_id: str,
1976 ns_state: str,
1977 current_operation: str,
1978 current_operation_id: str,
1979 error_description: str = None,
1980 error_detail: str = None,
1981 other_update: dict = None,
1982 ):
1983 """
1984 Update db_nsr fields.
1985 :param nsr_id:
1986 :param ns_state:
1987 :param current_operation:
1988 :param current_operation_id:
1989 :param error_description:
1990 :param error_detail:
1991 :param other_update: Other required changes at database if provided, will be cleared
1992 :return:
1993 """
1994 try:
1995 db_dict = other_update or {}
1996 db_dict[
1997 "_admin.nslcmop"
1998 ] = current_operation_id # for backward compatibility
1999 db_dict["_admin.current-operation"] = current_operation_id
2000 db_dict["_admin.operation-type"] = (
2001 current_operation if current_operation != "IDLE" else None
2002 )
2003 db_dict["currentOperation"] = current_operation
2004 db_dict["currentOperationID"] = current_operation_id
2005 db_dict["errorDescription"] = error_description
2006 db_dict["errorDetail"] = error_detail
2007
2008 if ns_state:
2009 db_dict["nsState"] = ns_state
2010 self.update_db_2("nsrs", nsr_id, db_dict)
2011 except DbException as e:
2012 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2013
2014 def _write_op_status(
2015 self,
2016 op_id: str,
2017 stage: list = None,
2018 error_message: str = None,
2019 queuePosition: int = 0,
2020 operation_state: str = None,
2021 other_update: dict = None,
2022 ):
2023 try:
2024 db_dict = other_update or {}
2025 db_dict["queuePosition"] = queuePosition
2026 if isinstance(stage, list):
2027 db_dict["stage"] = stage[0]
2028 db_dict["detailed-status"] = " ".join(stage)
2029 elif stage is not None:
2030 db_dict["stage"] = str(stage)
2031
2032 if error_message is not None:
2033 db_dict["errorMessage"] = error_message
2034 if operation_state is not None:
2035 db_dict["operationState"] = operation_state
2036 db_dict["statusEnteredTime"] = time()
2037 self.update_db_2("nslcmops", op_id, db_dict)
2038 except DbException as e:
2039 self.logger.warn(
2040 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2041 )
2042
2043 def _write_all_config_status(self, db_nsr: dict, status: str):
2044 try:
2045 nsr_id = db_nsr["_id"]
2046 # configurationStatus
2047 config_status = db_nsr.get("configurationStatus")
2048 if config_status:
2049 db_nsr_update = {
2050 "configurationStatus.{}.status".format(index): status
2051 for index, v in enumerate(config_status)
2052 if v
2053 }
2054 # update status
2055 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2056
2057 except DbException as e:
2058 self.logger.warn(
2059 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2060 )
2061
2062 def _write_configuration_status(
2063 self,
2064 nsr_id: str,
2065 vca_index: int,
2066 status: str = None,
2067 element_under_configuration: str = None,
2068 element_type: str = None,
2069 other_update: dict = None,
2070 ):
2071
2072 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2073 # .format(vca_index, status))
2074
2075 try:
2076 db_path = "configurationStatus.{}.".format(vca_index)
2077 db_dict = other_update or {}
2078 if status:
2079 db_dict[db_path + "status"] = status
2080 if element_under_configuration:
2081 db_dict[
2082 db_path + "elementUnderConfiguration"
2083 ] = element_under_configuration
2084 if element_type:
2085 db_dict[db_path + "elementType"] = element_type
2086 self.update_db_2("nsrs", nsr_id, db_dict)
2087 except DbException as e:
2088 self.logger.warn(
2089 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2090 status, nsr_id, vca_index, e
2091 )
2092 )
2093
2094 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2095 """
2096 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2097 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2098 Database is used because the result can be obtained from a different LCM worker in case of HA.
2099 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2100 :param db_nslcmop: database content of nslcmop
2101 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2102 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2103 computed 'vim-account-id'
2104 """
2105 modified = False
2106 nslcmop_id = db_nslcmop["_id"]
2107 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2108 if placement_engine == "PLA":
2109 self.logger.debug(
2110 logging_text + "Invoke and wait for placement optimization"
2111 )
2112 await self.msg.aiowrite(
2113 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2114 )
2115 db_poll_interval = 5
2116 wait = db_poll_interval * 10
2117 pla_result = None
2118 while not pla_result and wait >= 0:
2119 await asyncio.sleep(db_poll_interval)
2120 wait -= db_poll_interval
2121 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2122 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2123
2124 if not pla_result:
2125 raise LcmException(
2126 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2127 )
2128
2129 for pla_vnf in pla_result["vnf"]:
2130 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2131 if not pla_vnf.get("vimAccountId") or not vnfr:
2132 continue
2133 modified = True
2134 self.db.set_one(
2135 "vnfrs",
2136 {"_id": vnfr["_id"]},
2137 {"vim-account-id": pla_vnf["vimAccountId"]},
2138 )
2139 # Modifies db_vnfrs
2140 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2141 return modified
2142
2143 def update_nsrs_with_pla_result(self, params):
2144 try:
2145 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2146 self.update_db_2(
2147 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2148 )
2149 except Exception as e:
2150 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2151
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: read descriptors/records from database,
        deploy KDUs, deploy VMs at the VIM (through RO) and deploy execution
        environments (through N2VC), then consolidate the result at database
        and notify via kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Result is reported at the nsrs/nslcmops records and kafka.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds stores vnfd dicts, so this membership
                # test of an id string never matches and the vnfd is re-read
                # for every vnfr referencing it — confirm whether a lookup by
                # vnfd["_id"] was intended
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            # VIM deployment runs as a background task in parallel with the
            # execution-environment deployment below
            stage[1] = "Deploying NS at VIM."
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if the vnfd declares one
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    # NOTE(review): when the vdur has no additionalParams,
                    # deploy_params_vdu aliases the VNF-level deploy_params, so
                    # the "OSM" assignment below also overwrites the shared
                    # dict — confirm whether a copy was intended
                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # deploy one execution environment per VDU instance
                        # (bug 1582: juju units = number-of-instances)
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms, if any kdu declares a configuration
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        # NOTE(review): additionalParams replace (not merge with)
                        # the "OSM" params dict built just above — confirm intent
                        if kdur.get("additionalParams"):
                            deploy_params_kdu = parse_yaml_strings(
                                kdur["additionalParams"]
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # the rest of the work (waiting for tasks, consolidating status)
            # is done at the finally block below

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback for the operation record
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the result via kafka (best effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2621
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_index: int,
        timeout: int = 3600,
        vca_type: str = None,
        vca_id: str = None,
    ) -> bool:
        """
        Add the juju relations declared at ns-configuration/vnf-configuration
        that involve the VCA at position vca_index of _admin.deployed.VCA,
        waiting (by polling the database) until the peer VCAs are ready.

        :param logging_text: prefix for logging
        :param nsr_id: nsr identifier at database
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param timeout: maximum seconds to wait for the peers before giving up
        :param vca_type: key of self.vca_map; defaults to "lxc_proxy_charm"
        :param vca_id: VCA account identifier, forwarded to add_relation
        :return: True when all relations were added (or none defined);
            False on timeout or any unexpected error
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            vca_type = vca_type or "lxc_proxy_charm"

            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # this VCA data
            my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]

            # read all ns-configuration relations
            ns_relations = list()
            db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
            if db_ns_relations:
                for r in db_ns_relations:
                    # check if this VCA is in the relation
                    # (NS-level relations are matched by member-vnf-index)
                    if my_vca.get("member-vnf-index") in (
                        r.get("entities")[0].get("id"),
                        r.get("entities")[1].get("id"),
                    ):
                        ns_relations.append(r)

            # read all vnf-configuration relations
            vnf_relations = list()
            db_vnfd_list = db_nsr.get("vnfd-id")
            if db_vnfd_list:
                for vnfd in db_vnfd_list:
                    db_vnf_relations = None
                    db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                    db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
                    if db_vnf_configuration:
                        db_vnf_relations = db_vnf_configuration.get("relation", [])
                    if db_vnf_relations:
                        for r in db_vnf_relations:
                            # check if this VCA is in the relation
                            # (VNF-level relations are matched by vdu_id)
                            if my_vca.get("vdu_id") in (
                                r.get("entities")[0].get("id"),
                                r.get("entities")[1].get("id"),
                            ):
                                vnf_relations.append(r)

            # if no relations, terminate
            if not ns_relations and not vnf_relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(
                logging_text
                + " adding relations\n {}\n {}".format(
                    ns_relations, vnf_relations
                )
            )

            # STEP 2/3: poll until both peers of each relation are deployed,
            # adding each relation as soon as its two endpoints are available;
            # relations are removed from the pending lists once added or when
            # a peer is reported BROKEN
            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deloyed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each defined NS relation, find the VCA's related
                # (iterate over a copy because entries are removed inside)
                for r in ns_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        if vca.get("member-vnf-index") == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get("member-vnf-index") == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        ns_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        0
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        1
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                        except Exception:
                            # ignore: best-effort check, keep polling on any error
                            pass

                # for each defined VNF relation, find the VCA's related
                # (iterate over a copy because entries are removed inside)
                for r in vnf_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        # VNF-level charms (no vdu_id) are matched by vnfd_id instead
                        key_to_check = "vdu_id"
                        if vca.get("vdu_id") is None:
                            key_to_check = "vnfd_id"
                        if vca.get(key_to_check) == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get(key_to_check) == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        vnf_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("vdu_id") == r.get("entities")[0].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                                    if vca.get("vdu_id") == r.get("entities")[1].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                        except Exception:
                            # ignore: best-effort check, keep polling on any error
                            pass

                # wait for next try
                await asyncio.sleep(5.0)

                if not ns_relations and not vnf_relations:
                    self.logger.debug("Relations added")
                    break

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
2825
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """
        Install one KDU at its k8s cluster, record the kdu-instance and the
        discovered services/mgmt ip at database, and run any juju-less
        initial-config-primitives.

        :param nsr_id: nsr identifier at database
        :param nsr_db_path: dotted db path of this kdu inside the nsr record
        :param vnfr_data: vnfr record of the vnf that owns the kdu
        :param kdu_index: index of this kdu inside the vnfr "kdur" list
        :param kdud: kdu descriptor (from the vnfd)
        :param vnfd: vnf descriptor
        :param k8s_instance_info: cluster type/uuid, kdu model/name, namespace, ...
        :param k8params: kdu instantiation parameters
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: VCA account identifier, forwarded to the k8s connector
        :return: the kdu_instance name. On error the failure is written at
            database and the original exception is re-raised.
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honor an explicit deployment name; otherwise let the connector
            # generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )
            # store the instance name before installing so it can be cleaned
            # up even if the install fails midway
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )
            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt-service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial-config-primitives directly at the cluster, but only
            # when the kdu has no juju execution environment to run them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives are executed in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
2990
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one asyncio task per KDU found in the VNFRs of this NS.

        For every kdur in every vnfr: resolve the k8s cluster id to the internal
        cluster uuid, synchronize helm repos once per cluster, record the KDU entry
        under nsr._admin.deployed.K8s.<index>, and spawn self._install_kdu() as a
        registered LCM task. Tasks are reported back through task_instantiation_info.

        :param logging_text: prefix for all log messages
        :param nsr_id: NS record id ("nsrs" collection)
        :param nslcmop_id: current operation id, used to register the tasks
        :param db_vnfrs: dict of vnfr documents (member-vnf-index -> vnfr)
        :param db_vnfds: list of vnfd documents referenced by the vnfrs
        :param task_instantiation_info: dict task -> human-readable description,
            filled here so the caller can await/monitor the installs
        :raises LcmException: on descriptor/cluster errors (other exceptions are
            wrapped into LcmException by the generic handler below)
        """
        # Launch kdus if present in the descriptor

        # cache: cluster_type -> {external cluster_id -> internal cluster uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and memoize) the internal uuid of a k8s cluster for the
            # given connector type, initializing helm-v3 on demand for old clusters.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0  # position of the KDU entry in _admin.deployed.K8s
            updated_cluster_list = []  # helm-v2 clusters whose repos were synced
            updated_v3_cluster_list = []  # helm-v3 clusters whose repos were synced

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage and storage.get(
                            "pkg-dir"
                        ):  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            filename = "{}/{}/{}s/{}".format(
                                storage["folder"],
                                storage["pkg-dir"],
                                k8sclustertype,
                                kdumodel,
                            )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                # the model is a packaged artifact: use its fs path
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per cluster and helm flavour)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=600,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever K8s entries were prepared, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3257
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of a descriptor.

        For each ee in descriptor_config, determine the VCA type (proxy/native/k8s
        charm or helm v2/v3), reuse the matching entry in
        db_nsr._admin.deployed.VCA or create a new one in the database, then spawn
        and register an asyncio task running self.instantiate_N2VC().
        Tasks are reported back through task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                # juju charm: decide between proxy (charm provided) and native
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                # helm ee: v3 unless the descriptor explicitly asks for v2
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an already-deployed VCA entry matching this ee
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3410
3411 @staticmethod
3412 def _create_nslcmop(nsr_id, operation, params):
3413 """
3414 Creates a ns-lcm-opp content to be stored at database.
3415 :param nsr_id: internal id of the instance
3416 :param operation: instantiate, terminate, scale, action, ...
3417 :param params: user parameters for the operation
3418 :return: dictionary following SOL005 format
3419 """
3420 # Raise exception if invalid arguments
3421 if not (nsr_id and operation and params):
3422 raise LcmException(
3423 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3424 )
3425 now = time()
3426 _id = str(uuid4())
3427 nslcmop = {
3428 "id": _id,
3429 "_id": _id,
3430 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3431 "operationState": "PROCESSING",
3432 "statusEnteredTime": now,
3433 "nsInstanceId": nsr_id,
3434 "lcmOperationType": operation,
3435 "startTime": now,
3436 "isAutomaticInvocation": False,
3437 "operationParams": params,
3438 "isCancelPending": False,
3439 "links": {
3440 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3441 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3442 },
3443 }
3444 return nslcmop
3445
3446 def _format_additional_params(self, params):
3447 params = params or {}
3448 for key, value in params.items():
3449 if str(value).startswith("!!yaml "):
3450 params[key] = yaml.safe_load(value[7:])
3451 return params
3452
3453 def _get_terminate_primitive_params(self, seq, vnf_index):
3454 primitive = seq.get("name")
3455 primitive_params = {}
3456 params = {
3457 "member_vnf_index": vnf_index,
3458 "primitive": primitive,
3459 "primitive_params": primitive_params,
3460 }
3461 desc_params = {}
3462 return self._map_primitive_params(seq, params, desc_params)
3463
3464 # sub-operations
3465
3466 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3467 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3468 if op.get("operationState") == "COMPLETED":
3469 # b. Skip sub-operation
3470 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3471 return self.SUBOPERATION_STATUS_SKIP
3472 else:
3473 # c. retry executing sub-operation
3474 # The sub-operation exists, and operationState != 'COMPLETED'
3475 # Update operationState = 'PROCESSING' to indicate a retry.
3476 operationState = "PROCESSING"
3477 detailed_status = "In progress"
3478 self._update_suboperation_status(
3479 db_nslcmop, op_index, operationState, detailed_status
3480 )
3481 # Return the sub-operation index
3482 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3483 # with arguments extracted from the sub-operation
3484 return op_index
3485
3486 # Find a sub-operation where all keys in a matching dictionary must match
3487 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3488 def _find_suboperation(self, db_nslcmop, match):
3489 if db_nslcmop and match:
3490 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3491 for i, op in enumerate(op_list):
3492 if all(op.get(k) == match[k] for k in match):
3493 return i
3494 return self.SUBOPERATION_STATUS_NOT_FOUND
3495
3496 # Update status for a sub-operation given its index
3497 def _update_suboperation_status(
3498 self, db_nslcmop, op_index, operationState, detailed_status
3499 ):
3500 # Update DB for HA tasks
3501 q_filter = {"_id": db_nslcmop["_id"]}
3502 update_dict = {
3503 "_admin.operations.{}.operationState".format(op_index): operationState,
3504 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3505 }
3506 self.db.set_one(
3507 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3508 )
3509
3510 # Add sub-operation, return the index of the added sub-operation
3511 # Optionally, set operationState, detailed-status, and operationType
3512 # Status and type are currently set for 'scale' sub-operations:
3513 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3514 # 'detailed-status' : status message
3515 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3516 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3517 def _add_suboperation(
3518 self,
3519 db_nslcmop,
3520 vnf_index,
3521 vdu_id,
3522 vdu_count_index,
3523 vdu_name,
3524 primitive,
3525 mapped_primitive_params,
3526 operationState=None,
3527 detailed_status=None,
3528 operationType=None,
3529 RO_nsr_id=None,
3530 RO_scaling_info=None,
3531 ):
3532 if not db_nslcmop:
3533 return self.SUBOPERATION_STATUS_NOT_FOUND
3534 # Get the "_admin.operations" list, if it exists
3535 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3536 op_list = db_nslcmop_admin.get("operations")
3537 # Create or append to the "_admin.operations" list
3538 new_op = {
3539 "member_vnf_index": vnf_index,
3540 "vdu_id": vdu_id,
3541 "vdu_count_index": vdu_count_index,
3542 "primitive": primitive,
3543 "primitive_params": mapped_primitive_params,
3544 }
3545 if operationState:
3546 new_op["operationState"] = operationState
3547 if detailed_status:
3548 new_op["detailed-status"] = detailed_status
3549 if operationType:
3550 new_op["lcmOperationType"] = operationType
3551 if RO_nsr_id:
3552 new_op["RO_nsr_id"] = RO_nsr_id
3553 if RO_scaling_info:
3554 new_op["RO_scaling_info"] = RO_scaling_info
3555 if not op_list:
3556 # No existing operations, create key 'operations' with current operation as first list element
3557 db_nslcmop_admin.update({"operations": [new_op]})
3558 op_list = db_nslcmop_admin.get("operations")
3559 else:
3560 # Existing operations, append operation to list
3561 op_list.append(new_op)
3562
3563 db_nslcmop_update = {"_admin.operations": op_list}
3564 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3565 op_index = len(op_list) - 1
3566 return op_index
3567
3568 # Helper methods for scale() sub-operations
3569
3570 # pre-scale/post-scale:
3571 # Check for 3 different cases:
3572 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3573 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3574 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3575 def _check_or_add_scale_suboperation(
3576 self,
3577 db_nslcmop,
3578 vnf_index,
3579 vnf_config_primitive,
3580 primitive_params,
3581 operationType,
3582 RO_nsr_id=None,
3583 RO_scaling_info=None,
3584 ):
3585 # Find this sub-operation
3586 if RO_nsr_id and RO_scaling_info:
3587 operationType = "SCALE-RO"
3588 match = {
3589 "member_vnf_index": vnf_index,
3590 "RO_nsr_id": RO_nsr_id,
3591 "RO_scaling_info": RO_scaling_info,
3592 }
3593 else:
3594 match = {
3595 "member_vnf_index": vnf_index,
3596 "primitive": vnf_config_primitive,
3597 "primitive_params": primitive_params,
3598 "lcmOperationType": operationType,
3599 }
3600 op_index = self._find_suboperation(db_nslcmop, match)
3601 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3602 # a. New sub-operation
3603 # The sub-operation does not exist, add it.
3604 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3605 # The following parameters are set to None for all kind of scaling:
3606 vdu_id = None
3607 vdu_count_index = None
3608 vdu_name = None
3609 if RO_nsr_id and RO_scaling_info:
3610 vnf_config_primitive = None
3611 primitive_params = None
3612 else:
3613 RO_nsr_id = None
3614 RO_scaling_info = None
3615 # Initial status for sub-operation
3616 operationState = "PROCESSING"
3617 detailed_status = "In progress"
3618 # Add sub-operation for pre/post-scaling (zero or more operations)
3619 self._add_suboperation(
3620 db_nslcmop,
3621 vnf_index,
3622 vdu_id,
3623 vdu_count_index,
3624 vdu_name,
3625 vnf_config_primitive,
3626 primitive_params,
3627 operationState,
3628 detailed_status,
3629 operationType,
3630 RO_nsr_id,
3631 RO_scaling_info,
3632 )
3633 return self.SUBOPERATION_STATUS_NEW
3634 else:
3635 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3636 # or op_index (operationState != 'COMPLETED')
3637 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3638
3639 # Function to return execution_environment id
3640
3641 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3642 # TODO vdu_index_count
3643 for vca in vca_deployed_list:
3644 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3645 return vca["ee_id"]
3646
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for all log messages
        :param db_nslcmop: database content of the current nslcmop (sub-operations are appended here)
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) id, passed through to the VCA connector
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # type decides which connector of vca_map handles the destroy
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            # primitives filtered/ordered for this concrete execution environment
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # remove any prometheus metric-collection jobs tied to this VCA
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_id=vca_id,
            )
3750
3751 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3752 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3753 namespace = "." + db_nsr["_id"]
3754 try:
3755 await self.n2vc.delete_namespace(
3756 namespace=namespace,
3757 total_timeout=self.timeout_charm_delete,
3758 vca_id=vca_id,
3759 )
3760 except N2VCNotFound: # already deleted. Skip
3761 pass
3762 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3763
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text: prefix for all log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: ns record id at "nsrs" collection
        :param nslcmop_id: current operation id, used to report progress
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: when any of the RO delete steps failed (details concatenated)
        """
        db_nsr_update = {}
        failed_detail = []  # accumulated error texts; raised as one LcmException at the end
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a previous (interrupted) terminate may have left a pending delete action
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    # poll RO for the status of the delete action
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # still deleting; refresh the user-visible status text
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # delete action completed successfully
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    if stage[2] != detailed_status_old:
                        # write to database only when the status text changed
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                # 404 means RO no longer knows the ns: treat as already deleted
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns itself was removed without errors)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO, one by one
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
3963
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance in three stages: (1) prepare and read records,
        (2) execute per-VCA terminating primitives, (3) delete all execution
        environments, uninstall KDU instances and remove the deployment from
        the VIM (RO).  The outcome is persisted to the nsrs/nslcmops records
        and notified on the kafka bus in the finally block.

        :param nsr_id: _id of the nsrs record to terminate
        :param nslcmop_id: _id of the nslcmops record describing this operation
        :return: None; results are written to the database and message bus
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps each pending asyncio task -> human description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy: nsr_deployed is mutated-free reference data for this task
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed, nothing to undo
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; cache each vnfd by _id and index it by member-vnf-index
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # select the configuration descriptor matching the VCA scope
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of the work (waiting for tasks, status updates) is done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    # propagate the terminated state to all VNF records
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4291
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of asyncio tasks to finish, reporting progress and
        collecting errors.

        :param logging_text: prefix for all log messages
        :param created_tasks_info: dict mapping each task to a human-readable description
        :param timeout: overall time budget in seconds for all tasks together
        :param stage: 3-item list [stage, step, vim-status]; stage[1] is updated in place
        :param nslcmop_id: nslcmop record updated with progress/errors
        :param nsr_id: if provided, error summaries are also written to this nsr record
        :return: list of error-detail strings (empty if every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget shrinks as time passes
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled exception types get a one-line error;
                    # anything else is logged with its full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4368
4369 @staticmethod
4370 def _map_primitive_params(primitive_desc, params, instantiation_params):
4371 """
4372 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4373 The default-value is used. If it is between < > it look for a value at instantiation_params
4374 :param primitive_desc: portion of VNFD/NSD that describes primitive
4375 :param params: Params provided by user
4376 :param instantiation_params: Instantiation params provided by user
4377 :return: a dictionary with the calculated params
4378 """
4379 calculated_params = {}
4380 for parameter in primitive_desc.get("parameter", ()):
4381 param_name = parameter["name"]
4382 if param_name in params:
4383 calculated_params[param_name] = params[param_name]
4384 elif "default-value" in parameter or "value" in parameter:
4385 if "value" in parameter:
4386 calculated_params[param_name] = parameter["value"]
4387 else:
4388 calculated_params[param_name] = parameter["default-value"]
4389 if (
4390 isinstance(calculated_params[param_name], str)
4391 and calculated_params[param_name].startswith("<")
4392 and calculated_params[param_name].endswith(">")
4393 ):
4394 if calculated_params[param_name][1:-1] in instantiation_params:
4395 calculated_params[param_name] = instantiation_params[
4396 calculated_params[param_name][1:-1]
4397 ]
4398 else:
4399 raise LcmException(
4400 "Parameter {} needed to execute primitive {} not provided".format(
4401 calculated_params[param_name], primitive_desc["name"]
4402 )
4403 )
4404 else:
4405 raise LcmException(
4406 "Parameter {} needed to execute primitive {} not provided".format(
4407 param_name, primitive_desc["name"]
4408 )
4409 )
4410
4411 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4412 calculated_params[param_name] = yaml.safe_dump(
4413 calculated_params[param_name], default_flow_style=True, width=256
4414 )
4415 elif isinstance(calculated_params[param_name], str) and calculated_params[
4416 param_name
4417 ].startswith("!!yaml "):
4418 calculated_params[param_name] = calculated_params[param_name][7:]
4419 if parameter.get("data-type") == "INTEGER":
4420 try:
4421 calculated_params[param_name] = int(calculated_params[param_name])
4422 except ValueError: # error converting string to int
4423 raise LcmException(
4424 "Parameter {} of primitive {} must be integer".format(
4425 param_name, primitive_desc["name"]
4426 )
4427 )
4428 elif parameter.get("data-type") == "BOOLEAN":
4429 calculated_params[param_name] = not (
4430 (str(calculated_params[param_name])).lower() == "false"
4431 )
4432
4433 # add always ns_config_info if primitive name is config
4434 if primitive_desc["name"] == "config":
4435 if "ns_config_info" in instantiation_params:
4436 calculated_params["ns_config_info"] = instantiation_params[
4437 "ns_config_info"
4438 ]
4439 return calculated_params
4440
4441 def _look_for_deployed_vca(
4442 self,
4443 deployed_vca,
4444 member_vnf_index,
4445 vdu_id,
4446 vdu_count_index,
4447 kdu_name=None,
4448 ee_descriptor_id=None,
4449 ):
4450 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4451 for vca in deployed_vca:
4452 if not vca:
4453 continue
4454 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4455 continue
4456 if (
4457 vdu_count_index is not None
4458 and vdu_count_index != vca["vdu_count_index"]
4459 ):
4460 continue
4461 if kdu_name and kdu_name != vca["kdu_name"]:
4462 continue
4463 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4464 continue
4465 break
4466 else:
4467 # vca_deployed not found
4468 raise LcmException(
4469 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4470 " is not deployed".format(
4471 member_vnf_index,
4472 vdu_id,
4473 vdu_count_index,
4474 kdu_name,
4475 ee_descriptor_id,
4476 )
4477 )
4478 # get ee_id
4479 ee_id = vca.get("ee_id")
4480 vca_type = vca.get(
4481 "type", "lxc_proxy_charm"
4482 ) # default value for backward compatibility - proxy charm
4483 if not ee_id:
4484 raise LcmException(
4485 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4486 "execution environment".format(
4487 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4488 )
4489 )
4490 return ee_id, vca_type
4491
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """
        Execute a primitive on an execution environment through the VCA
        connector, retrying on failure.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name ("config" params get wrapped under "params")
        :param primitive_params: parameters passed to the primitive
        :param retries: number of extra attempts after a failed execution
        :param retries_interval: seconds to sleep between attempts
        :param timeout: max seconds per attempt (defaults to self.timeout_primitive)
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: where the connector writes intermediate status
        :param vca_id: VCA id forwarded to the connector
        :return: tuple (state, detail): ("COMPLETED", output) on success,
            ("FAILED", error) when retries are exhausted, or ("FAIL", error)
            on unexpected errors
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4549
4550 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4551 """
4552 Updating the vca_status with latest juju information in nsrs record
4553 :param: nsr_id: Id of the nsr
4554 :param: nslcmop_id: Id of the nslcmop
4555 :return: None
4556 """
4557
4558 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4559 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4560 vca_id = self.get_vca_id({}, db_nsr)
4561 if db_nsr["_admin"]["deployed"]["K8s"]:
4562 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4563 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4564 await self._on_update_k8s_db(
4565 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4566 )
4567 else:
4568 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4569 table, filter = "nsrs", {"_id": nsr_id}
4570 path = "_admin.deployed.VCA.{}.".format(vca_index)
4571 await self._on_update_n2vc_db(table, filter, path, {})
4572
4573 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4574 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4575
4576 async def action(self, nsr_id, nslcmop_id):
4577 # Try to lock HA task here
4578 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4579 if not task_is_locked_by_me:
4580 return
4581
4582 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4583 self.logger.debug(logging_text + "Enter")
4584 # get all needed from database
4585 db_nsr = None
4586 db_nslcmop = None
4587 db_nsr_update = {}
4588 db_nslcmop_update = {}
4589 nslcmop_operation_state = None
4590 error_description_nslcmop = None
4591 exc = None
4592 try:
4593 # wait for any previous tasks in process
4594 step = "Waiting for previous operations to terminate"
4595 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4596
4597 self._write_ns_status(
4598 nsr_id=nsr_id,
4599 ns_state=None,
4600 current_operation="RUNNING ACTION",
4601 current_operation_id=nslcmop_id,
4602 )
4603
4604 step = "Getting information from database"
4605 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4606 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4607
4608 nsr_deployed = db_nsr["_admin"].get("deployed")
4609 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4610 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4611 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4612 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4613 primitive = db_nslcmop["operationParams"]["primitive"]
4614 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4615 timeout_ns_action = db_nslcmop["operationParams"].get(
4616 "timeout_ns_action", self.timeout_primitive
4617 )
4618
4619 if vnf_index:
4620 step = "Getting vnfr from database"
4621 db_vnfr = self.db.get_one(
4622 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4623 )
4624 step = "Getting vnfd from database"
4625 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4626 else:
4627 step = "Getting nsd from database"
4628 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4629
4630 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4631 # for backward compatibility
4632 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4633 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4634 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4635 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4636
4637 # look for primitive
4638 config_primitive_desc = descriptor_configuration = None
4639 if vdu_id:
4640 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4641 elif kdu_name:
4642 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4643 elif vnf_index:
4644 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4645 else:
4646 descriptor_configuration = db_nsd.get("ns-configuration")
4647
4648 if descriptor_configuration and descriptor_configuration.get(
4649 "config-primitive"
4650 ):
4651 for config_primitive in descriptor_configuration["config-primitive"]:
4652 if config_primitive["name"] == primitive:
4653 config_primitive_desc = config_primitive
4654 break
4655
4656 if not config_primitive_desc:
4657 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4658 raise LcmException(
4659 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4660 primitive
4661 )
4662 )
4663 primitive_name = primitive
4664 ee_descriptor_id = None
4665 else:
4666 primitive_name = config_primitive_desc.get(
4667 "execution-environment-primitive", primitive
4668 )
4669 ee_descriptor_id = config_primitive_desc.get(
4670 "execution-environment-ref"
4671 )
4672
4673 if vnf_index:
4674 if vdu_id:
4675 vdur = next(
4676 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4677 )
4678 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4679 elif kdu_name:
4680 kdur = next(
4681 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4682 )
4683 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4684 else:
4685 desc_params = parse_yaml_strings(
4686 db_vnfr.get("additionalParamsForVnf")
4687 )
4688 else:
4689 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4690 if kdu_name and get_configuration(db_vnfd, kdu_name):
4691 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4692 actions = set()
4693 for primitive in kdu_configuration.get("initial-config-primitive", []):
4694 actions.add(primitive["name"])
4695 for primitive in kdu_configuration.get("config-primitive", []):
4696 actions.add(primitive["name"])
4697 kdu_action = True if primitive_name in actions else False
4698
4699 # TODO check if ns is in a proper status
4700 if kdu_name and (
4701 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4702 ):
4703 # kdur and desc_params already set from before
4704 if primitive_params:
4705 desc_params.update(primitive_params)
4706 # TODO Check if we will need something at vnf level
4707 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
4708 if (
4709 kdu_name == kdu["kdu-name"]
4710 and kdu["member-vnf-index"] == vnf_index
4711 ):
4712 break
4713 else:
4714 raise LcmException(
4715 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
4716 )
4717
4718 if kdu.get("k8scluster-type") not in self.k8scluster_map:
4719 msg = "unknown k8scluster-type '{}'".format(
4720 kdu.get("k8scluster-type")
4721 )
4722 raise LcmException(msg)
4723
4724 db_dict = {
4725 "collection": "nsrs",
4726 "filter": {"_id": nsr_id},
4727 "path": "_admin.deployed.K8s.{}".format(index),
4728 }
4729 self.logger.debug(
4730 logging_text
4731 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
4732 )
4733 step = "Executing kdu {}".format(primitive_name)
4734 if primitive_name == "upgrade":
4735 if desc_params.get("kdu_model"):
4736 kdu_model = desc_params.get("kdu_model")
4737 del desc_params["kdu_model"]
4738 else:
4739 kdu_model = kdu.get("kdu-model")
4740 parts = kdu_model.split(sep=":")
4741 if len(parts) == 2:
4742 kdu_model = parts[0]
4743
4744 detailed_status = await asyncio.wait_for(
4745 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
4746 cluster_uuid=kdu.get("k8scluster-uuid"),
4747 kdu_instance=kdu.get("kdu-instance"),
4748 atomic=True,
4749 kdu_model=kdu_model,
4750 params=desc_params,
4751 db_dict=db_dict,
4752 timeout=timeout_ns_action,
4753 ),
4754 timeout=timeout_ns_action + 10,
4755 )
4756 self.logger.debug(
4757 logging_text + " Upgrade of kdu {} done".format(detailed_status)
4758 )
4759 elif primitive_name == "rollback":
4760 detailed_status = await asyncio.wait_for(
4761 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
4762 cluster_uuid=kdu.get("k8scluster-uuid"),
4763 kdu_instance=kdu.get("kdu-instance"),
4764 db_dict=db_dict,
4765 ),
4766 timeout=timeout_ns_action,
4767 )
4768 elif primitive_name == "status":
4769 detailed_status = await asyncio.wait_for(
4770 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
4771 cluster_uuid=kdu.get("k8scluster-uuid"),
4772 kdu_instance=kdu.get("kdu-instance"),
4773 vca_id=vca_id,
4774 ),
4775 timeout=timeout_ns_action,
4776 )
4777 else:
4778 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
4779 kdu["kdu-name"], nsr_id
4780 )
4781 params = self._map_primitive_params(
4782 config_primitive_desc, primitive_params, desc_params
4783 )
4784
4785 detailed_status = await asyncio.wait_for(
4786 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
4787 cluster_uuid=kdu.get("k8scluster-uuid"),
4788 kdu_instance=kdu_instance,
4789 primitive_name=primitive_name,
4790 params=params,
4791 db_dict=db_dict,
4792 timeout=timeout_ns_action,
4793 vca_id=vca_id,
4794 ),
4795 timeout=timeout_ns_action,
4796 )
4797
4798 if detailed_status:
4799 nslcmop_operation_state = "COMPLETED"
4800 else:
4801 detailed_status = ""
4802 nslcmop_operation_state = "FAILED"
4803 else:
4804 ee_id, vca_type = self._look_for_deployed_vca(
4805 nsr_deployed["VCA"],
4806 member_vnf_index=vnf_index,
4807 vdu_id=vdu_id,
4808 vdu_count_index=vdu_count_index,
4809 ee_descriptor_id=ee_descriptor_id,
4810 )
4811 for vca_index, vca_deployed in enumerate(
4812 db_nsr["_admin"]["deployed"]["VCA"]
4813 ):
4814 if vca_deployed.get("member-vnf-index") == vnf_index:
4815 db_dict = {
4816 "collection": "nsrs",
4817 "filter": {"_id": nsr_id},
4818 "path": "_admin.deployed.VCA.{}.".format(vca_index),
4819 }
4820 break
4821 (
4822 nslcmop_operation_state,
4823 detailed_status,
4824 ) = await self._ns_execute_primitive(
4825 ee_id,
4826 primitive=primitive_name,
4827 primitive_params=self._map_primitive_params(
4828 config_primitive_desc, primitive_params, desc_params
4829 ),
4830 timeout=timeout_ns_action,
4831 vca_type=vca_type,
4832 db_dict=db_dict,
4833 vca_id=vca_id,
4834 )
4835
4836 db_nslcmop_update["detailed-status"] = detailed_status
4837 error_description_nslcmop = (
4838 detailed_status if nslcmop_operation_state == "FAILED" else ""
4839 )
4840 self.logger.debug(
4841 logging_text
4842 + " task Done with result {} {}".format(
4843 nslcmop_operation_state, detailed_status
4844 )
4845 )
4846 return # database update is called inside finally
4847
4848 except (DbException, LcmException, N2VCException, K8sException) as e:
4849 self.logger.error(logging_text + "Exit Exception {}".format(e))
4850 exc = e
4851 except asyncio.CancelledError:
4852 self.logger.error(
4853 logging_text + "Cancelled Exception while '{}'".format(step)
4854 )
4855 exc = "Operation was cancelled"
4856 except asyncio.TimeoutError:
4857 self.logger.error(logging_text + "Timeout while '{}'".format(step))
4858 exc = "Timeout"
4859 except Exception as e:
4860 exc = traceback.format_exc()
4861 self.logger.critical(
4862 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
4863 exc_info=True,
4864 )
4865 finally:
4866 if exc:
4867 db_nslcmop_update[
4868 "detailed-status"
4869 ] = (
4870 detailed_status
4871 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4872 nslcmop_operation_state = "FAILED"
4873 if db_nsr:
4874 self._write_ns_status(
4875 nsr_id=nsr_id,
4876 ns_state=db_nsr[
4877 "nsState"
4878 ], # TODO check if degraded. For the moment use previous status
4879 current_operation="IDLE",
4880 current_operation_id=None,
4881 # error_description=error_description_nsr,
4882 # error_detail=error_detail,
4883 other_update=db_nsr_update,
4884 )
4885
4886 self._write_op_status(
4887 op_id=nslcmop_id,
4888 stage="",
4889 error_message=error_description_nslcmop,
4890 operation_state=nslcmop_operation_state,
4891 other_update=db_nslcmop_update,
4892 )
4893
4894 if nslcmop_operation_state:
4895 try:
4896 await self.msg.aiowrite(
4897 "ns",
4898 "actioned",
4899 {
4900 "nsr_id": nsr_id,
4901 "nslcmop_id": nslcmop_id,
4902 "operationState": nslcmop_operation_state,
4903 },
4904 loop=self.loop,
4905 )
4906 except Exception as e:
4907 self.logger.error(
4908 logging_text + "kafka_write notification Exception {}".format(e)
4909 )
4910 self.logger.debug(logging_text + "Exit")
4911 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
4912 return nslcmop_operation_state, detailed_status
4913
    async def scale(self, nsr_id, nslcmop_id):
        """Scale a NS (one VNF scaling-group) in or out.

        Reads the scaling request from the nslcmop, computes the VDU/KDU/VCA
        deltas for the requested scaling-group-descriptor, runs the pre-scale
        config primitives, applies the scaling at RO (VIM), K8s (KDU) and VCA
        (charm) level, and finally runs the post-scale config primitives.

        :param nsr_id: _id of the NS record in the "nsrs" collection
        :param nslcmop_id: _id of the operation in the "nslcmops" collection
        :return: None. Progress/result is persisted in the database and
            notified on kafka topic "ns" / "scaled"; runs as an LCM task.
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        scale_process = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="SCALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # saved so they can be restored on success or on failure cleanup
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            step = "Parsing scaling parameters"
            db_nsr_update["operational-status"] = "scaling"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["member-vnf-index"]
            scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["scaling-group-descriptor"]
            scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
            )

            vca_id = self.get_vca_id(db_vnfr, db_nsr)

            step = "Getting vnfd from database"
            db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

            base_folder = db_vnfd["_admin"]["storage"]

            step = "Getting scaling-group-descriptor"
            scaling_descriptor = find_in_list(
                get_scaling_aspect(db_vnfd),
                lambda scale_desc: scale_desc["name"] == scaling_group,
            )
            if not scaling_descriptor:
                raise LcmException(
                    "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
                    "at vnfd:scaling-group-descriptor".format(scaling_group)
                )

            step = "Sending scale order to VIM"
            # TODO check if ns is in a proper status
            # nb_scale_op accumulates the net number of scale operations already
            # applied on this scaling-group (persisted in _admin.scaling-group)
            nb_scale_op = 0
            if not db_nsr["_admin"].get("scaling-group"):
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {
                        "_admin.scaling-group": [
                            {"name": scaling_group, "nb-scale-op": 0}
                        ]
                    },
                )
                admin_scale_index = 0
            else:
                for admin_scale_index, admin_scale_info in enumerate(
                    db_nsr["_admin"]["scaling-group"]
                ):
                    if admin_scale_info["name"] == scaling_group:
                        nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                        break
                else:  # not found, set index one plus last element and add new entry with the name
                    admin_scale_index += 1
                    db_nsr_update[
                        "_admin.scaling-group.{}.name".format(admin_scale_index)
                    ] = scaling_group

            vca_scaling_info = []
            scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
            if scaling_type == "SCALE_OUT":
                if "aspect-delta-details" not in scaling_descriptor:
                    raise LcmException(
                        "Aspect delta details not fount in scaling descriptor {}".format(
                            scaling_descriptor["name"]
                        )
                    )
                # count if max-instance-count is reached
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "OUT"
                scaling_info["vdu-create"] = {}
                scaling_info["kdu-create"] = {}
                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        vdud = get_vdu(db_vnfd, vdu_delta["id"])
                        # vdu_index also provides the number of instance of the targeted vdu
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        cloud_init_text = self._get_vdu_cloud_init_content(
                            vdud, db_vnfd
                        )
                        if cloud_init_text:
                            additional_params = (
                                self._get_vdu_additional_params(db_vnfr, vdud["id"])
                                or {}
                            )
                        cloud_init_list = []

                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        # default ceiling when the descriptor does not set one
                        max_instance_count = 10
                        if vdu_profile and "max-number-of-instances" in vdu_profile:
                            max_instance_count = vdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdud["id"]
                        )
                        instances_number = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op += instances_number

                        new_instance_count = nb_scale_op + default_instance_num
                        # Control if new count is over max and vdu count is less than max.
                        # Then assign new instance count
                        # NOTE(review): if new_instance_count exceeds the maximum, the
                        # LcmException below fires regardless, so the capped value
                        # computed here is discarded; and the else branch is a no-op.
                        # Confirm intended behavior before changing either branch.
                        if new_instance_count > max_instance_count > vdu_count:
                            instances_number = new_instance_count - max_instance_count
                        else:
                            instances_number = instances_number

                        if new_instance_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            if cloud_init_text:
                                # TODO Information of its own ip is not available because db_vnfr is not updated.
                                additional_params["OSM"] = get_osm_params(
                                    db_vnfr, vdu_delta["id"], vdu_index + x
                                )
                                cloud_init_list.append(
                                    self._parse_cloud_init(
                                        cloud_init_text,
                                        additional_params,
                                        db_vnfd["id"],
                                        vdud["id"],
                                    )
                                )
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "vdu_index": vdu_index + x,
                                }
                            )
                        scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        # Might have different kdus in the same delta
                        # Should have list for each kdu
                        if not scaling_info["kdu-create"].get(kdu_name, None):
                            scaling_info["kdu-create"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                k8s_cluster_type = "helm-chart"
                                # scaling of helm v2 KDUs is not supported
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdu_name
                                )
                            )

                        # default ceiling when the descriptor does not set one
                        max_instance_count = 10
                        if kdu_profile and "max-number-of-instances" in kdu_profile:
                            max_instance_count = kdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        nb_scale_op += kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count is read live from the k8s cluster
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num + kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # Control if new count is over max and instance_num is less than max.
                        # Then assign max instance number to kdu replica count
                        if kdu_replica_count > max_instance_count > instance_num:
                            kdu_replica_count = max_instance_count
                        if kdu_replica_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    # NOTE(review): "- 1" makes the first new index equal
                                    # instance_num - 1 (an existing unit) — confirm this
                                    # off-by-one is intentional for kdu indexing
                                    "kdu_index": instance_num + x - 1,
                                }
                            )
                        scaling_info["kdu-create"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "create",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )
            elif scaling_type == "SCALE_IN":
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "IN"
                scaling_info["vdu-delete"] = {}
                scaling_info["kdu-delete"] = {}

                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        min_instance_count = 0
                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        if vdu_profile and "min-number-of-instances" in vdu_profile:
                            min_instance_count = vdu_profile["min-number-of-instances"]

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdu_delta["id"]
                        )
                        instance_num = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op -= instance_num

                        new_instance_count = nb_scale_op + default_instance_num

                        # NOTE(review): mirror of the SCALE_OUT branch — if
                        # new_instance_count falls below the minimum the raise
                        # below fires anyway, discarding the clamped value; verify.
                        if new_instance_count < min_instance_count < vdu_count:
                            instances_number = min_instance_count - new_instance_count
                        else:
                            instances_number = instance_num

                        if new_instance_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    # delete the highest-indexed instances first
                                    "vdu_index": vdu_index - 1 - x,
                                }
                            )
                        scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        if not scaling_info["kdu-delete"].get(kdu_name, None):
                            scaling_info["kdu-delete"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                k8s_cluster_type = "helm-chart"
                                # scaling of helm v2 KDUs is not supported
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
                                )
                            )

                        min_instance_count = 0
                        if kdu_profile and "min-number-of-instances" in kdu_profile:
                            min_instance_count = kdu_profile["min-number-of-instances"]

                        nb_scale_op -= kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count is read live from the k8s cluster
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num - kdu_delta.get(
                            "number-of-instances", 1
                        )

                        if kdu_replica_count < min_instance_count < instance_num:
                            kdu_replica_count = min_instance_count
                        if kdu_replica_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    # delete the highest-indexed units first
                                    "kdu_index": instance_num - x - 1,
                                }
                            )
                        scaling_info["kdu-delete"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "delete",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )

            # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
            vdu_delete = copy(scaling_info.get("vdu-delete"))
            if scaling_info["scaling_direction"] == "IN":
                # walk the vdur list backwards so the last-created instances
                # (the ones that will be removed) are the ones recorded
                for vdur in reversed(db_vnfr["vdur"]):
                    if vdu_delete.get(vdur["vdu-id-ref"]):
                        vdu_delete[vdur["vdu-id-ref"]] -= 1
                        scaling_info["vdu"].append(
                            {
                                "name": vdur.get("name") or vdur.get("vdu-name"),
                                "vdu_id": vdur["vdu-id-ref"],
                                "interface": [],
                            }
                        )
                        for interface in vdur["interfaces"]:
                            scaling_info["vdu"][-1]["interface"].append(
                                {
                                    "name": interface["name"],
                                    "ip_address": interface["ip-address"],
                                    "mac_address": interface.get("mac-address"),
                                }
                            )
            # vdu_delete = vdu_scaling_info.pop("vdu-delete")

            # PRE-SCALE BEGIN
            step = "Executing pre-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "pre-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "pre-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing pre-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
                                "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
                                "primitive".format(scaling_group, vnf_config_primitive)
                            )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring pre-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Pre-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "PRE-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
                        db_nsr_update["config-status"] = old_config_status
                        scale_process = None
            # PRE-SCALE END

            # persist the new net scale-op counter for this scaling-group
            db_nsr_update[
                "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
            ] = nb_scale_op
            db_nsr_update[
                "_admin.scaling-group.{}.time".format(admin_scale_index)
            ] = time()

            # SCALE-IN VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Deleting the execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "delete":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        if vca_info.get("osm_vdu_id"):
                            vdu_id = vca_info["osm_vdu_id"]
                            vdu_index = int(vca_info["vdu_index"])
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index
                            )
                        else:
                            vdu_index = 0
                            kdu_id = vca_info["osm_kdu_id"]
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
                                member_vnf_index, kdu_id, vdu_index
                            )
                        stage[2] = step = "Scaling in VCA"
                        self._write_op_status(op_id=nslcmop_id, stage=stage)
                        vca_update = db_nsr["_admin"]["deployed"]["VCA"]
                        config_update = db_nsr["configurationStatus"]
                        for vca_index, vca in enumerate(vca_update):
                            if (
                                (vca or vca.get("ee_id"))
                                and vca["member-vnf-index"] == member_vnf_index
                                and vca["vdu_count_index"] == vdu_index
                            ):
                                if vca.get("vdu_id"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("vdu_id")
                                    )
                                elif vca.get("kdu_name"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("kdu_name")
                                    )
                                else:
                                    config_descriptor = get_configuration(
                                        db_vnfd, db_vnfd["id"]
                                    )
                                operation_params = (
                                    db_nslcmop.get("operationParams") or {}
                                )
                                exec_terminate_primitives = not operation_params.get(
                                    "skip_terminate_primitives"
                                ) and vca.get("needed_terminate")
                                task = asyncio.ensure_future(
                                    asyncio.wait_for(
                                        self.destroy_N2VC(
                                            logging_text,
                                            db_nslcmop,
                                            vca,
                                            config_descriptor,
                                            vca_index,
                                            destroy_ee=True,
                                            exec_primitives=exec_terminate_primitives,
                                            scaling_in=True,
                                            vca_id=vca_id,
                                        ),
                                        timeout=self.timeout_charm_delete,
                                    )
                                )
                                tasks_dict_info[task] = "Terminating VCA {}".format(
                                    vca.get("ee_id")
                                )
                                # NOTE(review): deleting from vca_update/config_update
                                # while enumerate() is iterating them skips the element
                                # following each deletion — confirm at most one VCA can
                                # match, otherwise entries may be missed.
                                del vca_update[vca_index]
                                del config_update[vca_index]
                        # wait for pending tasks of terminate primitives
                        if tasks_dict_info:
                            self.logger.debug(
                                logging_text
                                + "Waiting for tasks {}".format(
                                    list(tasks_dict_info.keys())
                                )
                            )
                            error_list = await self._wait_for_tasks(
                                logging_text,
                                tasks_dict_info,
                                min(
                                    self.timeout_charm_delete, self.timeout_ns_terminate
                                ),
                                stage,
                                nslcmop_id,
                            )
                            tasks_dict_info.clear()
                            if error_list:
                                raise LcmException("; ".join(error_list))

                        db_vca_and_config_update = {
                            "_admin.deployed.VCA": vca_update,
                            "configurationStatus": config_update,
                        }
                        self.update_db_2(
                            "nsrs", db_nsr["_id"], db_vca_and_config_update
                        )
            scale_process = None
            # SCALE-IN VCA - END

            # SCALE RO - BEGIN
            if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                scale_process = "RO"
                if self.ro_config.get("ng"):
                    await self._scale_ng_ro(
                        logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                    )
                scaling_info.pop("vdu-create", None)
                scaling_info.pop("vdu-delete", None)

                scale_process = None
            # SCALE RO - END

            # SCALE KDU - BEGIN
            if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
                scale_process = "KDU"
                await self._scale_kdu(
                    logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
                )
                scaling_info.pop("kdu-create", None)
                scaling_info.pop("kdu-delete", None)

                scale_process = None
            # SCALE KDU - END

            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # SCALE-UP VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Creating new execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "create":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        vnfd_id = db_vnfr["vnfd-ref"]
                        if vca_info.get("osm_vdu_id"):
                            vdu_index = int(vca_info["vdu_index"])
                            deploy_params = {"OSM": get_osm_params(db_vnfr)}
                            if db_vnfr.get("additionalParamsForVnf"):
                                deploy_params.update(
                                    parse_yaml_strings(
                                        db_vnfr["additionalParamsForVnf"].copy()
                                    )
                                )
                            # VNF-level configuration, deployed per new vdu instance
                            descriptor_config = get_configuration(
                                db_vnfd, db_vnfd["id"]
                            )
                            if descriptor_config:
                                vdu_id = None
                                vdu_name = None
                                kdu_name = None
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={} ".format(member_vnf_index),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                            # VDU-level configuration for the new instance
                            vdu_id = vca_info["osm_vdu_id"]
                            vdur = find_in_list(
                                db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                            )
                            descriptor_config = get_configuration(db_vnfd, vdu_id)
                            if vdur.get("additionalParams"):
                                deploy_params_vdu = parse_yaml_strings(
                                    vdur["additionalParams"]
                                )
                            else:
                                deploy_params_vdu = deploy_params
                            deploy_params_vdu["OSM"] = get_osm_params(
                                db_vnfr, vdu_id, vdu_count_index=vdu_index
                            )
                            if descriptor_config:
                                vdu_name = None
                                kdu_name = None
                                stage[
                                    1
                                ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                )
                                stage[2] = step = "Scaling out VCA"
                                self._write_op_status(op_id=nslcmop_id, stage=stage)
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_id, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                        else:
                            # KDU-level configuration for the new unit
                            kdu_name = vca_info["osm_kdu_id"]
                            descriptor_config = get_configuration(db_vnfd, kdu_name)
                            if descriptor_config:
                                vdu_id = None
                                kdu_index = int(vca_info["kdu_index"])
                                vdu_name = None
                                kdur = next(
                                    x
                                    for x in db_vnfr["kdur"]
                                    if x["kdu-name"] == kdu_name
                                )
                                deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                                if kdur.get("additionalParams"):
                                    deploy_params_kdu = parse_yaml_strings(
                                        kdur["additionalParams"]
                                    )

                                self._deploy_n2vc(
                                    logging_text=logging_text,
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=kdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_kdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
            # SCALE-UP VCA - END
            scale_process = None

            # POST-SCALE BEGIN
            # execute primitive service POST-SCALING
            step = "Executing post-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "post-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "post-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing post-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
                                "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
                                "config-primitive".format(
                                    scaling_group, vnf_config_primitive
                                )
                            )
                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring post-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Post-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "POST-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
                        db_nsr_update["config-status"] = old_config_status
                        scale_process = None
            # POST-SCALE END

            db_nsr_update[
                "detailed-status"
            ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
            db_nsr_update["operational-status"] = (
                "running"
                if old_operational_status == "failed"
                else old_operational_status
            )
            db_nsr_update["config-status"] = old_config_status
            return
        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # always release the "SCALING" state, flush pending deploy tasks,
            # record the final operation result and notify via kafka
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update["detailed-status"] = ""
                    if scale_process:
                        # scale_process indicates which stage failed mid-flight
                        if "VCA" in scale_process:
                            db_nsr_update["config-status"] = "failed"
                        if "RO" in scale_process:
                            db_nsr_update["operational-status"] = "failed"
                        db_nsr_update[
                            "detailed-status"
                        ] = "FAILED scaling nslcmop={} {}: {}".format(
                            nslcmop_id, step, exc
                        )
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
5976
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale the KDUs listed in scaling_info via the matching k8s connector.

        For each kdu scaling entry the deployed kdu instance is scaled to the
        requested replica count.  On scale-in (type "delete") the kdu
        terminate-config-primitives run *before* the scale call; on scale-out
        (type "create") the initial-config-primitives run *after* it.  In both
        cases the primitives are only executed when the kdu has no juju
        execution environment (get_juju_ee_ref is None), i.e. when VCA is not
        in charge of configuration.

        :param logging_text: prefix for log messages
        :param nsr_id: ns record _id, used to address the K8s entry in the db
        :param nsr_deployed: nsr _admin.deployed content with the K8s entries
        :param db_vnfd: vnfd owning the kdus, source of their configuration
        :param vca_id: VCA id forwarded to the k8s connector calls
        :param scaling_info: dict with a "kdu-create" or "kdu-delete" map of
            kdu-name -> list of scaling entries (member-vnf-index, scale,
            k8s-cluster-type, resource-name, type)
        """
        # NOTE(review): only one of kdu-create / kdu-delete is processed; when
        # both keys are present, kdu-delete is ignored.  Assumes at least one
        # of them is set — iterating would raise TypeError otherwise.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed kdu record and its position in the
                # _admin.deployed.K8s list (index is needed for the db path)
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db location where the connector reports progress/status
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # the actual scale happens for BOTH "delete" and "create"
                # entries: after terminate primitives, before initial ones
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
6077
6078 async def _scale_ng_ro(
6079 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6080 ):
6081 nsr_id = db_nslcmop["nsInstanceId"]
6082 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6083 db_vnfrs = {}
6084
6085 # read from db: vnfd's for every vnf
6086 db_vnfds = []
6087
6088 # for each vnf in ns, read vnfd
6089 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6090 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6091 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6092 # if we haven't this vnfd, read it from db
6093 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6094 # read from db
6095 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6096 db_vnfds.append(vnfd)
6097 n2vc_key = self.n2vc.get_public_key()
6098 n2vc_key_list = [n2vc_key]
6099 self.scale_vnfr(
6100 db_vnfr,
6101 vdu_scaling_info.get("vdu-create"),
6102 vdu_scaling_info.get("vdu-delete"),
6103 mark_delete=True,
6104 )
6105 # db_vnfr has been updated, update db_vnfrs to use it
6106 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6107 await self._instantiate_ng_ro(
6108 logging_text,
6109 nsr_id,
6110 db_nsd,
6111 db_nsr,
6112 db_nslcmop,
6113 db_vnfrs,
6114 db_vnfds,
6115 n2vc_key_list,
6116 stage=stage,
6117 start_deploy=time(),
6118 timeout_ns_deploy=self.timeout_ns_deploy,
6119 )
6120 if vdu_scaling_info.get("vdu-delete"):
6121 self.scale_vnfr(
6122 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6123 )
6124
6125 async def add_prometheus_metrics(
6126 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6127 ):
6128 if not self.prometheus:
6129 return
6130 # look if exist a file called 'prometheus*.j2' and
6131 artifact_content = self.fs.dir_ls(artifact_path)
6132 job_file = next(
6133 (
6134 f
6135 for f in artifact_content
6136 if f.startswith("prometheus") and f.endswith(".j2")
6137 ),
6138 None,
6139 )
6140 if not job_file:
6141 return
6142 with self.fs.file_open((artifact_path, job_file), "r") as f:
6143 job_data = f.read()
6144
6145 # TODO get_service
6146 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6147 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6148 host_port = "80"
6149 vnfr_id = vnfr_id.replace("-", "")
6150 variables = {
6151 "JOB_NAME": vnfr_id,
6152 "TARGET_IP": target_ip,
6153 "EXPORTER_POD_IP": host_name,
6154 "EXPORTER_POD_PORT": host_port,
6155 }
6156 job_list = self.prometheus.parse_job(job_data, variables)
6157 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
6158 for job in job_list:
6159 if (
6160 not isinstance(job.get("job_name"), str)
6161 or vnfr_id not in job["job_name"]
6162 ):
6163 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6164 job["nsr_id"] = nsr_id
6165 job_dict = {jl["job_name"]: jl for jl in job_list}
6166 if await self.prometheus.update(job_dict):
6167 return list(job_dict.keys())
6168
6169 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6170 """
6171 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6172
6173 :param: vim_account_id: VIM Account ID
6174
6175 :return: (cloud_name, cloud_credential)
6176 """
6177 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6178 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6179
6180 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6181 """
6182 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6183
6184 :param: vim_account_id: VIM Account ID
6185
6186 :return: (cloud_name, cloud_credential)
6187 """
6188 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6189 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")