Fix 1533 (nscharms): read juju from descriptor
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import (
26 Environment,
27 TemplateError,
28 TemplateNotFound,
29 StrictUndefined,
30 UndefinedError,
31 )
32
33 from osm_lcm import ROclient
34 from osm_lcm.data_utils.nsr import get_deployed_kdu
35 from osm_lcm.ng_ro import NgRoClient, NgRoException
36 from osm_lcm.lcm_utils import (
37 LcmException,
38 LcmExceptionNoMgmtIP,
39 LcmBase,
40 deep_get,
41 get_iterable,
42 populate_dict,
43 )
44 from osm_lcm.data_utils.nsd import get_vnf_profiles
45 from osm_lcm.data_utils.vnfd import (
46 get_vdu_list,
47 get_vdu_profile,
48 get_ee_sorted_initial_config_primitive_list,
49 get_ee_sorted_terminate_config_primitive_list,
50 get_kdu_list,
51 get_virtual_link_profiles,
52 get_vdu,
53 get_configuration,
54 get_vdu_index,
55 get_scaling_aspect,
56 get_number_of_instances,
57 get_juju_ee_ref,
58 get_kdu_profile,
59 )
60 from osm_lcm.data_utils.list_utils import find_in_list
61 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
62 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
63 from osm_lcm.data_utils.database.vim_account import VimAccountDB
64 from n2vc.k8s_helm_conn import K8sHelmConnector
65 from n2vc.k8s_helm3_conn import K8sHelm3Connector
66 from n2vc.k8s_juju_conn import K8sJujuConnector
67
68 from osm_common.dbbase import DbException
69 from osm_common.fsbase import FsException
70
71 from osm_lcm.data_utils.database.database import Database
72 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
73
74 from n2vc.n2vc_juju_conn import N2VCJujuConnector
75 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
76
77 from osm_lcm.lcm_helm_conn import LCMHelmConn
78
79 from copy import copy, deepcopy
80 from time import time
81 from uuid import uuid4
82
83 from random import randint
84
85 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
86
87
class NsLcm(LcmBase):
    """Lifecycle manager for network services (NS): deploy, scale and terminate."""

    # Time (seconds) a charm may stay in blocked/error status before the
    # deployment is marked as failed.
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60  # timeout (seconds) for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution

    # Sentinel result codes used when looking up LCM sub-operations
    # (negative so they never collide with a real sub-operation index).
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # human-readable label for the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
104
105 def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
106 """
107 Init, Connect to database, filesystem storage, and messaging
108 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
109 :return: None
110 """
111 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
112
113 self.db = Database().instance.db
114 self.fs = Filesystem().instance.fs
115 self.loop = loop
116 self.lcm_tasks = lcm_tasks
117 self.timeout = config["timeout"]
118 self.ro_config = config["ro_config"]
119 self.ng_ro = config["ro_config"].get("ng")
120 self.vca_config = config["VCA"].copy()
121
122 # create N2VC connector
123 self.n2vc = N2VCJujuConnector(
124 log=self.logger,
125 loop=self.loop,
126 on_update_db=self._on_update_n2vc_db,
127 fs=self.fs,
128 db=self.db,
129 )
130
131 self.conn_helm_ee = LCMHelmConn(
132 log=self.logger,
133 loop=self.loop,
134 vca_config=self.vca_config,
135 on_update_db=self._on_update_n2vc_db,
136 )
137
138 self.k8sclusterhelm2 = K8sHelmConnector(
139 kubectl_command=self.vca_config.get("kubectlpath"),
140 helm_command=self.vca_config.get("helmpath"),
141 log=self.logger,
142 on_update_db=None,
143 fs=self.fs,
144 db=self.db,
145 )
146
147 self.k8sclusterhelm3 = K8sHelm3Connector(
148 kubectl_command=self.vca_config.get("kubectlpath"),
149 helm_command=self.vca_config.get("helm3path"),
150 fs=self.fs,
151 log=self.logger,
152 db=self.db,
153 on_update_db=None,
154 )
155
156 self.k8sclusterjuju = K8sJujuConnector(
157 kubectl_command=self.vca_config.get("kubectlpath"),
158 juju_command=self.vca_config.get("jujupath"),
159 log=self.logger,
160 loop=self.loop,
161 on_update_db=self._on_update_k8s_db,
162 fs=self.fs,
163 db=self.db,
164 )
165
166 self.k8scluster_map = {
167 "helm-chart": self.k8sclusterhelm2,
168 "helm-chart-v3": self.k8sclusterhelm3,
169 "chart": self.k8sclusterhelm3,
170 "juju-bundle": self.k8sclusterjuju,
171 "juju": self.k8sclusterjuju,
172 }
173
174 self.vca_map = {
175 "lxc_proxy_charm": self.n2vc,
176 "native_charm": self.n2vc,
177 "k8s_proxy_charm": self.n2vc,
178 "helm": self.conn_helm_ee,
179 "helm-v3": self.conn_helm_ee,
180 }
181
182 self.prometheus = prometheus
183
184 # create RO client
185 self.RO = NgRoClient(self.loop, **self.ro_config)
186
187 @staticmethod
188 def increment_ip_mac(ip_mac, vm_index=1):
189 if not isinstance(ip_mac, str):
190 return ip_mac
191 try:
192 # try with ipv4 look for last dot
193 i = ip_mac.rfind(".")
194 if i > 0:
195 i += 1
196 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
197 # try with ipv6 or mac look for last colon. Operate in hex
198 i = ip_mac.rfind(":")
199 if i > 0:
200 i += 1
201 # format in hex, len can be 2 for mac or 4 for ipv6
202 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
203 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
204 )
205 except Exception:
206 pass
207 return None
208
209 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
210
211 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
212
213 try:
214 # TODO filter RO descriptor fields...
215
216 # write to database
217 db_dict = dict()
218 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
219 db_dict["deploymentStatus"] = ro_descriptor
220 self.update_db_2("nsrs", nsrs_id, db_dict)
221
222 except Exception as e:
223 self.logger.warn(
224 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
225 )
226
227 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
228
229 # remove last dot from path (if exists)
230 if path.endswith("."):
231 path = path[:-1]
232
233 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
234 # .format(table, filter, path, updated_data))
235 try:
236
237 nsr_id = filter.get("_id")
238
239 # read ns record from database
240 nsr = self.db.get_one(table="nsrs", q_filter=filter)
241 current_ns_status = nsr.get("nsState")
242
243 # get vca status for NS
244 status_dict = await self.n2vc.get_status(
245 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
246 )
247
248 # vcaStatus
249 db_dict = dict()
250 db_dict["vcaStatus"] = status_dict
251 await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
252
253 # update configurationStatus for this VCA
254 try:
255 vca_index = int(path[path.rfind(".") + 1 :])
256
257 vca_list = deep_get(
258 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
259 )
260 vca_status = vca_list[vca_index].get("status")
261
262 configuration_status_list = nsr.get("configurationStatus")
263 config_status = configuration_status_list[vca_index].get("status")
264
265 if config_status == "BROKEN" and vca_status != "failed":
266 db_dict["configurationStatus"][vca_index] = "READY"
267 elif config_status != "BROKEN" and vca_status == "failed":
268 db_dict["configurationStatus"][vca_index] = "BROKEN"
269 except Exception as e:
270 # not update configurationStatus
271 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
272
273 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
274 # if nsState = 'DEGRADED' check if all is OK
275 is_degraded = False
276 if current_ns_status in ("READY", "DEGRADED"):
277 error_description = ""
278 # check machines
279 if status_dict.get("machines"):
280 for machine_id in status_dict.get("machines"):
281 machine = status_dict.get("machines").get(machine_id)
282 # check machine agent-status
283 if machine.get("agent-status"):
284 s = machine.get("agent-status").get("status")
285 if s != "started":
286 is_degraded = True
287 error_description += (
288 "machine {} agent-status={} ; ".format(
289 machine_id, s
290 )
291 )
292 # check machine instance status
293 if machine.get("instance-status"):
294 s = machine.get("instance-status").get("status")
295 if s != "running":
296 is_degraded = True
297 error_description += (
298 "machine {} instance-status={} ; ".format(
299 machine_id, s
300 )
301 )
302 # check applications
303 if status_dict.get("applications"):
304 for app_id in status_dict.get("applications"):
305 app = status_dict.get("applications").get(app_id)
306 # check application status
307 if app.get("status"):
308 s = app.get("status").get("status")
309 if s != "active":
310 is_degraded = True
311 error_description += (
312 "application {} status={} ; ".format(app_id, s)
313 )
314
315 if error_description:
316 db_dict["errorDescription"] = error_description
317 if current_ns_status == "READY" and is_degraded:
318 db_dict["nsState"] = "DEGRADED"
319 if current_ns_status == "DEGRADED" and not is_degraded:
320 db_dict["nsState"] = "READY"
321
322 # write to database
323 self.update_db_2("nsrs", nsr_id, db_dict)
324
325 except (asyncio.CancelledError, asyncio.TimeoutError):
326 raise
327 except Exception as e:
328 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
329
330 async def _on_update_k8s_db(
331 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
332 ):
333 """
334 Updating vca status in NSR record
335 :param cluster_uuid: UUID of a k8s cluster
336 :param kdu_instance: The unique name of the KDU instance
337 :param filter: To get nsr_id
338 :return: none
339 """
340
341 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
342 # .format(cluster_uuid, kdu_instance, filter))
343
344 try:
345 nsr_id = filter.get("_id")
346
347 # get vca status for NS
348 vca_status = await self.k8sclusterjuju.status_kdu(
349 cluster_uuid,
350 kdu_instance,
351 complete_status=True,
352 yaml_format=False,
353 vca_id=vca_id,
354 )
355 # vcaStatus
356 db_dict = dict()
357 db_dict["vcaStatus"] = {nsr_id: vca_status}
358
359 await self.k8sclusterjuju.update_vca_status(
360 db_dict["vcaStatus"],
361 kdu_instance,
362 vca_id=vca_id,
363 )
364
365 # write to database
366 self.update_db_2("nsrs", nsr_id, db_dict)
367
368 except (asyncio.CancelledError, asyncio.TimeoutError):
369 raise
370 except Exception as e:
371 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
372
373 @staticmethod
374 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
375 try:
376 env = Environment(undefined=StrictUndefined)
377 template = env.from_string(cloud_init_text)
378 return template.render(additional_params or {})
379 except UndefinedError as e:
380 raise LcmException(
381 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
382 "file, must be provided in the instantiation parameters inside the "
383 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
384 )
385 except (TemplateError, TemplateNotFound) as e:
386 raise LcmException(
387 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
388 vnfd_id, vdu_id, e
389 )
390 )
391
392 def _get_vdu_cloud_init_content(self, vdu, vnfd):
393 cloud_init_content = cloud_init_file = None
394 try:
395 if vdu.get("cloud-init-file"):
396 base_folder = vnfd["_admin"]["storage"]
397 cloud_init_file = "{}/{}/cloud_init/{}".format(
398 base_folder["folder"],
399 base_folder["pkg-dir"],
400 vdu["cloud-init-file"],
401 )
402 with self.fs.file_open(cloud_init_file, "r") as ci_file:
403 cloud_init_content = ci_file.read()
404 elif vdu.get("cloud-init"):
405 cloud_init_content = vdu["cloud-init"]
406
407 return cloud_init_content
408 except FsException as e:
409 raise LcmException(
410 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
411 vnfd["id"], vdu["id"], cloud_init_file, e
412 )
413 )
414
415 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
416 vdur = next(
417 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
418 )
419 additional_params = vdur.get("additionalParams")
420 return parse_yaml_strings(additional_params)
421
422 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
423 """
424 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
425 :param vnfd: input vnfd
426 :param new_id: overrides vnf id if provided
427 :param additionalParams: Instantiation params for VNFs provided
428 :param nsrId: Id of the NSR
429 :return: copy of vnfd
430 """
431 vnfd_RO = deepcopy(vnfd)
432 # remove unused by RO configuration, monitoring, scaling and internal keys
433 vnfd_RO.pop("_id", None)
434 vnfd_RO.pop("_admin", None)
435 vnfd_RO.pop("monitoring-param", None)
436 vnfd_RO.pop("scaling-group-descriptor", None)
437 vnfd_RO.pop("kdu", None)
438 vnfd_RO.pop("k8s-cluster", None)
439 if new_id:
440 vnfd_RO["id"] = new_id
441
442 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
443 for vdu in get_iterable(vnfd_RO, "vdu"):
444 vdu.pop("cloud-init-file", None)
445 vdu.pop("cloud-init", None)
446 return vnfd_RO
447
448 @staticmethod
449 def ip_profile_2_RO(ip_profile):
450 RO_ip_profile = deepcopy(ip_profile)
451 if "dns-server" in RO_ip_profile:
452 if isinstance(RO_ip_profile["dns-server"], list):
453 RO_ip_profile["dns-address"] = []
454 for ds in RO_ip_profile.pop("dns-server"):
455 RO_ip_profile["dns-address"].append(ds["address"])
456 else:
457 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
458 if RO_ip_profile.get("ip-version") == "ipv4":
459 RO_ip_profile["ip-version"] = "IPv4"
460 if RO_ip_profile.get("ip-version") == "ipv6":
461 RO_ip_profile["ip-version"] = "IPv6"
462 if "dhcp-params" in RO_ip_profile:
463 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
464 return RO_ip_profile
465
466 def _get_ro_vim_id_for_vim_account(self, vim_account):
467 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
468 if db_vim["_admin"]["operationalState"] != "ENABLED":
469 raise LcmException(
470 "VIM={} is not available. operationalState={}".format(
471 vim_account, db_vim["_admin"]["operationalState"]
472 )
473 )
474 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
475 return RO_vim_id
476
477 def get_ro_wim_id_for_wim_account(self, wim_account):
478 if isinstance(wim_account, str):
479 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
480 if db_wim["_admin"]["operationalState"] != "ENABLED":
481 raise LcmException(
482 "WIM={} is not available. operationalState={}".format(
483 wim_account, db_wim["_admin"]["operationalState"]
484 )
485 )
486 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
487 return RO_wim_id
488 else:
489 return wim_account
490
491 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
492
493 db_vdu_push_list = []
494 db_update = {"_admin.modified": time()}
495 if vdu_create:
496 for vdu_id, vdu_count in vdu_create.items():
497 vdur = next(
498 (
499 vdur
500 for vdur in reversed(db_vnfr["vdur"])
501 if vdur["vdu-id-ref"] == vdu_id
502 ),
503 None,
504 )
505 if not vdur:
506 raise LcmException(
507 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
508 vdu_id
509 )
510 )
511
512 for count in range(vdu_count):
513 vdur_copy = deepcopy(vdur)
514 vdur_copy["status"] = "BUILD"
515 vdur_copy["status-detailed"] = None
516 vdur_copy["ip-address"]: None
517 vdur_copy["_id"] = str(uuid4())
518 vdur_copy["count-index"] += count + 1
519 vdur_copy["id"] = "{}-{}".format(
520 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
521 )
522 vdur_copy.pop("vim_info", None)
523 for iface in vdur_copy["interfaces"]:
524 if iface.get("fixed-ip"):
525 iface["ip-address"] = self.increment_ip_mac(
526 iface["ip-address"], count + 1
527 )
528 else:
529 iface.pop("ip-address", None)
530 if iface.get("fixed-mac"):
531 iface["mac-address"] = self.increment_ip_mac(
532 iface["mac-address"], count + 1
533 )
534 else:
535 iface.pop("mac-address", None)
536 iface.pop(
537 "mgmt_vnf", None
538 ) # only first vdu can be managment of vnf
539 db_vdu_push_list.append(vdur_copy)
540 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
541 if vdu_delete:
542 for vdu_id, vdu_count in vdu_delete.items():
543 if mark_delete:
544 indexes_to_delete = [
545 iv[0]
546 for iv in enumerate(db_vnfr["vdur"])
547 if iv[1]["vdu-id-ref"] == vdu_id
548 ]
549 db_update.update(
550 {
551 "vdur.{}.status".format(i): "DELETING"
552 for i in indexes_to_delete[-vdu_count:]
553 }
554 )
555 else:
556 # it must be deleted one by one because common.db does not allow otherwise
557 vdus_to_delete = [
558 v
559 for v in reversed(db_vnfr["vdur"])
560 if v["vdu-id-ref"] == vdu_id
561 ]
562 for vdu in vdus_to_delete[:vdu_count]:
563 self.db.set_one(
564 "vnfrs",
565 {"_id": db_vnfr["_id"]},
566 None,
567 pull={"vdur": {"_id": vdu["_id"]}},
568 )
569 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
570 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
571 # modify passed dictionary db_vnfr
572 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
573 db_vnfr["vdur"] = db_vnfr_["vdur"]
574
575 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
576 """
577 Updates database nsr with the RO info for the created vld
578 :param ns_update_nsr: dictionary to be filled with the updated info
579 :param db_nsr: content of db_nsr. This is also modified
580 :param nsr_desc_RO: nsr descriptor from RO
581 :return: Nothing, LcmException is raised on errors
582 """
583
584 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
585 for net_RO in get_iterable(nsr_desc_RO, "nets"):
586 if vld["id"] != net_RO.get("ns_net_osm_id"):
587 continue
588 vld["vim-id"] = net_RO.get("vim_net_id")
589 vld["name"] = net_RO.get("vim_name")
590 vld["status"] = net_RO.get("status")
591 vld["status-detailed"] = net_RO.get("error_msg")
592 ns_update_nsr["vld.{}".format(vld_index)] = vld
593 break
594 else:
595 raise LcmException(
596 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
597 )
598
    def set_vnfr_at_error(self, db_vnfrs, error_text):
        """Mark every vnfr (and its vdurs without a status) as ERROR in the database.

        :param db_vnfrs: dict of member-vnf-index: vnfr content (modified in place)
        :param error_text: detail stored in the in-memory vdur "status-detailed"
        :return: None; database errors are logged, not raised
        """
        try:
            for db_vnfr in db_vnfrs.values():
                vnfr_update = {"status": "ERROR"}
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # only vdurs that never got a status are updated
                    if "status" not in vdur:
                        vdur["status"] = "ERROR"
                        vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                        if error_text:
                            vdur["status-detailed"] = str(error_text)
                            # NOTE(review): the in-memory vdur receives error_text
                            # but the database write stores the literal "ERROR" —
                            # confirm whether error_text was intended here
                            vnfr_update[
                                "vdur.{}.status-detailed".format(vdu_index)
                            ] = "ERROR"
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        except DbException as e:
            self.logger.error("Cannot update vnf. {}".format(e))
615
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf matching this member index; the for/else below
            # raises if RO reported no such vnf
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';' — keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO, so nothing to update
                        continue
                    # match by vdu osm id AND replica count-index; for/else
                    # raises when no RO vm corresponds to this vdur
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy ip/mac per interface, matched by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update internal vlds from the RO nets, matched by osm id
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
712
713 def _get_ns_config_info(self, nsr_id):
714 """
715 Generates a mapping between vnf,vdu elements and the N2VC id
716 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
717 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
718 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
719 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
720 """
721 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
722 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
723 mapping = {}
724 ns_config_info = {"osm-config-mapping": mapping}
725 for vca in vca_deployed_list:
726 if not vca["member-vnf-index"]:
727 continue
728 if not vca["vdu_id"]:
729 mapping[vca["member-vnf-index"]] = vca["application"]
730 else:
731 mapping[
732 "{}.{}.{}".format(
733 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
734 )
735 ] = vca["application"]
736 return ns_config_info
737
738 async def _instantiate_ng_ro(
739 self,
740 logging_text,
741 nsr_id,
742 nsd,
743 db_nsr,
744 db_nslcmop,
745 db_vnfrs,
746 db_vnfds,
747 n2vc_key_list,
748 stage,
749 start_deploy,
750 timeout_ns_deploy,
751 ):
752
753 db_vims = {}
754
755 def get_vim_account(vim_account_id):
756 nonlocal db_vims
757 if vim_account_id in db_vims:
758 return db_vims[vim_account_id]
759 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
760 db_vims[vim_account_id] = db_vim
761 return db_vim
762
763 # modify target_vld info with instantiation parameters
764 def parse_vld_instantiation_params(
765 target_vim, target_vld, vld_params, target_sdn
766 ):
767 if vld_params.get("ip-profile"):
768 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
769 "ip-profile"
770 ]
771 if vld_params.get("provider-network"):
772 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
773 "provider-network"
774 ]
775 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
776 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
777 "provider-network"
778 ]["sdn-ports"]
779 if vld_params.get("wimAccountId"):
780 target_wim = "wim:{}".format(vld_params["wimAccountId"])
781 target_vld["vim_info"][target_wim] = {}
782 for param in ("vim-network-name", "vim-network-id"):
783 if vld_params.get(param):
784 if isinstance(vld_params[param], dict):
785 for vim, vim_net in vld_params[param].items():
786 other_target_vim = "vim:" + vim
787 populate_dict(
788 target_vld["vim_info"],
789 (other_target_vim, param.replace("-", "_")),
790 vim_net,
791 )
792 else: # isinstance str
793 target_vld["vim_info"][target_vim][
794 param.replace("-", "_")
795 ] = vld_params[param]
796 if vld_params.get("common_id"):
797 target_vld["common_id"] = vld_params.get("common_id")
798
799 nslcmop_id = db_nslcmop["_id"]
800 target = {
801 "name": db_nsr["name"],
802 "ns": {"vld": []},
803 "vnf": [],
804 "image": deepcopy(db_nsr["image"]),
805 "flavor": deepcopy(db_nsr["flavor"]),
806 "action_id": nslcmop_id,
807 "cloud_init_content": {},
808 }
809 for image in target["image"]:
810 image["vim_info"] = {}
811 for flavor in target["flavor"]:
812 flavor["vim_info"] = {}
813
814 if db_nslcmop.get("lcmOperationType") != "instantiate":
815 # get parameters of instantiation:
816 db_nslcmop_instantiate = self.db.get_list(
817 "nslcmops",
818 {
819 "nsInstanceId": db_nslcmop["nsInstanceId"],
820 "lcmOperationType": "instantiate",
821 },
822 )[-1]
823 ns_params = db_nslcmop_instantiate.get("operationParams")
824 else:
825 ns_params = db_nslcmop.get("operationParams")
826 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
827 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
828
829 cp2target = {}
830 for vld_index, vld in enumerate(db_nsr.get("vld")):
831 target_vim = "vim:{}".format(ns_params["vimAccountId"])
832 target_vld = {
833 "id": vld["id"],
834 "name": vld["name"],
835 "mgmt-network": vld.get("mgmt-network", False),
836 "type": vld.get("type"),
837 "vim_info": {
838 target_vim: {
839 "vim_network_name": vld.get("vim-network-name"),
840 "vim_account_id": ns_params["vimAccountId"],
841 }
842 },
843 }
844 # check if this network needs SDN assist
845 if vld.get("pci-interfaces"):
846 db_vim = get_vim_account(ns_params["vimAccountId"])
847 sdnc_id = db_vim["config"].get("sdn-controller")
848 if sdnc_id:
849 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
850 target_sdn = "sdn:{}".format(sdnc_id)
851 target_vld["vim_info"][target_sdn] = {
852 "sdn": True,
853 "target_vim": target_vim,
854 "vlds": [sdn_vld],
855 "type": vld.get("type"),
856 }
857
858 nsd_vnf_profiles = get_vnf_profiles(nsd)
859 for nsd_vnf_profile in nsd_vnf_profiles:
860 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
861 if cp["virtual-link-profile-id"] == vld["id"]:
862 cp2target[
863 "member_vnf:{}.{}".format(
864 cp["constituent-cpd-id"][0][
865 "constituent-base-element-id"
866 ],
867 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
868 )
869 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
870
871 # check at nsd descriptor, if there is an ip-profile
872 vld_params = {}
873 nsd_vlp = find_in_list(
874 get_virtual_link_profiles(nsd),
875 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
876 == vld["id"],
877 )
878 if (
879 nsd_vlp
880 and nsd_vlp.get("virtual-link-protocol-data")
881 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
882 ):
883 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
884 "l3-protocol-data"
885 ]
886 ip_profile_dest_data = {}
887 if "ip-version" in ip_profile_source_data:
888 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
889 "ip-version"
890 ]
891 if "cidr" in ip_profile_source_data:
892 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
893 "cidr"
894 ]
895 if "gateway-ip" in ip_profile_source_data:
896 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
897 "gateway-ip"
898 ]
899 if "dhcp-enabled" in ip_profile_source_data:
900 ip_profile_dest_data["dhcp-params"] = {
901 "enabled": ip_profile_source_data["dhcp-enabled"]
902 }
903 vld_params["ip-profile"] = ip_profile_dest_data
904
905 # update vld_params with instantiation params
906 vld_instantiation_params = find_in_list(
907 get_iterable(ns_params, "vld"),
908 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
909 )
910 if vld_instantiation_params:
911 vld_params.update(vld_instantiation_params)
912 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
913 target["ns"]["vld"].append(target_vld)
914
915 for vnfr in db_vnfrs.values():
916 vnfd = find_in_list(
917 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
918 )
919 vnf_params = find_in_list(
920 get_iterable(ns_params, "vnf"),
921 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
922 )
923 target_vnf = deepcopy(vnfr)
924 target_vim = "vim:{}".format(vnfr["vim-account-id"])
925 for vld in target_vnf.get("vld", ()):
926 # check if connected to a ns.vld, to fill target'
927 vnf_cp = find_in_list(
928 vnfd.get("int-virtual-link-desc", ()),
929 lambda cpd: cpd.get("id") == vld["id"],
930 )
931 if vnf_cp:
932 ns_cp = "member_vnf:{}.{}".format(
933 vnfr["member-vnf-index-ref"], vnf_cp["id"]
934 )
935 if cp2target.get(ns_cp):
936 vld["target"] = cp2target[ns_cp]
937
938 vld["vim_info"] = {
939 target_vim: {"vim_network_name": vld.get("vim-network-name")}
940 }
941 # check if this network needs SDN assist
942 target_sdn = None
943 if vld.get("pci-interfaces"):
944 db_vim = get_vim_account(vnfr["vim-account-id"])
945 sdnc_id = db_vim["config"].get("sdn-controller")
946 if sdnc_id:
947 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
948 target_sdn = "sdn:{}".format(sdnc_id)
949 vld["vim_info"][target_sdn] = {
950 "sdn": True,
951 "target_vim": target_vim,
952 "vlds": [sdn_vld],
953 "type": vld.get("type"),
954 }
955
956 # check at vnfd descriptor, if there is an ip-profile
957 vld_params = {}
958 vnfd_vlp = find_in_list(
959 get_virtual_link_profiles(vnfd),
960 lambda a_link_profile: a_link_profile["id"] == vld["id"],
961 )
962 if (
963 vnfd_vlp
964 and vnfd_vlp.get("virtual-link-protocol-data")
965 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
966 ):
967 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
968 "l3-protocol-data"
969 ]
970 ip_profile_dest_data = {}
971 if "ip-version" in ip_profile_source_data:
972 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
973 "ip-version"
974 ]
975 if "cidr" in ip_profile_source_data:
976 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
977 "cidr"
978 ]
979 if "gateway-ip" in ip_profile_source_data:
980 ip_profile_dest_data[
981 "gateway-address"
982 ] = ip_profile_source_data["gateway-ip"]
983 if "dhcp-enabled" in ip_profile_source_data:
984 ip_profile_dest_data["dhcp-params"] = {
985 "enabled": ip_profile_source_data["dhcp-enabled"]
986 }
987
988 vld_params["ip-profile"] = ip_profile_dest_data
989 # update vld_params with instantiation params
990 if vnf_params:
991 vld_instantiation_params = find_in_list(
992 get_iterable(vnf_params, "internal-vld"),
993 lambda i_vld: i_vld["name"] == vld["id"],
994 )
995 if vld_instantiation_params:
996 vld_params.update(vld_instantiation_params)
997 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
998
999 vdur_list = []
1000 for vdur in target_vnf.get("vdur", ()):
1001 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1002 continue # This vdu must not be created
1003 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1004
1005 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1006
1007 if ssh_keys_all:
1008 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1009 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1010 if (
1011 vdu_configuration
1012 and vdu_configuration.get("config-access")
1013 and vdu_configuration.get("config-access").get("ssh-access")
1014 ):
1015 vdur["ssh-keys"] = ssh_keys_all
1016 vdur["ssh-access-required"] = vdu_configuration[
1017 "config-access"
1018 ]["ssh-access"]["required"]
1019 elif (
1020 vnf_configuration
1021 and vnf_configuration.get("config-access")
1022 and vnf_configuration.get("config-access").get("ssh-access")
1023 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1024 ):
1025 vdur["ssh-keys"] = ssh_keys_all
1026 vdur["ssh-access-required"] = vnf_configuration[
1027 "config-access"
1028 ]["ssh-access"]["required"]
1029 elif ssh_keys_instantiation and find_in_list(
1030 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1031 ):
1032 vdur["ssh-keys"] = ssh_keys_instantiation
1033
1034 self.logger.debug("NS > vdur > {}".format(vdur))
1035
1036 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1037 # cloud-init
1038 if vdud.get("cloud-init-file"):
1039 vdur["cloud-init"] = "{}:file:{}".format(
1040 vnfd["_id"], vdud.get("cloud-init-file")
1041 )
1042 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1043 if vdur["cloud-init"] not in target["cloud_init_content"]:
1044 base_folder = vnfd["_admin"]["storage"]
1045 cloud_init_file = "{}/{}/cloud_init/{}".format(
1046 base_folder["folder"],
1047 base_folder["pkg-dir"],
1048 vdud.get("cloud-init-file"),
1049 )
1050 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1051 target["cloud_init_content"][
1052 vdur["cloud-init"]
1053 ] = ci_file.read()
1054 elif vdud.get("cloud-init"):
1055 vdur["cloud-init"] = "{}:vdu:{}".format(
1056 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1057 )
1058 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1059 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1060 "cloud-init"
1061 ]
1062 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1063 deploy_params_vdu = self._format_additional_params(
1064 vdur.get("additionalParams") or {}
1065 )
1066 deploy_params_vdu["OSM"] = get_osm_params(
1067 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1068 )
1069 vdur["additionalParams"] = deploy_params_vdu
1070
1071 # flavor
1072 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1073 if target_vim not in ns_flavor["vim_info"]:
1074 ns_flavor["vim_info"][target_vim] = {}
1075
1076 # deal with images
1077 # in case alternative images are provided we must check if they should be applied
1078 # for the vim_type, modify the vim_type taking into account
1079 ns_image_id = int(vdur["ns-image-id"])
1080 if vdur.get("alt-image-ids"):
1081 db_vim = get_vim_account(vnfr["vim-account-id"])
1082 vim_type = db_vim["vim_type"]
1083 for alt_image_id in vdur.get("alt-image-ids"):
1084 ns_alt_image = target["image"][int(alt_image_id)]
1085 if vim_type == ns_alt_image.get("vim-type"):
1086 # must use alternative image
1087 self.logger.debug(
1088 "use alternative image id: {}".format(alt_image_id)
1089 )
1090 ns_image_id = alt_image_id
1091 vdur["ns-image-id"] = ns_image_id
1092 break
1093 ns_image = target["image"][int(ns_image_id)]
1094 if target_vim not in ns_image["vim_info"]:
1095 ns_image["vim_info"][target_vim] = {}
1096
1097 vdur["vim_info"] = {target_vim: {}}
1098 # instantiation parameters
1099 # if vnf_params:
1100 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1101 # vdud["id"]), None)
1102 vdur_list.append(vdur)
1103 target_vnf["vdur"] = vdur_list
1104 target["vnf"].append(target_vnf)
1105
1106 desc = await self.RO.deploy(nsr_id, target)
1107 self.logger.debug("RO return > {}".format(desc))
1108 action_id = desc["action_id"]
1109 await self._wait_ng_ro(
1110 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1111 )
1112
1113 # Updating NSR
1114 db_nsr_update = {
1115 "_admin.deployed.RO.operational-status": "running",
1116 "detailed-status": " ".join(stage),
1117 }
1118 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1120 self._write_op_status(nslcmop_id, stage)
1121 self.logger.debug(
1122 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1123 )
1124 return
1125
1126 async def _wait_ng_ro(
1127 self,
1128 nsr_id,
1129 action_id,
1130 nslcmop_id=None,
1131 start_time=None,
1132 timeout=600,
1133 stage=None,
1134 ):
1135 detailed_status_old = None
1136 db_nsr_update = {}
1137 start_time = start_time or time()
1138 while time() <= start_time + timeout:
1139 desc_status = await self.RO.status(nsr_id, action_id)
1140 self.logger.debug("Wait NG RO > {}".format(desc_status))
1141 if desc_status["status"] == "FAILED":
1142 raise NgRoException(desc_status["details"])
1143 elif desc_status["status"] == "BUILD":
1144 if stage:
1145 stage[2] = "VIM: ({})".format(desc_status["details"])
1146 elif desc_status["status"] == "DONE":
1147 if stage:
1148 stage[2] = "Deployed at VIM"
1149 break
1150 else:
1151 assert False, "ROclient.check_ns_status returns unknown {}".format(
1152 desc_status["status"]
1153 )
1154 if stage and nslcmop_id and stage[2] != detailed_status_old:
1155 detailed_status_old = stage[2]
1156 db_nsr_update["detailed-status"] = " ".join(stage)
1157 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1158 self._write_op_status(nslcmop_id, stage)
1159 await asyncio.sleep(15, loop=self.loop)
1160 else: # timeout_ns_deploy
1161 raise NgRoException("Timeout waiting ns to deploy")
1162
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Ask NG-RO to terminate the NS and wait until it is deleted at VIM.

        Sends an "empty" deploy target (no vld/vnf/image/flavor) which NG-RO
        interprets as a teardown, waits for the resulting action to finish and
        finally deletes the nsr at RO.

        :param logging_text: prefix used for logging
        :param nsr_deployed: deployed info from db_nsr (currently unused here)
        :param nsr_id: ns record id
        :param nslcmop_id: operation id; also used as RO action_id of the request
        :param stage: 3-item stage list; item 2 is overwritten with the result
        :raises LcmException: if RO reported a conflict or any other delete error
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target makes NG-RO remove everything previously deployed
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            # 404 means RO no longer knows this nsr: treat as already deleted
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # write the final result to db in both success and failure cases
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1229
1230 async def instantiate_RO(
1231 self,
1232 logging_text,
1233 nsr_id,
1234 nsd,
1235 db_nsr,
1236 db_nslcmop,
1237 db_vnfrs,
1238 db_vnfds,
1239 n2vc_key_list,
1240 stage,
1241 ):
1242 """
1243 Instantiate at RO
1244 :param logging_text: preffix text to use at logging
1245 :param nsr_id: nsr identity
1246 :param nsd: database content of ns descriptor
1247 :param db_nsr: database content of ns record
1248 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1249 :param db_vnfrs:
1250 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1251 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1252 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1253 :return: None or exception
1254 """
1255 try:
1256 start_deploy = time()
1257 ns_params = db_nslcmop.get("operationParams")
1258 if ns_params and ns_params.get("timeout_ns_deploy"):
1259 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1260 else:
1261 timeout_ns_deploy = self.timeout.get(
1262 "ns_deploy", self.timeout_ns_deploy
1263 )
1264
1265 # Check for and optionally request placement optimization. Database will be updated if placement activated
1266 stage[2] = "Waiting for Placement."
1267 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1268 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1269 for vnfr in db_vnfrs.values():
1270 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1271 break
1272 else:
1273 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1274
1275 return await self._instantiate_ng_ro(
1276 logging_text,
1277 nsr_id,
1278 nsd,
1279 db_nsr,
1280 db_nslcmop,
1281 db_vnfrs,
1282 db_vnfds,
1283 n2vc_key_list,
1284 stage,
1285 start_deploy,
1286 timeout_ns_deploy,
1287 )
1288 except Exception as e:
1289 stage[2] = "ERROR deploying at VIM"
1290 self.set_vnfr_at_error(db_vnfrs, str(e))
1291 self.logger.error(
1292 "Error deploying at VIM {}".format(e),
1293 exc_info=not isinstance(
1294 e,
1295 (
1296 ROclient.ROClientException,
1297 LcmException,
1298 DbException,
1299 NgRoException,
1300 ),
1301 ),
1302 )
1303 raise
1304
1305 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1306 """
1307 Wait for kdu to be up, get ip address
1308 :param logging_text: prefix use for logging
1309 :param nsr_id:
1310 :param vnfr_id:
1311 :param kdu_name:
1312 :return: IP address
1313 """
1314
1315 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1316 nb_tries = 0
1317
1318 while nb_tries < 360:
1319 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1320 kdur = next(
1321 (
1322 x
1323 for x in get_iterable(db_vnfr, "kdur")
1324 if x.get("kdu-name") == kdu_name
1325 ),
1326 None,
1327 )
1328 if not kdur:
1329 raise LcmException(
1330 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1331 )
1332 if kdur.get("status"):
1333 if kdur["status"] in ("READY", "ENABLED"):
1334 return kdur.get("ip-address")
1335 else:
1336 raise LcmException(
1337 "target KDU={} is in error state".format(kdu_name)
1338 )
1339
1340 await asyncio.sleep(10, loop=self.loop)
1341 nb_tries += 1
1342 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1343
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: ns record id
        :param vnfr_id: vnf record id; its vdur list is polled for the target VM
        :param vdu_id: target vdu id; None means the VNF management VM
        :param vdu_index: count-index to disambiguate scaled vdus
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on target VNF/VM error state, not-found, or timeout
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # retry counter for classic-RO key injection only
        target_vdu_id = None
        ro_retries = 0  # overall polling-iteration counter (bounds the loop)

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # a PDU never transitions through VIM states: treat it as up
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: the key is injected through a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO may transiently fail: retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the ip address alone is the result
                break

        return ip_address
1520
1521 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1522 """
1523 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1524 """
1525 my_vca = vca_deployed_list[vca_index]
1526 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1527 # vdu or kdu: no dependencies
1528 return
1529 timeout = 300
1530 while timeout >= 0:
1531 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1532 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1533 configuration_status_list = db_nsr["configurationStatus"]
1534 for index, vca_deployed in enumerate(configuration_status_list):
1535 if index == vca_index:
1536 # myself
1537 continue
1538 if not my_vca.get("member-vnf-index") or (
1539 vca_deployed.get("member-vnf-index")
1540 == my_vca.get("member-vnf-index")
1541 ):
1542 internal_status = configuration_status_list[index].get("status")
1543 if internal_status == "READY":
1544 continue
1545 elif internal_status == "BROKEN":
1546 raise LcmException(
1547 "Configuration aborted because dependent charm/s has failed"
1548 )
1549 else:
1550 break
1551 else:
1552 # no dependencies, return
1553 return
1554 await asyncio.sleep(10)
1555 timeout -= 1
1556
1557 raise LcmException("Configuration aborted because dependent charm/s timeout")
1558
1559 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1560 return deep_get(db_vnfr, ("vca-id",)) or deep_get(
1561 db_nsr, ("instantiate_params", "vcaId")
1562 )
1563
    async def instantiate_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Deploy one execution environment (charm or helm chart) at VCA and run
        its Day-1 configuration: create/register the EE, install the
        configuration software, add relations, optionally inject an ssh key at
        the target VM, execute the initial config primitives and configure
        metrics collection.

        :param logging_text: prefix used for logging
        :param vca_index: index of this VCA inside db_nsr["_admin"]["deployed"]["VCA"]
        :param nsi_id: network slice instance id, may be falsy
        :param db_nsr: ns record database content
        :param db_vnfr: vnf record database content; falsy for NS-level charms
        :param vdu_id: vdu id when configuring a VDU, else falsy
        :param kdu_name: kdu name when configuring a KDU, else falsy
        :param vdu_index: vdu count-index (0 used when falsy)
        :param config_descriptor: descriptor section holding config primitives and ssh-access
        :param deploy_params: params for primitive rendering; "rw_mgmt_ip" and
            possibly "ns_config_info" are added by this method
        :param base_folder: package storage info ("folder", "pkg-dir") used to
            compose the artifact path
        :param nslcmop_id: operation id, used for writing operation status
        :param stage: 3-item stage list for progress reporting
        :param vca_type: one of "native_charm", "lxc_proxy_charm",
            "k8s_proxy_charm", "helm", "helm-v3"
        :param vca_name: charm/chart name used to compose the artifact path
        :param ee_config_descriptor: execution environment descriptor; provides
            "id" for primitive filtering and metrics info
        :raises LcmException: wrapping any failure; configurationStatus for this
            vca_index is set to BROKEN before re-raising
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # juju model namespace: <nsi>.<ns>[.<vnf>-<idx>[.<vdu>-<idx> | .<kdu>.<idx>]]
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms"
                if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                else "helm-charts",
                vca_name,
            )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="CREATING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        vca_type=vca_type,
                    )
                else:
                    # lxc_proxy_charm
                    ee_id, credentials = await self.vca_map[
                        vca_type
                    ].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        vca_id=vca_id,
                    )

            elif vca_type == "native_charm":
                # native charm runs inside the VM itself: wait for it to be up first
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # native charms take their config from the "config" primitive
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                # number of charm units may be overridden per element level
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                    vca_id=vca_id,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_index=vca_index,
                vca_id=vca_id,
                vca_type=vca_type,
            )

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = "execute initial config primitive"

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
            else:
                # NS
                stage[0] = "Stage 5/5: running Day-1 primitives for NS."

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
            )

            self._write_op_status(op_id=nslcmop_id, stage=stage)

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(
                        self._get_ns_config_info(nsr_id)
                    )
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(
                    initial_config_primitive, {}, deploy_params
                )

                step = "execute primitive '{}' params '{}'".format(
                    initial_config_primitive["name"], primitive_params_
                )
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get("terminate-config-primitive"):
                        self.update_db_2(
                            "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                        )
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.add_prometheus_metrics(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
1972
1973 def _write_ns_status(
1974 self,
1975 nsr_id: str,
1976 ns_state: str,
1977 current_operation: str,
1978 current_operation_id: str,
1979 error_description: str = None,
1980 error_detail: str = None,
1981 other_update: dict = None,
1982 ):
1983 """
1984 Update db_nsr fields.
1985 :param nsr_id:
1986 :param ns_state:
1987 :param current_operation:
1988 :param current_operation_id:
1989 :param error_description:
1990 :param error_detail:
1991 :param other_update: Other required changes at database if provided, will be cleared
1992 :return:
1993 """
1994 try:
1995 db_dict = other_update or {}
1996 db_dict[
1997 "_admin.nslcmop"
1998 ] = current_operation_id # for backward compatibility
1999 db_dict["_admin.current-operation"] = current_operation_id
2000 db_dict["_admin.operation-type"] = (
2001 current_operation if current_operation != "IDLE" else None
2002 )
2003 db_dict["currentOperation"] = current_operation
2004 db_dict["currentOperationID"] = current_operation_id
2005 db_dict["errorDescription"] = error_description
2006 db_dict["errorDetail"] = error_detail
2007
2008 if ns_state:
2009 db_dict["nsState"] = ns_state
2010 self.update_db_2("nsrs", nsr_id, db_dict)
2011 except DbException as e:
2012 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2013
2014 def _write_op_status(
2015 self,
2016 op_id: str,
2017 stage: list = None,
2018 error_message: str = None,
2019 queuePosition: int = 0,
2020 operation_state: str = None,
2021 other_update: dict = None,
2022 ):
2023 try:
2024 db_dict = other_update or {}
2025 db_dict["queuePosition"] = queuePosition
2026 if isinstance(stage, list):
2027 db_dict["stage"] = stage[0]
2028 db_dict["detailed-status"] = " ".join(stage)
2029 elif stage is not None:
2030 db_dict["stage"] = str(stage)
2031
2032 if error_message is not None:
2033 db_dict["errorMessage"] = error_message
2034 if operation_state is not None:
2035 db_dict["operationState"] = operation_state
2036 db_dict["statusEnteredTime"] = time()
2037 self.update_db_2("nslcmops", op_id, db_dict)
2038 except DbException as e:
2039 self.logger.warn(
2040 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2041 )
2042
2043 def _write_all_config_status(self, db_nsr: dict, status: str):
2044 try:
2045 nsr_id = db_nsr["_id"]
2046 # configurationStatus
2047 config_status = db_nsr.get("configurationStatus")
2048 if config_status:
2049 db_nsr_update = {
2050 "configurationStatus.{}.status".format(index): status
2051 for index, v in enumerate(config_status)
2052 if v
2053 }
2054 # update status
2055 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2056
2057 except DbException as e:
2058 self.logger.warn(
2059 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2060 )
2061
2062 def _write_configuration_status(
2063 self,
2064 nsr_id: str,
2065 vca_index: int,
2066 status: str = None,
2067 element_under_configuration: str = None,
2068 element_type: str = None,
2069 other_update: dict = None,
2070 ):
2071
2072 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2073 # .format(vca_index, status))
2074
2075 try:
2076 db_path = "configurationStatus.{}.".format(vca_index)
2077 db_dict = other_update or {}
2078 if status:
2079 db_dict[db_path + "status"] = status
2080 if element_under_configuration:
2081 db_dict[
2082 db_path + "elementUnderConfiguration"
2083 ] = element_under_configuration
2084 if element_type:
2085 db_dict[db_path + "elementType"] = element_type
2086 self.update_db_2("nsrs", nsr_id, db_dict)
2087 except DbException as e:
2088 self.logger.warn(
2089 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2090 status, nsr_id, vca_index, e
2091 )
2092 )
2093
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        # placement is only delegated when operationParams explicitly requests PLA
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # request placement via kafka; PLA writes the answer into the
            # nslcmop record (_admin.pla), possibly handled by another worker
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database every 5 seconds, up to 50 seconds total
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a computed vim account or unknown vnf
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2142
2143 def update_nsrs_with_pla_result(self, params):
2144 try:
2145 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2146 self.update_db_2(
2147 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2148 )
2149 except Exception as e:
2150 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2151
2152 async def instantiate(self, nsr_id, nslcmop_id):
2153 """
2154
2155 :param nsr_id: ns instance to deploy
2156 :param nslcmop_id: operation to run
2157 :return:
2158 """
2159
2160 # Try to lock HA task here
2161 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2162 if not task_is_locked_by_me:
2163 self.logger.debug(
2164 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2165 )
2166 return
2167
2168 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2169 self.logger.debug(logging_text + "Enter")
2170
2171 # get all needed from database
2172
2173 # database nsrs record
2174 db_nsr = None
2175
2176 # database nslcmops record
2177 db_nslcmop = None
2178
2179 # update operation on nsrs
2180 db_nsr_update = {}
2181 # update operation on nslcmops
2182 db_nslcmop_update = {}
2183
2184 nslcmop_operation_state = None
2185 db_vnfrs = {} # vnf's info indexed by member-index
2186 # n2vc_info = {}
2187 tasks_dict_info = {} # from task to info text
2188 exc = None
2189 error_list = []
2190 stage = [
2191 "Stage 1/5: preparation of the environment.",
2192 "Waiting for previous operations to terminate.",
2193 "",
2194 ]
2195 # ^ stage, step, VIM progress
2196 try:
2197 # wait for any previous tasks in process
2198 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2199
2200 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2201 stage[1] = "Reading from database."
2202 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2203 db_nsr_update["detailed-status"] = "creating"
2204 db_nsr_update["operational-status"] = "init"
2205 self._write_ns_status(
2206 nsr_id=nsr_id,
2207 ns_state="BUILDING",
2208 current_operation="INSTANTIATING",
2209 current_operation_id=nslcmop_id,
2210 other_update=db_nsr_update,
2211 )
2212 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2213
2214 # read from db: operation
2215 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2216 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2217 ns_params = db_nslcmop.get("operationParams")
2218 if ns_params and ns_params.get("timeout_ns_deploy"):
2219 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2220 else:
2221 timeout_ns_deploy = self.timeout.get(
2222 "ns_deploy", self.timeout_ns_deploy
2223 )
2224
2225 # read from db: ns
2226 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2227 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2228 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2229 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2230 self.fs.sync(db_nsr["nsd-id"])
2231 db_nsr["nsd"] = nsd
2232 # nsr_name = db_nsr["name"] # TODO short-name??
2233
2234 # read from db: vnf's of this ns
2235 stage[1] = "Getting vnfrs from db."
2236 self.logger.debug(logging_text + stage[1])
2237 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2238
2239 # read from db: vnfd's for every vnf
2240 db_vnfds = [] # every vnfd data
2241
2242 # for each vnf in ns, read vnfd
2243 for vnfr in db_vnfrs_list:
2244 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2245 vnfd_id = vnfr["vnfd-id"]
2246 vnfd_ref = vnfr["vnfd-ref"]
2247 self.fs.sync(vnfd_id)
2248
2249 # if we haven't this vnfd, read it from db
2250 if vnfd_id not in db_vnfds:
2251 # read from db
2252 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2253 vnfd_id, vnfd_ref
2254 )
2255 self.logger.debug(logging_text + stage[1])
2256 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2257
2258 # store vnfd
2259 db_vnfds.append(vnfd)
2260
2261 # Get or generates the _admin.deployed.VCA list
2262 vca_deployed_list = None
2263 if db_nsr["_admin"].get("deployed"):
2264 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2265 if vca_deployed_list is None:
2266 vca_deployed_list = []
2267 configuration_status_list = []
2268 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2269 db_nsr_update["configurationStatus"] = configuration_status_list
2270 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2271 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2272 elif isinstance(vca_deployed_list, dict):
2273 # maintain backward compatibility. Change a dict to list at database
2274 vca_deployed_list = list(vca_deployed_list.values())
2275 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2276 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2277
2278 if not isinstance(
2279 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2280 ):
2281 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2282 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2283
2284 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2285 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2286 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2287 self.db.set_list(
2288 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2289 )
2290
2291 # n2vc_redesign STEP 2 Deploy Network Scenario
2292 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2293 self._write_op_status(op_id=nslcmop_id, stage=stage)
2294
2295 stage[1] = "Deploying KDUs."
2296 # self.logger.debug(logging_text + "Before deploy_kdus")
2297 # Call to deploy_kdus in case exists the "vdu:kdu" param
2298 await self.deploy_kdus(
2299 logging_text=logging_text,
2300 nsr_id=nsr_id,
2301 nslcmop_id=nslcmop_id,
2302 db_vnfrs=db_vnfrs,
2303 db_vnfds=db_vnfds,
2304 task_instantiation_info=tasks_dict_info,
2305 )
2306
2307 stage[1] = "Getting VCA public key."
2308 # n2vc_redesign STEP 1 Get VCA public ssh-key
2309 # feature 1429. Add n2vc public key to needed VMs
2310 n2vc_key = self.n2vc.get_public_key()
2311 n2vc_key_list = [n2vc_key]
2312 if self.vca_config.get("public_key"):
2313 n2vc_key_list.append(self.vca_config["public_key"])
2314
2315 stage[1] = "Deploying NS at VIM."
2316 task_ro = asyncio.ensure_future(
2317 self.instantiate_RO(
2318 logging_text=logging_text,
2319 nsr_id=nsr_id,
2320 nsd=nsd,
2321 db_nsr=db_nsr,
2322 db_nslcmop=db_nslcmop,
2323 db_vnfrs=db_vnfrs,
2324 db_vnfds=db_vnfds,
2325 n2vc_key_list=n2vc_key_list,
2326 stage=stage,
2327 )
2328 )
2329 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2330 tasks_dict_info[task_ro] = "Deploying at VIM"
2331
2332 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2333 stage[1] = "Deploying Execution Environments."
2334 self.logger.debug(logging_text + stage[1])
2335
2336 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2337 for vnf_profile in get_vnf_profiles(nsd):
2338 vnfd_id = vnf_profile["vnfd-id"]
2339 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2340 member_vnf_index = str(vnf_profile["id"])
2341 db_vnfr = db_vnfrs[member_vnf_index]
2342 base_folder = vnfd["_admin"]["storage"]
2343 vdu_id = None
2344 vdu_index = 0
2345 vdu_name = None
2346 kdu_name = None
2347
2348 # Get additional parameters
2349 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2350 if db_vnfr.get("additionalParamsForVnf"):
2351 deploy_params.update(
2352 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2353 )
2354
2355 descriptor_config = get_configuration(vnfd, vnfd["id"])
2356 if descriptor_config:
2357 self._deploy_n2vc(
2358 logging_text=logging_text
2359 + "member_vnf_index={} ".format(member_vnf_index),
2360 db_nsr=db_nsr,
2361 db_vnfr=db_vnfr,
2362 nslcmop_id=nslcmop_id,
2363 nsr_id=nsr_id,
2364 nsi_id=nsi_id,
2365 vnfd_id=vnfd_id,
2366 vdu_id=vdu_id,
2367 kdu_name=kdu_name,
2368 member_vnf_index=member_vnf_index,
2369 vdu_index=vdu_index,
2370 vdu_name=vdu_name,
2371 deploy_params=deploy_params,
2372 descriptor_config=descriptor_config,
2373 base_folder=base_folder,
2374 task_instantiation_info=tasks_dict_info,
2375 stage=stage,
2376 )
2377
2378 # Deploy charms for each VDU that supports one.
2379 for vdud in get_vdu_list(vnfd):
2380 vdu_id = vdud["id"]
2381 descriptor_config = get_configuration(vnfd, vdu_id)
2382 vdur = find_in_list(
2383 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2384 )
2385
2386 if vdur.get("additionalParams"):
2387 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2388 else:
2389 deploy_params_vdu = deploy_params
2390 deploy_params_vdu["OSM"] = get_osm_params(
2391 db_vnfr, vdu_id, vdu_count_index=0
2392 )
2393 vdud_count = get_vdu_profile(vnfd, vdu_id).get(
2394 "max-number-of-instances", 1
2395 )
2396
2397 self.logger.debug("VDUD > {}".format(vdud))
2398 self.logger.debug(
2399 "Descriptor config > {}".format(descriptor_config)
2400 )
2401 if descriptor_config:
2402 vdu_name = None
2403 kdu_name = None
2404 for vdu_index in range(vdud_count):
2405 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2406 self._deploy_n2vc(
2407 logging_text=logging_text
2408 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2409 member_vnf_index, vdu_id, vdu_index
2410 ),
2411 db_nsr=db_nsr,
2412 db_vnfr=db_vnfr,
2413 nslcmop_id=nslcmop_id,
2414 nsr_id=nsr_id,
2415 nsi_id=nsi_id,
2416 vnfd_id=vnfd_id,
2417 vdu_id=vdu_id,
2418 kdu_name=kdu_name,
2419 member_vnf_index=member_vnf_index,
2420 vdu_index=vdu_index,
2421 vdu_name=vdu_name,
2422 deploy_params=deploy_params_vdu,
2423 descriptor_config=descriptor_config,
2424 base_folder=base_folder,
2425 task_instantiation_info=tasks_dict_info,
2426 stage=stage,
2427 )
2428 for kdud in get_kdu_list(vnfd):
2429 kdu_name = kdud["name"]
2430 descriptor_config = get_configuration(vnfd, kdu_name)
2431 if descriptor_config:
2432 vdu_id = None
2433 vdu_index = 0
2434 vdu_name = None
2435 kdur = next(
2436 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2437 )
2438 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2439 if kdur.get("additionalParams"):
2440 deploy_params_kdu = parse_yaml_strings(
2441 kdur["additionalParams"]
2442 )
2443
2444 self._deploy_n2vc(
2445 logging_text=logging_text,
2446 db_nsr=db_nsr,
2447 db_vnfr=db_vnfr,
2448 nslcmop_id=nslcmop_id,
2449 nsr_id=nsr_id,
2450 nsi_id=nsi_id,
2451 vnfd_id=vnfd_id,
2452 vdu_id=vdu_id,
2453 kdu_name=kdu_name,
2454 member_vnf_index=member_vnf_index,
2455 vdu_index=vdu_index,
2456 vdu_name=vdu_name,
2457 deploy_params=deploy_params_kdu,
2458 descriptor_config=descriptor_config,
2459 base_folder=base_folder,
2460 task_instantiation_info=tasks_dict_info,
2461 stage=stage,
2462 )
2463
2464 # Check if this NS has a charm configuration
2465 descriptor_config = nsd.get("ns-configuration")
2466 if descriptor_config and descriptor_config.get("juju"):
2467 vnfd_id = None
2468 db_vnfr = None
2469 member_vnf_index = None
2470 vdu_id = None
2471 kdu_name = None
2472 vdu_index = 0
2473 vdu_name = None
2474
2475 # Get additional parameters
2476 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2477 if db_nsr.get("additionalParamsForNs"):
2478 deploy_params.update(
2479 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2480 )
2481 base_folder = nsd["_admin"]["storage"]
2482 self._deploy_n2vc(
2483 logging_text=logging_text,
2484 db_nsr=db_nsr,
2485 db_vnfr=db_vnfr,
2486 nslcmop_id=nslcmop_id,
2487 nsr_id=nsr_id,
2488 nsi_id=nsi_id,
2489 vnfd_id=vnfd_id,
2490 vdu_id=vdu_id,
2491 kdu_name=kdu_name,
2492 member_vnf_index=member_vnf_index,
2493 vdu_index=vdu_index,
2494 vdu_name=vdu_name,
2495 deploy_params=deploy_params,
2496 descriptor_config=descriptor_config,
2497 base_folder=base_folder,
2498 task_instantiation_info=tasks_dict_info,
2499 stage=stage,
2500 )
2501
2502 # rest of staff will be done at finally
2503
2504 except (
2505 ROclient.ROClientException,
2506 DbException,
2507 LcmException,
2508 N2VCException,
2509 ) as e:
2510 self.logger.error(
2511 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2512 )
2513 exc = e
2514 except asyncio.CancelledError:
2515 self.logger.error(
2516 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2517 )
2518 exc = "Operation was cancelled"
2519 except Exception as e:
2520 exc = traceback.format_exc()
2521 self.logger.critical(
2522 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2523 exc_info=True,
2524 )
2525 finally:
2526 if exc:
2527 error_list.append(str(exc))
2528 try:
2529 # wait for pending tasks
2530 if tasks_dict_info:
2531 stage[1] = "Waiting for instantiate pending tasks."
2532 self.logger.debug(logging_text + stage[1])
2533 error_list += await self._wait_for_tasks(
2534 logging_text,
2535 tasks_dict_info,
2536 timeout_ns_deploy,
2537 stage,
2538 nslcmop_id,
2539 nsr_id=nsr_id,
2540 )
2541 stage[1] = stage[2] = ""
2542 except asyncio.CancelledError:
2543 error_list.append("Cancelled")
2544 # TODO cancel all tasks
2545 except Exception as exc:
2546 error_list.append(str(exc))
2547
2548 # update operation-status
2549 db_nsr_update["operational-status"] = "running"
2550 # let's begin with VCA 'configured' status (later we can change it)
2551 db_nsr_update["config-status"] = "configured"
2552 for task, task_name in tasks_dict_info.items():
2553 if not task.done() or task.cancelled() or task.exception():
2554 if task_name.startswith(self.task_name_deploy_vca):
2555 # A N2VC task is pending
2556 db_nsr_update["config-status"] = "failed"
2557 else:
2558 # RO or KDU task is pending
2559 db_nsr_update["operational-status"] = "failed"
2560
2561 # update status at database
2562 if error_list:
2563 error_detail = ". ".join(error_list)
2564 self.logger.error(logging_text + error_detail)
2565 error_description_nslcmop = "{} Detail: {}".format(
2566 stage[0], error_detail
2567 )
2568 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2569 nslcmop_id, stage[0]
2570 )
2571
2572 db_nsr_update["detailed-status"] = (
2573 error_description_nsr + " Detail: " + error_detail
2574 )
2575 db_nslcmop_update["detailed-status"] = error_detail
2576 nslcmop_operation_state = "FAILED"
2577 ns_state = "BROKEN"
2578 else:
2579 error_detail = None
2580 error_description_nsr = error_description_nslcmop = None
2581 ns_state = "READY"
2582 db_nsr_update["detailed-status"] = "Done"
2583 db_nslcmop_update["detailed-status"] = "Done"
2584 nslcmop_operation_state = "COMPLETED"
2585
2586 if db_nsr:
2587 self._write_ns_status(
2588 nsr_id=nsr_id,
2589 ns_state=ns_state,
2590 current_operation="IDLE",
2591 current_operation_id=None,
2592 error_description=error_description_nsr,
2593 error_detail=error_detail,
2594 other_update=db_nsr_update,
2595 )
2596 self._write_op_status(
2597 op_id=nslcmop_id,
2598 stage="",
2599 error_message=error_description_nslcmop,
2600 operation_state=nslcmop_operation_state,
2601 other_update=db_nslcmop_update,
2602 )
2603
2604 if nslcmop_operation_state:
2605 try:
2606 await self.msg.aiowrite(
2607 "ns",
2608 "instantiated",
2609 {
2610 "nsr_id": nsr_id,
2611 "nslcmop_id": nslcmop_id,
2612 "operationState": nslcmop_operation_state,
2613 },
2614 loop=self.loop,
2615 )
2616 except Exception as e:
2617 self.logger.error(
2618 logging_text + "kafka_write notification Exception {}".format(e)
2619 )
2620
2621 self.logger.debug(logging_text + "Exit")
2622 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2623
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_index: int,
        timeout: int = 3600,
        vca_type: str = None,
        vca_id: str = None,
    ) -> bool:
        """
        Find all juju relations (NS-level and VNF-level) that involve the VCA
        at *vca_index* and add them, polling the database until every peer VCA
        has its execution environment ready ("config_sw_installed") or is
        marked BROKEN.

        :param logging_text: prefix for log messages
        :param nsr_id: id of the ns record
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param timeout: maximum seconds to wait for the peers before giving up
        :param vca_type: VCA connector type; defaults to "lxc_proxy_charm"
        :param vca_id: id of the VCA (passed through to the connector)
        :return: True when all applicable relations were added (or none exist);
            False on timeout or on any error (errors are logged, not raised)
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            vca_type = vca_type or "lxc_proxy_charm"

            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # this VCA data
            my_vca = deep_get(db_nsr, ("_admin", "deployed", "VCA"))[vca_index]

            # read all ns-configuration relations
            ns_relations = list()
            db_ns_relations = deep_get(nsd, ("ns-configuration", "relation"))
            if db_ns_relations:
                for r in db_ns_relations:
                    # check if this VCA is in the relation
                    if my_vca.get("member-vnf-index") in (
                        r.get("entities")[0].get("id"),
                        r.get("entities")[1].get("id"),
                    ):
                        ns_relations.append(r)

            # read all vnf-configuration relations
            vnf_relations = list()
            db_vnfd_list = db_nsr.get("vnfd-id")
            if db_vnfd_list:
                for vnfd in db_vnfd_list:
                    db_vnf_relations = None
                    db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                    db_vnf_configuration = get_configuration(db_vnfd, db_vnfd["id"])
                    if db_vnf_configuration:
                        db_vnf_relations = db_vnf_configuration.get("relation", [])
                    if db_vnf_relations:
                        for r in db_vnf_relations:
                            # check if this VCA is in the relation
                            if my_vca.get("vdu_id") in (
                                r.get("entities")[0].get("id"),
                                r.get("entities")[1].get("id"),
                            ):
                                vnf_relations.append(r)

            # if no relations, terminate
            if not ns_relations and not vnf_relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(
                logging_text
                + " adding relations\n {}\n {}".format(
                    ns_relations, vnf_relations
                )
            )

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deloyed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each defined NS relation, find the VCA's related
                # a relation is attempted only when both endpoints have their
                # software installed; entries are removed from the pending list
                # once added (or when a peer is found BROKEN)
                for r in ns_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        if vca.get("member-vnf-index") == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get("member-vnf-index") == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        ns_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        0
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                                    if vca.get("member-vnf-index") == r.get("entities")[
                                        1
                                    ].get("id"):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            ns_relations.remove(r)
                        except Exception:
                            # ignore
                            pass

                # for each defined VNF relation, find the VCA's related
                for r in vnf_relations.copy():
                    from_vca_ee_id = None
                    to_vca_ee_id = None
                    from_vca_endpoint = None
                    to_vca_endpoint = None
                    vca_list = deep_get(db_nsr, ("_admin", "deployed", "VCA"))
                    for vca in vca_list:
                        # VNF-level relations match on vdu_id when the VCA is a
                        # VDU charm, otherwise on vnfd_id (VNF-level charm)
                        key_to_check = "vdu_id"
                        if vca.get("vdu_id") is None:
                            key_to_check = "vnfd_id"
                        if vca.get(key_to_check) == r.get("entities")[0].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            from_vca_ee_id = vca.get("ee_id")
                            from_vca_endpoint = r.get("entities")[0].get("endpoint")
                        if vca.get(key_to_check) == r.get("entities")[1].get(
                            "id"
                        ) and vca.get("config_sw_installed"):
                            to_vca_ee_id = vca.get("ee_id")
                            to_vca_endpoint = r.get("entities")[1].get("endpoint")
                    if from_vca_ee_id and to_vca_ee_id:
                        # add relation
                        await self.vca_map[vca_type].add_relation(
                            ee_id_1=from_vca_ee_id,
                            ee_id_2=to_vca_ee_id,
                            endpoint_1=from_vca_endpoint,
                            endpoint_2=to_vca_endpoint,
                            vca_id=vca_id,
                        )
                        # remove entry from relations list
                        vnf_relations.remove(r)
                    else:
                        # check failed peers
                        try:
                            vca_status_list = db_nsr.get("configurationStatus")
                            if vca_status_list:
                                for i in range(len(vca_list)):
                                    vca = vca_list[i]
                                    vca_status = vca_status_list[i]
                                    if vca.get("vdu_id") == r.get("entities")[0].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                                    if vca.get("vdu_id") == r.get("entities")[1].get(
                                        "id"
                                    ):
                                        if vca_status.get("status") == "BROKEN":
                                            # peer broken: remove relation from list
                                            vnf_relations.remove(r)
                        except Exception:
                            # ignore
                            pass

                # wait for next try
                await asyncio.sleep(5.0)

                if not ns_relations and not vnf_relations:
                    self.logger.debug("Relations added")
                    break

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
2827
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """
        Instantiate one KDU on its K8s cluster and update the vnfr/nsr records.

        Generates the kdu-instance name, installs the helm chart / juju bundle,
        reads the deployed k8s services to fill the kdur services and
        management ip addresses, and finally runs the KDU initial config
        primitives when they exist and no juju execution environment handles
        them.

        :param nsr_id: id of the ns record
        :param nsr_db_path: dot path inside the nsr where this KDU is tracked
        :param vnfr_data: vnfr record content of the owning VNF
        :param kdu_index: position of this KDU inside the vnfr "kdur" list
        :param kdud: KDU descriptor content (from the vnfd)
        :param vnfd: vnfd content
        :param k8s_instance_info: dict with "k8scluster-uuid", "k8scluster-type",
            "kdu-model", "kdu-name" and "namespace"
        :param k8params: parameters for the KDU instantiation
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: id of the VCA (passed through to the K8s connectors)
        :return: the generated kdu_instance name
        :raises: re-raises any instantiation error after recording it at db
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            kdu_instance = self.k8scluster_map[
                k8sclustertype
            ].generate_kdu_instance_name(
                db_dict=db_dict_install,
                kdu_model=k8s_instance_info["kdu-model"],
                kdu_name=k8s_instance_info["kdu-name"],
            )
            # store the instance name before installing, so it can be tracked
            # (and cleaned up) even if the install fails midway
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )
            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # match deployed k8s services against the descriptor
                        # mgmt-service by name prefix
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt-service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial config primitives only when no juju execution
            # environment is in charge of them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
2989
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Deploy every KDU (helm chart or juju bundle) present in the vnfrs.

        Pre-creates the "_admin.deployed.K8s" entries in the nsr record and
        launches one asyncio task per KDU (via _install_kdu), registering each
        task in the LCM task registry and in task_instantiation_info.

        :param logging_text: prefix for all log messages
        :param nsr_id: ns record id ("nsrs" collection)
        :param nslcmop_id: operation id, used when registering launched tasks
        :param db_vnfrs: dict of vnfr records (only the values are used here)
        :param db_vnfds: list of vnfd records, searched by "_id"
        :param task_instantiation_info: dict task -> description, filled here
        :raises LcmException: on unknown kdu type or any deployment setup error
        """
        # Launch kdus if present in the descriptor

        # cache: cluster_type -> {k8s-cluster-id: cluster uuid used by the connector}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the connector-level uuid of a k8s cluster,
            waiting for in-flight k8scluster tasks and lazily initializing
            helm-v3 on clusters registered before helm-v3 support existed."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            # reset the deployed K8s list; individual entries are added below
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    # descriptor of this kdu inside the vnfd
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage and storage.get(
                            "pkg-dir"
                        ):  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            filename = "{}/{}/{}s/{}".format(
                                storage["folder"],
                                storage["pkg-dir"],
                                k8sclustertype,
                                kdumodel,
                            )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per cluster and helm flavour)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=600,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever K8s entries were prepared, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3254
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment found in
        descriptor_config, reusing or creating the matching entry in
        db_nsr._admin.deployed.VCA.

        An ee may be a juju charm (proxy/native/k8s-proxy) or a helm chart
        (v2/v3); other configuration types (e.g. plain scripts) are skipped.
        Each launched task is registered in the LCM task registry and in
        task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            # ns-level charms declare "juju" directly in the descriptor, without
            # an execution-environment-list wrapper (fix for bug 1533)
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                # with a "charm" ref it is a proxy charm, otherwise a native one
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    # helm v3 is the default when no helm-version is given
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # look for an already-deployed VCA entry matching this ee
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                # index just past the last examined entry (-1 + 1 == 0 for empty list)
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3407
3408 @staticmethod
3409 def _create_nslcmop(nsr_id, operation, params):
3410 """
3411 Creates a ns-lcm-opp content to be stored at database.
3412 :param nsr_id: internal id of the instance
3413 :param operation: instantiate, terminate, scale, action, ...
3414 :param params: user parameters for the operation
3415 :return: dictionary following SOL005 format
3416 """
3417 # Raise exception if invalid arguments
3418 if not (nsr_id and operation and params):
3419 raise LcmException(
3420 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3421 )
3422 now = time()
3423 _id = str(uuid4())
3424 nslcmop = {
3425 "id": _id,
3426 "_id": _id,
3427 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3428 "operationState": "PROCESSING",
3429 "statusEnteredTime": now,
3430 "nsInstanceId": nsr_id,
3431 "lcmOperationType": operation,
3432 "startTime": now,
3433 "isAutomaticInvocation": False,
3434 "operationParams": params,
3435 "isCancelPending": False,
3436 "links": {
3437 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3438 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3439 },
3440 }
3441 return nslcmop
3442
3443 def _format_additional_params(self, params):
3444 params = params or {}
3445 for key, value in params.items():
3446 if str(value).startswith("!!yaml "):
3447 params[key] = yaml.safe_load(value[7:])
3448 return params
3449
3450 def _get_terminate_primitive_params(self, seq, vnf_index):
3451 primitive = seq.get("name")
3452 primitive_params = {}
3453 params = {
3454 "member_vnf_index": vnf_index,
3455 "primitive": primitive,
3456 "primitive_params": primitive_params,
3457 }
3458 desc_params = {}
3459 return self._map_primitive_params(seq, params, desc_params)
3460
3461 # sub-operations
3462
3463 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3464 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3465 if op.get("operationState") == "COMPLETED":
3466 # b. Skip sub-operation
3467 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3468 return self.SUBOPERATION_STATUS_SKIP
3469 else:
3470 # c. retry executing sub-operation
3471 # The sub-operation exists, and operationState != 'COMPLETED'
3472 # Update operationState = 'PROCESSING' to indicate a retry.
3473 operationState = "PROCESSING"
3474 detailed_status = "In progress"
3475 self._update_suboperation_status(
3476 db_nslcmop, op_index, operationState, detailed_status
3477 )
3478 # Return the sub-operation index
3479 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3480 # with arguments extracted from the sub-operation
3481 return op_index
3482
3483 # Find a sub-operation where all keys in a matching dictionary must match
3484 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3485 def _find_suboperation(self, db_nslcmop, match):
3486 if db_nslcmop and match:
3487 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3488 for i, op in enumerate(op_list):
3489 if all(op.get(k) == match[k] for k in match):
3490 return i
3491 return self.SUBOPERATION_STATUS_NOT_FOUND
3492
3493 # Update status for a sub-operation given its index
3494 def _update_suboperation_status(
3495 self, db_nslcmop, op_index, operationState, detailed_status
3496 ):
3497 # Update DB for HA tasks
3498 q_filter = {"_id": db_nslcmop["_id"]}
3499 update_dict = {
3500 "_admin.operations.{}.operationState".format(op_index): operationState,
3501 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3502 }
3503 self.db.set_one(
3504 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3505 )
3506
3507 # Add sub-operation, return the index of the added sub-operation
3508 # Optionally, set operationState, detailed-status, and operationType
3509 # Status and type are currently set for 'scale' sub-operations:
3510 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3511 # 'detailed-status' : status message
3512 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3513 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3514 def _add_suboperation(
3515 self,
3516 db_nslcmop,
3517 vnf_index,
3518 vdu_id,
3519 vdu_count_index,
3520 vdu_name,
3521 primitive,
3522 mapped_primitive_params,
3523 operationState=None,
3524 detailed_status=None,
3525 operationType=None,
3526 RO_nsr_id=None,
3527 RO_scaling_info=None,
3528 ):
3529 if not db_nslcmop:
3530 return self.SUBOPERATION_STATUS_NOT_FOUND
3531 # Get the "_admin.operations" list, if it exists
3532 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3533 op_list = db_nslcmop_admin.get("operations")
3534 # Create or append to the "_admin.operations" list
3535 new_op = {
3536 "member_vnf_index": vnf_index,
3537 "vdu_id": vdu_id,
3538 "vdu_count_index": vdu_count_index,
3539 "primitive": primitive,
3540 "primitive_params": mapped_primitive_params,
3541 }
3542 if operationState:
3543 new_op["operationState"] = operationState
3544 if detailed_status:
3545 new_op["detailed-status"] = detailed_status
3546 if operationType:
3547 new_op["lcmOperationType"] = operationType
3548 if RO_nsr_id:
3549 new_op["RO_nsr_id"] = RO_nsr_id
3550 if RO_scaling_info:
3551 new_op["RO_scaling_info"] = RO_scaling_info
3552 if not op_list:
3553 # No existing operations, create key 'operations' with current operation as first list element
3554 db_nslcmop_admin.update({"operations": [new_op]})
3555 op_list = db_nslcmop_admin.get("operations")
3556 else:
3557 # Existing operations, append operation to list
3558 op_list.append(new_op)
3559
3560 db_nslcmop_update = {"_admin.operations": op_list}
3561 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3562 op_index = len(op_list) - 1
3563 return op_index
3564
3565 # Helper methods for scale() sub-operations
3566
3567 # pre-scale/post-scale:
3568 # Check for 3 different cases:
3569 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3570 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3571 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3572 def _check_or_add_scale_suboperation(
3573 self,
3574 db_nslcmop,
3575 vnf_index,
3576 vnf_config_primitive,
3577 primitive_params,
3578 operationType,
3579 RO_nsr_id=None,
3580 RO_scaling_info=None,
3581 ):
3582 # Find this sub-operation
3583 if RO_nsr_id and RO_scaling_info:
3584 operationType = "SCALE-RO"
3585 match = {
3586 "member_vnf_index": vnf_index,
3587 "RO_nsr_id": RO_nsr_id,
3588 "RO_scaling_info": RO_scaling_info,
3589 }
3590 else:
3591 match = {
3592 "member_vnf_index": vnf_index,
3593 "primitive": vnf_config_primitive,
3594 "primitive_params": primitive_params,
3595 "lcmOperationType": operationType,
3596 }
3597 op_index = self._find_suboperation(db_nslcmop, match)
3598 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3599 # a. New sub-operation
3600 # The sub-operation does not exist, add it.
3601 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3602 # The following parameters are set to None for all kind of scaling:
3603 vdu_id = None
3604 vdu_count_index = None
3605 vdu_name = None
3606 if RO_nsr_id and RO_scaling_info:
3607 vnf_config_primitive = None
3608 primitive_params = None
3609 else:
3610 RO_nsr_id = None
3611 RO_scaling_info = None
3612 # Initial status for sub-operation
3613 operationState = "PROCESSING"
3614 detailed_status = "In progress"
3615 # Add sub-operation for pre/post-scaling (zero or more operations)
3616 self._add_suboperation(
3617 db_nslcmop,
3618 vnf_index,
3619 vdu_id,
3620 vdu_count_index,
3621 vdu_name,
3622 vnf_config_primitive,
3623 primitive_params,
3624 operationState,
3625 detailed_status,
3626 operationType,
3627 RO_nsr_id,
3628 RO_scaling_info,
3629 )
3630 return self.SUBOPERATION_STATUS_NEW
3631 else:
3632 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3633 # or op_index (operationState != 'COMPLETED')
3634 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3635
3636 # Function to return execution_environment id
3637
3638 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3639 # TODO vdu_index_count
3640 for vca in vca_deployed_list:
3641 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3642 return vca["ee_id"]
3643
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (skipped if destroy_ee=False)
        :param logging_text: prefix for log messages
        :param db_nslcmop: content of the nslcmop, used to record sub-operations
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) id, passed through to n2vc calls
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type for entries created before "type" was stored
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            # only the primitives bound to this ee descriptor, in "seq" order
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {}  for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # remove the prometheus scrape jobs created for this VCA, if any
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_id=vca_id,
            )
3747
3748 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3749 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3750 namespace = "." + db_nsr["_id"]
3751 try:
3752 await self.n2vc.delete_namespace(
3753 namespace=namespace,
3754 total_timeout=self.timeout_charm_delete,
3755 vca_id=vca_id,
3756 )
3757 except N2VCNotFound: # already deleted. Skip
3758 pass
3759 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3760
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text: prefix for log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: ns record id ("nsrs" collection)
        :param nslcmop_id: operation id, used to write operation status
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: when any RO deletion step fails (other than 404)
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                # persist the delete action id so an interrupted terminate can resume the wait below
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at the VIM side
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only write to database when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only attempted when the ns deletion above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO for this ns
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
3960
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance: run terminating config primitives, delete the
        execution environments (VCA), the KDU instances and the VIM deployment
        (RO/NG-RO), then persist the final status.

        :param nsr_id: id of the nsrs record to terminate
        :param nslcmop_id: id of the nslcmops record tracking this operation
        :return: None. The result is written to the nsrs/nslcmops records and
            notified on the kafka bus (topic "ns", command "terminated").
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                # the operation may override the configured terminate timeout
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy so that in-place fixes below do not mutate the db record
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed; the finally block still records the result
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            # cache of vnfd by id, and an index of vnfd by member-vnf-index
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching this VCA's scope
                # (ns-level, vdu-level, kdu-level or vnf-level)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # abort; the finally block persists the error status
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    # propagate the final NS state to all related VNFRs
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify NBI so it can auto-delete the NS record if requested
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4288
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of asyncio tasks, updating progress in the database as
        each one completes.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping each asyncio task to a human
            readable description, used for logs and error messages
        :param timeout: global timeout in seconds for the whole set of tasks
        :param stage: 3-item list [stage, step, VIM-status]; stage[1] is updated
            in place with "done/total" progress and any error summary
        :param nslcmop_id: id of the nslcmops record where progress is written
        :param nsr_id: if provided, errors are also written to this nsrs record
        :return: list of error detail strings; empty if every task succeeded
        """
        time_start = time()
        error_detail_list = []  # "<description>: <error>" entries (returned)
        error_list = []  # shorter entries, used for nsr errorDescription
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget; shrinks on every wake-up so the global timeout holds
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled exceptions are logged plainly; anything
                    # else gets a full traceback to ease debugging
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4365
4366 @staticmethod
4367 def _map_primitive_params(primitive_desc, params, instantiation_params):
4368 """
4369 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4370 The default-value is used. If it is between < > it look for a value at instantiation_params
4371 :param primitive_desc: portion of VNFD/NSD that describes primitive
4372 :param params: Params provided by user
4373 :param instantiation_params: Instantiation params provided by user
4374 :return: a dictionary with the calculated params
4375 """
4376 calculated_params = {}
4377 for parameter in primitive_desc.get("parameter", ()):
4378 param_name = parameter["name"]
4379 if param_name in params:
4380 calculated_params[param_name] = params[param_name]
4381 elif "default-value" in parameter or "value" in parameter:
4382 if "value" in parameter:
4383 calculated_params[param_name] = parameter["value"]
4384 else:
4385 calculated_params[param_name] = parameter["default-value"]
4386 if (
4387 isinstance(calculated_params[param_name], str)
4388 and calculated_params[param_name].startswith("<")
4389 and calculated_params[param_name].endswith(">")
4390 ):
4391 if calculated_params[param_name][1:-1] in instantiation_params:
4392 calculated_params[param_name] = instantiation_params[
4393 calculated_params[param_name][1:-1]
4394 ]
4395 else:
4396 raise LcmException(
4397 "Parameter {} needed to execute primitive {} not provided".format(
4398 calculated_params[param_name], primitive_desc["name"]
4399 )
4400 )
4401 else:
4402 raise LcmException(
4403 "Parameter {} needed to execute primitive {} not provided".format(
4404 param_name, primitive_desc["name"]
4405 )
4406 )
4407
4408 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4409 calculated_params[param_name] = yaml.safe_dump(
4410 calculated_params[param_name], default_flow_style=True, width=256
4411 )
4412 elif isinstance(calculated_params[param_name], str) and calculated_params[
4413 param_name
4414 ].startswith("!!yaml "):
4415 calculated_params[param_name] = calculated_params[param_name][7:]
4416 if parameter.get("data-type") == "INTEGER":
4417 try:
4418 calculated_params[param_name] = int(calculated_params[param_name])
4419 except ValueError: # error converting string to int
4420 raise LcmException(
4421 "Parameter {} of primitive {} must be integer".format(
4422 param_name, primitive_desc["name"]
4423 )
4424 )
4425 elif parameter.get("data-type") == "BOOLEAN":
4426 calculated_params[param_name] = not (
4427 (str(calculated_params[param_name])).lower() == "false"
4428 )
4429
4430 # add always ns_config_info if primitive name is config
4431 if primitive_desc["name"] == "config":
4432 if "ns_config_info" in instantiation_params:
4433 calculated_params["ns_config_info"] = instantiation_params[
4434 "ns_config_info"
4435 ]
4436 return calculated_params
4437
4438 def _look_for_deployed_vca(
4439 self,
4440 deployed_vca,
4441 member_vnf_index,
4442 vdu_id,
4443 vdu_count_index,
4444 kdu_name=None,
4445 ee_descriptor_id=None,
4446 ):
4447 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4448 for vca in deployed_vca:
4449 if not vca:
4450 continue
4451 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4452 continue
4453 if (
4454 vdu_count_index is not None
4455 and vdu_count_index != vca["vdu_count_index"]
4456 ):
4457 continue
4458 if kdu_name and kdu_name != vca["kdu_name"]:
4459 continue
4460 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4461 continue
4462 break
4463 else:
4464 # vca_deployed not found
4465 raise LcmException(
4466 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4467 " is not deployed".format(
4468 member_vnf_index,
4469 vdu_id,
4470 vdu_count_index,
4471 kdu_name,
4472 ee_descriptor_id,
4473 )
4474 )
4475 # get ee_id
4476 ee_id = vca.get("ee_id")
4477 vca_type = vca.get(
4478 "type", "lxc_proxy_charm"
4479 ) # default value for backward compatibility - proxy charm
4480 if not ee_id:
4481 raise LcmException(
4482 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4483 "execution environment".format(
4484 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4485 )
4486 )
4487 return ee_id, vca_type
4488
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """
        Execute a primitive on an execution environment, retrying on failure.

        :param ee_id: id of the execution environment
        :param primitive: name of the primitive/action to execute
        :param primitive_params: dict of parameters for the primitive
        :param retries: number of extra attempts after a failed execution
        :param retries_interval: seconds to sleep between attempts
        :param timeout: max seconds per attempt (defaults to self.timeout_primitive)
        :param vca_type: VCA connector type (defaults to "lxc_proxy_charm")
        :param db_dict: where the connector writes status: {collection, filter, path}
        :param vca_id: vca id used to select the connection registry
        :return: tuple (state, detail): ("COMPLETED", output) on success,
            ("FAILED", error) when all retries are exhausted, or
            ("FAIL", error) on an unexpected exception outside the retry loop
        """
        try:
            if primitive == "config":
                # the special "config" primitive expects its params wrapped
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    # cancellation must propagate; never retry it
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4546
4547 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4548 """
4549 Updating the vca_status with latest juju information in nsrs record
4550 :param: nsr_id: Id of the nsr
4551 :param: nslcmop_id: Id of the nslcmop
4552 :return: None
4553 """
4554
4555 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4556 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4557 vca_id = self.get_vca_id({}, db_nsr)
4558 if db_nsr["_admin"]["deployed"]["K8s"]:
4559 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4560 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4561 await self._on_update_k8s_db(
4562 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4563 )
4564 else:
4565 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4566 table, filter = "nsrs", {"_id": nsr_id}
4567 path = "_admin.deployed.VCA.{}.".format(vca_index)
4568 await self._on_update_n2vc_db(table, filter, path, {})
4569
4570 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4571 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4572
    async def action(self, nsr_id, nslcmop_id):
        """
        Run a primitive (action) against a NS, VNF, VDU or KDU.

        The target and primitive come from the nslcmops record operationParams:
        member_vnf_index/vdu_id/kdu_name/vdu_count_index select the scope, and
        "primitive"/"primitive_params" select what to run. KDU targets go
        through the k8s connector (upgrade/rollback/status or a
        descriptor-declared action); everything else goes through the deployed
        VCA execution environment. The result is persisted in the
        nslcmops/nsrs records and notified on kafka ("ns"/"actioned").

        :param nsr_id: id of the nsrs record
        :param nslcmop_id: id of the nslcmops record describing the action
        :return: (nslcmop_operation_state, detailed_status), returned from the
            finally block; None when the HA lock is held by another worker
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound in the vnf_index branch above;
            # an ns-level action reaching this line would raise NameError —
            # confirm callers always provide member_vnf_index here.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only the built-in kdu operations may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                # the descriptor may map the user-facing name to a different
                # primitive and/or a specific execution environment
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # gather the additionalParams matching the action's scope
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            # NOTE(review): kdu_action is only assigned inside this branch; the
            # "or kdu_action" below relies on short-circuit for the built-in
            # kdu primitives — confirm a kdu without configuration cannot reach
            # it with a custom primitive name.
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu_action = True if primitive_name in actions else False

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        # drop a ":version" suffix from the model reference
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # descriptor-declared kdu action
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm-based action: locate the execution environment and run it
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
4910
    async def scale(self, nsr_id, nslcmop_id):
        """Scale a VNF of a deployed NS in or out, as requested by an nslcmop.

        Reads the scaling request (member-vnf-index, scaling-group-descriptor
        and SCALE_IN/SCALE_OUT type) from the nslcmop document, computes the
        VDU/KDU deltas from the VNFD scaling aspect, and executes the phases in
        order: pre-scale config primitives -> VCA scale-in -> RO (VIM) scaling
        -> KDU scaling -> VCA scale-out -> post-scale config primitives.
        Progress and the final operation state are persisted to the "nsrs" and
        "nslcmops" collections, and a "scaled" notification is written to kafka.

        :param nsr_id: _id of the NS record in the "nsrs" collection
        :param nslcmop_id: _id of the operation in the "nslcmops" collection
        :return: None; results are reported via database updates and kafka
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation; nothing to do here
            return

        logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        scale_process = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="SCALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # keep the pre-scale statuses so they can be restored on success/failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            step = "Parsing scaling parameters"
            db_nsr_update["operational-status"] = "scaling"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["member-vnf-index"]
            scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
                "scaleByStepData"
            ]["scaling-group-descriptor"]
            scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                # old records stored VCA as a dict; normalize to a list in-place and persist
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
            )

            vca_id = self.get_vca_id(db_vnfr, db_nsr)

            step = "Getting vnfd from database"
            db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

            base_folder = db_vnfd["_admin"]["storage"]

            step = "Getting scaling-group-descriptor"
            scaling_descriptor = find_in_list(
                get_scaling_aspect(db_vnfd),
                lambda scale_desc: scale_desc["name"] == scaling_group,
            )
            if not scaling_descriptor:
                raise LcmException(
                    "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
                    "at vnfd:scaling-group-descriptor".format(scaling_group)
                )

            step = "Sending scale order to VIM"
            # TODO check if ns is in a proper status
            # nb_scale_op accumulates the net number of scale operations already
            # applied to this scaling-group (persisted in _admin.scaling-group)
            nb_scale_op = 0
            if not db_nsr["_admin"].get("scaling-group"):
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {
                        "_admin.scaling-group": [
                            {"name": scaling_group, "nb-scale-op": 0}
                        ]
                    },
                )
                admin_scale_index = 0
            else:
                for admin_scale_index, admin_scale_info in enumerate(
                    db_nsr["_admin"]["scaling-group"]
                ):
                    if admin_scale_info["name"] == scaling_group:
                        nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                        break
                else:  # not found, set index one plus last element and add new entry with the name
                    admin_scale_index += 1
                    db_nsr_update[
                        "_admin.scaling-group.{}.name".format(admin_scale_index)
                    ] = scaling_group

            # vca_scaling_info: per-instance VCA create/delete actions
            # scaling_info: aggregate vdu/kdu deltas consumed by RO and K8s phases
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
            if scaling_type == "SCALE_OUT":
                if "aspect-delta-details" not in scaling_descriptor:
                    raise LcmException(
                        "Aspect delta details not fount in scaling descriptor {}".format(
                            scaling_descriptor["name"]
                        )
                    )
                # count if max-instance-count is reached
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "OUT"
                scaling_info["vdu-create"] = {}
                scaling_info["kdu-create"] = {}
                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        vdud = get_vdu(db_vnfd, vdu_delta["id"])
                        # vdu_index also provides the number of instance of the targeted vdu
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        cloud_init_text = self._get_vdu_cloud_init_content(
                            vdud, db_vnfd
                        )
                        if cloud_init_text:
                            additional_params = (
                                self._get_vdu_additional_params(db_vnfr, vdud["id"])
                                or {}
                            )
                        cloud_init_list = []

                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        # default cap when the VNFD does not declare one
                        max_instance_count = 10
                        if vdu_profile and "max-number-of-instances" in vdu_profile:
                            max_instance_count = vdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdud["id"]
                        )
                        instances_number = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op += instances_number

                        new_instance_count = nb_scale_op + default_instance_num
                        # Control if new count is over max and vdu count is less than max.
                        # Then assign new instance count
                        # NOTE(review): when this branch is taken the raise below also
                        # fires (same new_instance_count > max_instance_count test), so
                        # the clamped value is never used — confirm intended behavior
                        if new_instance_count > max_instance_count > vdu_count:
                            instances_number = new_instance_count - max_instance_count
                        else:
                            instances_number = instances_number

                        if new_instance_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            if cloud_init_text:
                                # TODO Information of its own ip is not available because db_vnfr is not updated.
                                additional_params["OSM"] = get_osm_params(
                                    db_vnfr, vdu_delta["id"], vdu_index + x
                                )
                                cloud_init_list.append(
                                    self._parse_cloud_init(
                                        cloud_init_text,
                                        additional_params,
                                        db_vnfd["id"],
                                        vdud["id"],
                                    )
                                )
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "vdu_index": vdu_index + x,
                                }
                            )
                        scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        # Might have different kdus in the same delta
                        # Should have list for each kdu
                        if not scaling_info["kdu-create"].get(kdu_name, None):
                            scaling_info["kdu-create"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                # helm v2 KDU scaling is not supported
                                k8s_cluster_type = "helm-chart"
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdu_name
                                )
                            )

                        # default cap when the VNFD does not declare one
                        max_instance_count = 10
                        if kdu_profile and "max-number-of-instances" in kdu_profile:
                            max_instance_count = kdu_profile.get(
                                "max-number-of-instances", 10
                            )

                        nb_scale_op += kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count is read live from the k8s cluster
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num + kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # Control if new count is over max and instance_num is less than max.
                        # Then assign max instance number to kdu replica count
                        if kdu_replica_count > max_instance_count > instance_num:
                            kdu_replica_count = max_instance_count
                        if kdu_replica_count > max_instance_count:
                            raise LcmException(
                                "reached the limit of {} (max-instance-count) "
                                "scaling-out operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "create",
                                    "kdu_index": instance_num + x - 1,
                                }
                            )
                        scaling_info["kdu-create"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "create",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )
            elif scaling_type == "SCALE_IN":
                deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]

                scaling_info["scaling_direction"] = "IN"
                scaling_info["vdu-delete"] = {}
                scaling_info["kdu-delete"] = {}

                for delta in deltas:
                    for vdu_delta in delta.get("vdu-delta", {}):
                        # vdu_index also provides the current number of instances
                        vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
                        min_instance_count = 0
                        vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
                        if vdu_profile and "min-number-of-instances" in vdu_profile:
                            min_instance_count = vdu_profile["min-number-of-instances"]

                        default_instance_num = get_number_of_instances(
                            db_vnfd, vdu_delta["id"]
                        )
                        instance_num = vdu_delta.get("number-of-instances", 1)
                        nb_scale_op -= instance_num

                        new_instance_count = nb_scale_op + default_instance_num

                        # clamp the delete count so the floor is not crossed
                        if new_instance_count < min_instance_count < vdu_count:
                            instances_number = min_instance_count - new_instance_count
                        else:
                            instances_number = instance_num

                        if new_instance_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    nb_scale_op, scaling_group
                                )
                            )
                        for x in range(vdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_vdu_id": vdu_delta["id"],
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    # delete from the highest index downwards
                                    "vdu_index": vdu_index - 1 - x,
                                }
                            )
                        scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
                    for kdu_delta in delta.get("kdu-resource-delta", {}):
                        kdu_profile = get_kdu_profile(db_vnfd, kdu_delta["id"])
                        kdu_name = kdu_profile["kdu-name"]
                        resource_name = kdu_profile["resource-name"]

                        if not scaling_info["kdu-delete"].get(kdu_name, None):
                            scaling_info["kdu-delete"][kdu_name] = []

                        kdur = get_kdur(db_vnfr, kdu_name)
                        if kdur.get("helm-chart"):
                            k8s_cluster_type = "helm-chart-v3"
                            self.logger.debug("kdur: {}".format(kdur))
                            if (
                                kdur.get("helm-version")
                                and kdur.get("helm-version") == "v2"
                            ):
                                # helm v2 KDU scaling is not supported
                                k8s_cluster_type = "helm-chart"
                                raise NotImplementedError
                        elif kdur.get("juju-bundle"):
                            k8s_cluster_type = "juju-bundle"
                        else:
                            raise LcmException(
                                "kdu type for kdu='{}.{}' is neither helm-chart nor "
                                "juju-bundle. Maybe an old NBI version is running".format(
                                    db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
                                )
                            )

                        min_instance_count = 0
                        if kdu_profile and "min-number-of-instances" in kdu_profile:
                            min_instance_count = kdu_profile["min-number-of-instances"]

                        nb_scale_op -= kdu_delta.get("number-of-instances", 1)
                        deployed_kdu, _ = get_deployed_kdu(
                            nsr_deployed, kdu_name, vnf_index
                        )
                        if deployed_kdu is None:
                            raise LcmException(
                                "KDU '{}' for vnf '{}' not deployed".format(
                                    kdu_name, vnf_index
                                )
                            )
                        kdu_instance = deployed_kdu.get("kdu-instance")
                        # current replica count is read live from the k8s cluster
                        instance_num = await self.k8scluster_map[
                            k8s_cluster_type
                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
                        kdu_replica_count = instance_num - kdu_delta.get(
                            "number-of-instances", 1
                        )

                        # clamp at the declared floor before validating
                        if kdu_replica_count < min_instance_count < instance_num:
                            kdu_replica_count = min_instance_count
                        if kdu_replica_count < min_instance_count:
                            raise LcmException(
                                "reached the limit of {} (min-instance-count) scaling-in operations for the "
                                "scaling-group-descriptor '{}'".format(
                                    instance_num, scaling_group
                                )
                            )

                        for x in range(kdu_delta.get("number-of-instances", 1)):
                            vca_scaling_info.append(
                                {
                                    "osm_kdu_id": kdu_name,
                                    "member-vnf-index": vnf_index,
                                    "type": "delete",
                                    "kdu_index": instance_num - x - 1,
                                }
                            )
                        scaling_info["kdu-delete"][kdu_name].append(
                            {
                                "member-vnf-index": vnf_index,
                                "type": "delete",
                                "k8s-cluster-type": k8s_cluster_type,
                                "resource-name": resource_name,
                                "scale": kdu_replica_count,
                            }
                        )

            # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
            vdu_delete = copy(scaling_info.get("vdu-delete"))
            if scaling_info["scaling_direction"] == "IN":
                # walk vdur in reverse so the highest-index instances are selected
                for vdur in reversed(db_vnfr["vdur"]):
                    if vdu_delete.get(vdur["vdu-id-ref"]):
                        vdu_delete[vdur["vdu-id-ref"]] -= 1
                        scaling_info["vdu"].append(
                            {
                                "name": vdur.get("name") or vdur.get("vdu-name"),
                                "vdu_id": vdur["vdu-id-ref"],
                                "interface": [],
                            }
                        )
                        for interface in vdur["interfaces"]:
                            scaling_info["vdu"][-1]["interface"].append(
                                {
                                    "name": interface["name"],
                                    "ip_address": interface["ip-address"],
                                    "mac_address": interface.get("mac-address"),
                                }
                            )
            # vdu_delete = vdu_scaling_info.pop("vdu-delete")

            # PRE-SCALE BEGIN
            step = "Executing pre-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "pre-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "pre-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing pre-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
                                "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
                                "primitive".format(scaling_group, vnf_config_primitive)
                            )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring pre-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Pre-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            nslcmop_id,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "PRE-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
            db_nsr_update["config-status"] = old_config_status
            scale_process = None
            # PRE-SCALE END

            # persist the accumulated scale-op counter and timestamp
            db_nsr_update[
                "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
            ] = nb_scale_op
            db_nsr_update[
                "_admin.scaling-group.{}.time".format(admin_scale_index)
            ] = time()

            # SCALE-IN VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Deleting the execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "delete":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        if vca_info.get("osm_vdu_id"):
                            vdu_id = vca_info["osm_vdu_id"]
                            vdu_index = int(vca_info["vdu_index"])
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index
                            )
                        else:
                            vdu_index = 0
                            kdu_id = vca_info["osm_kdu_id"]
                            stage[
                                1
                            ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
                                member_vnf_index, kdu_id, vdu_index
                            )
                        stage[2] = step = "Scaling in VCA"
                        self._write_op_status(op_id=nslcmop_id, stage=stage)
                        vca_update = db_nsr["_admin"]["deployed"]["VCA"]
                        config_update = db_nsr["configurationStatus"]
                        # NOTE(review): entries are deleted from vca_update/config_update
                        # while enumerating, which skips the element following each
                        # match — confirm at most one VCA matches per (index, vdu_index)
                        for vca_index, vca in enumerate(vca_update):
                            if (
                                (vca or vca.get("ee_id"))
                                and vca["member-vnf-index"] == member_vnf_index
                                and vca["vdu_count_index"] == vdu_index
                            ):
                                if vca.get("vdu_id"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("vdu_id")
                                    )
                                elif vca.get("kdu_name"):
                                    config_descriptor = get_configuration(
                                        db_vnfd, vca.get("kdu_name")
                                    )
                                else:
                                    config_descriptor = get_configuration(
                                        db_vnfd, db_vnfd["id"]
                                    )
                                operation_params = (
                                    db_nslcmop.get("operationParams") or {}
                                )
                                exec_terminate_primitives = not operation_params.get(
                                    "skip_terminate_primitives"
                                ) and vca.get("needed_terminate")
                                task = asyncio.ensure_future(
                                    asyncio.wait_for(
                                        self.destroy_N2VC(
                                            logging_text,
                                            db_nslcmop,
                                            vca,
                                            config_descriptor,
                                            vca_index,
                                            destroy_ee=True,
                                            exec_primitives=exec_terminate_primitives,
                                            scaling_in=True,
                                            vca_id=vca_id,
                                        ),
                                        timeout=self.timeout_charm_delete,
                                    )
                                )
                                tasks_dict_info[task] = "Terminating VCA {}".format(
                                    vca.get("ee_id")
                                )
                                del vca_update[vca_index]
                                del config_update[vca_index]
                        # wait for pending tasks of terminate primitives
                        if tasks_dict_info:
                            self.logger.debug(
                                logging_text
                                + "Waiting for tasks {}".format(
                                    list(tasks_dict_info.keys())
                                )
                            )
                            error_list = await self._wait_for_tasks(
                                logging_text,
                                tasks_dict_info,
                                min(
                                    self.timeout_charm_delete, self.timeout_ns_terminate
                                ),
                                stage,
                                nslcmop_id,
                            )
                            tasks_dict_info.clear()
                            if error_list:
                                raise LcmException("; ".join(error_list))

                        db_vca_and_config_update = {
                            "_admin.deployed.VCA": vca_update,
                            "configurationStatus": config_update,
                        }
                        self.update_db_2(
                            "nsrs", db_nsr["_id"], db_vca_and_config_update
                        )
            scale_process = None
            # SCALE-IN VCA - END

            # SCALE RO - BEGIN
            if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                scale_process = "RO"
                if self.ro_config.get("ng"):
                    await self._scale_ng_ro(
                        logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                    )
                # consumed by RO; must not reach the VCA phases below
                scaling_info.pop("vdu-create", None)
                scaling_info.pop("vdu-delete", None)

                scale_process = None
            # SCALE RO - END

            # SCALE KDU - BEGIN
            if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
                scale_process = "KDU"
                await self._scale_kdu(
                    logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
                )
                scaling_info.pop("kdu-create", None)
                scaling_info.pop("kdu-delete", None)

                scale_process = None
            # SCALE KDU - END

            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # SCALE-UP VCA - BEGIN
            if vca_scaling_info:
                step = db_nslcmop_update[
                    "detailed-status"
                ] = "Creating new execution environments"
                scale_process = "VCA"
                for vca_info in vca_scaling_info:
                    if vca_info["type"] == "create":
                        member_vnf_index = str(vca_info["member-vnf-index"])
                        self.logger.debug(
                            logging_text + "vdu info: {}".format(vca_info)
                        )
                        vnfd_id = db_vnfr["vnfd-ref"]
                        if vca_info.get("osm_vdu_id"):
                            vdu_index = int(vca_info["vdu_index"])
                            deploy_params = {"OSM": get_osm_params(db_vnfr)}
                            if db_vnfr.get("additionalParamsForVnf"):
                                deploy_params.update(
                                    parse_yaml_strings(
                                        db_vnfr["additionalParamsForVnf"].copy()
                                    )
                                )
                            # VNF-level configuration charm (if any)
                            descriptor_config = get_configuration(
                                db_vnfd, db_vnfd["id"]
                            )
                            if descriptor_config:
                                vdu_id = None
                                vdu_name = None
                                kdu_name = None
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={} ".format(member_vnf_index),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                            # VDU-level configuration charm (if any)
                            vdu_id = vca_info["osm_vdu_id"]
                            vdur = find_in_list(
                                db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                            )
                            descriptor_config = get_configuration(db_vnfd, vdu_id)
                            if vdur.get("additionalParams"):
                                deploy_params_vdu = parse_yaml_strings(
                                    vdur["additionalParams"]
                                )
                            else:
                                deploy_params_vdu = deploy_params
                            deploy_params_vdu["OSM"] = get_osm_params(
                                db_vnfr, vdu_id, vdu_count_index=vdu_index
                            )
                            if descriptor_config:
                                vdu_name = None
                                kdu_name = None
                                stage[
                                    1
                                ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                )
                                stage[2] = step = "Scaling out VCA"
                                self._write_op_status(op_id=nslcmop_id, stage=stage)
                                self._deploy_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_id, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=vdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
                        else:
                            # KDU-level configuration charm (if any)
                            kdu_name = vca_info["osm_kdu_id"]
                            descriptor_config = get_configuration(db_vnfd, kdu_name)
                            if descriptor_config:
                                vdu_id = None
                                kdu_index = int(vca_info["kdu_index"])
                                vdu_name = None
                                kdur = next(
                                    x
                                    for x in db_vnfr["kdur"]
                                    if x["kdu-name"] == kdu_name
                                )
                                deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                                if kdur.get("additionalParams"):
                                    deploy_params_kdu = parse_yaml_strings(
                                        kdur["additionalParams"]
                                    )

                                self._deploy_n2vc(
                                    logging_text=logging_text,
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_id,
                                    vdu_id=vdu_id,
                                    kdu_name=kdu_name,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=kdu_index,
                                    vdu_name=vdu_name,
                                    deploy_params=deploy_params_kdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )
            # SCALE-UP VCA - END
            scale_process = None

            # POST-SCALE BEGIN
            # execute primitive service POST-SCALING
            step = "Executing post-scale vnf-config-primitive"
            if scaling_descriptor.get("scaling-config-action"):
                for scaling_config_action in scaling_descriptor[
                    "scaling-config-action"
                ]:
                    if (
                        scaling_config_action.get("trigger") == "post-scale-in"
                        and scaling_type == "SCALE_IN"
                    ) or (
                        scaling_config_action.get("trigger") == "post-scale-out"
                        and scaling_type == "SCALE_OUT"
                    ):
                        vnf_config_primitive = scaling_config_action[
                            "vnf-config-primitive-name-ref"
                        ]
                        step = db_nslcmop_update[
                            "detailed-status"
                        ] = "executing post-scale scaling-config-action '{}'".format(
                            vnf_config_primitive
                        )

                        vnfr_params = {"VDU_SCALE_INFO": scaling_info}
                        if db_vnfr.get("additionalParamsForVnf"):
                            vnfr_params.update(db_vnfr["additionalParamsForVnf"])

                        # look for primitive
                        for config_primitive in (
                            get_configuration(db_vnfd, db_vnfd["id"]) or {}
                        ).get("config-primitive", ()):
                            if config_primitive["name"] == vnf_config_primitive:
                                break
                        else:
                            raise LcmException(
                                "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
                                "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
                                "config-primitive".format(
                                    scaling_group, vnf_config_primitive
                                )
                            )
                        scale_process = "VCA"
                        db_nsr_update["config-status"] = "configuring post-scaling"
                        primitive_params = self._map_primitive_params(
                            config_primitive, {}, vnfr_params
                        )

                        # Post-scale retry check: Check if this sub-operation has been executed before
                        op_index = self._check_or_add_scale_suboperation(
                            db_nslcmop,
                            nslcmop_id,
                            vnf_index,
                            vnf_config_primitive,
                            primitive_params,
                            "POST-SCALE",
                        )
                        if op_index == self.SUBOPERATION_STATUS_SKIP:
                            # Skip sub-operation
                            result = "COMPLETED"
                            result_detail = "Done"
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                        else:
                            if op_index == self.SUBOPERATION_STATUS_NEW:
                                # New sub-operation: Get index of this sub-operation
                                op_index = (
                                    len(db_nslcmop.get("_admin", {}).get("operations"))
                                    - 1
                                )
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} New sub-operation".format(
                                        vnf_config_primitive
                                    )
                                )
                            else:
                                # retry: Get registered params for this existing sub-operation
                                op = db_nslcmop.get("_admin", {}).get("operations", [])[
                                    op_index
                                ]
                                vnf_index = op.get("member_vnf_index")
                                vnf_config_primitive = op.get("primitive")
                                primitive_params = op.get("primitive_params")
                                self.logger.debug(
                                    logging_text
                                    + "vnf_config_primitive={} Sub-operation retry".format(
                                        vnf_config_primitive
                                    )
                                )
                            # Execute the primitive, either with new (first-time) or registered (reintent) args
                            ee_descriptor_id = config_primitive.get(
                                "execution-environment-ref"
                            )
                            primitive_name = config_primitive.get(
                                "execution-environment-primitive", vnf_config_primitive
                            )
                            ee_id, vca_type = self._look_for_deployed_vca(
                                nsr_deployed["VCA"],
                                member_vnf_index=vnf_index,
                                vdu_id=None,
                                vdu_count_index=None,
                                ee_descriptor_id=ee_descriptor_id,
                            )
                            result, result_detail = await self._ns_execute_primitive(
                                ee_id,
                                primitive_name,
                                primitive_params,
                                vca_type=vca_type,
                                vca_id=vca_id,
                            )
                            self.logger.debug(
                                logging_text
                                + "vnf_config_primitive={} Done with result {} {}".format(
                                    vnf_config_primitive, result, result_detail
                                )
                            )
                            # Update operationState = COMPLETED | FAILED
                            self._update_suboperation_status(
                                db_nslcmop, op_index, result, result_detail
                            )

                        if result == "FAILED":
                            raise LcmException(result_detail)
            db_nsr_update["config-status"] = old_config_status
            scale_process = None
            # POST-SCALE END

            db_nsr_update[
                "detailed-status"
            ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
            db_nsr_update["operational-status"] = (
                "running"
                if old_operational_status == "failed"
                else old_operational_status
            )
            db_nsr_update["config-status"] = old_config_status
            return
        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # always release the "SCALING" state, drain pending tasks and report
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update["detailed-status"] = ""
                    # scale_process tells which phase failed, to set the right status
                    if scale_process:
                        if "VCA" in scale_process:
                            db_nsr_update["config-status"] = "failed"
                        if "RO" in scale_process:
                            db_nsr_update["operational-status"] = "failed"
                        db_nsr_update[
                            "detailed-status"
                        ] = "FAILED scaling nslcmop={} {}: {}".format(
                            nslcmop_id, step, exc
                        )
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # best-effort kafka notification; a failure must not fail the op
                    await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
5975
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale the KDU deployments (helm-chart / juju-bundle based) of an NS.

        :param logging_text: prefix prepended to every log message
        :param nsr_id: NS record id, used to address the "nsrs" collection
        :param nsr_deployed: nsr "_admin.deployed" content with the K8s deployments
        :param db_vnfd: VNF descriptor owning the KDUs to scale
        :param vca_id: VCA id, passed through to the k8s connector calls
        :param scaling_info: dict with "kdu-create" / "kdu-delete" maps of
            kdu_name -> list of per-kdu scaling items
        """
        # NOTE(review): when both "kdu-create" and "kdu-delete" keys are present
        # only "kdu-create" is iterated (each item's "type" field discriminates
        # below) — confirm callers never populate both keys at once.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                scale = int(kdu_scaling_info["scale"])
                # selects the connector (e.g. helm vs juju) in self.k8scluster_map
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db location where the k8s connector reports status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run the terminate primitives directly only when no juju
                    # execution environment is declared for this kdu
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must execute in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): 600 s timeout is hard-coded here,
                            # unlike the config-driven timeout used for scale()
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # the actual scale operation runs for both "delete" and "create"
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run the initial primitives directly only when no juju
                    # execution environment is declared for this kdu
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must execute in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
6076
6077 async def _scale_ng_ro(
6078 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6079 ):
6080 nsr_id = db_nslcmop["nsInstanceId"]
6081 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6082 db_vnfrs = {}
6083
6084 # read from db: vnfd's for every vnf
6085 db_vnfds = []
6086
6087 # for each vnf in ns, read vnfd
6088 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6089 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6090 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6091 # if we haven't this vnfd, read it from db
6092 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6093 # read from db
6094 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6095 db_vnfds.append(vnfd)
6096 n2vc_key = self.n2vc.get_public_key()
6097 n2vc_key_list = [n2vc_key]
6098 self.scale_vnfr(
6099 db_vnfr,
6100 vdu_scaling_info.get("vdu-create"),
6101 vdu_scaling_info.get("vdu-delete"),
6102 mark_delete=True,
6103 )
6104 # db_vnfr has been updated, update db_vnfrs to use it
6105 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6106 await self._instantiate_ng_ro(
6107 logging_text,
6108 nsr_id,
6109 db_nsd,
6110 db_nsr,
6111 db_nslcmop,
6112 db_vnfrs,
6113 db_vnfds,
6114 n2vc_key_list,
6115 stage=stage,
6116 start_deploy=time(),
6117 timeout_ns_deploy=self.timeout_ns_deploy,
6118 )
6119 if vdu_scaling_info.get("vdu-delete"):
6120 self.scale_vnfr(
6121 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6122 )
6123
6124 async def add_prometheus_metrics(
6125 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6126 ):
6127 if not self.prometheus:
6128 return
6129 # look if exist a file called 'prometheus*.j2' and
6130 artifact_content = self.fs.dir_ls(artifact_path)
6131 job_file = next(
6132 (
6133 f
6134 for f in artifact_content
6135 if f.startswith("prometheus") and f.endswith(".j2")
6136 ),
6137 None,
6138 )
6139 if not job_file:
6140 return
6141 with self.fs.file_open((artifact_path, job_file), "r") as f:
6142 job_data = f.read()
6143
6144 # TODO get_service
6145 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6146 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6147 host_port = "80"
6148 vnfr_id = vnfr_id.replace("-", "")
6149 variables = {
6150 "JOB_NAME": vnfr_id,
6151 "TARGET_IP": target_ip,
6152 "EXPORTER_POD_IP": host_name,
6153 "EXPORTER_POD_PORT": host_port,
6154 }
6155 job_list = self.prometheus.parse_job(job_data, variables)
6156 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
6157 for job in job_list:
6158 if (
6159 not isinstance(job.get("job_name"), str)
6160 or vnfr_id not in job["job_name"]
6161 ):
6162 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6163 job["nsr_id"] = nsr_id
6164 job_dict = {jl["job_name"]: jl for jl in job_list}
6165 if await self.prometheus.update(job_dict):
6166 return list(job_dict.keys())
6167
6168 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6169 """
6170 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6171
6172 :param: vim_account_id: VIM Account ID
6173
6174 :return: (cloud_name, cloud_credential)
6175 """
6176 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6177 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6178
6179 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6180 """
6181 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6182
6183 :param: vim_account_id: VIM Account ID
6184
6185 :return: (cloud_name, cloud_credential)
6186 """
6187 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6188 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")