Feature 10906: Support for Anti-Affinity groups
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 from typing import Any, Dict, List
21 import yaml
22 import logging
23 import logging.handlers
24 import traceback
25 import json
26 from jinja2 import (
27 Environment,
28 TemplateError,
29 TemplateNotFound,
30 StrictUndefined,
31 UndefinedError,
32 )
33
34 from osm_lcm import ROclient
35 from osm_lcm.data_utils.nsr import (
36 get_deployed_kdu,
37 get_deployed_vca,
38 get_deployed_vca_list,
39 get_nsd,
40 )
41 from osm_lcm.data_utils.vca import (
42 DeployedComponent,
43 DeployedK8sResource,
44 DeployedVCA,
45 EELevel,
46 Relation,
47 EERelation,
48 safe_get_ee_relation,
49 )
50 from osm_lcm.ng_ro import NgRoClient, NgRoException
51 from osm_lcm.lcm_utils import (
52 LcmException,
53 LcmExceptionNoMgmtIP,
54 LcmBase,
55 deep_get,
56 get_iterable,
57 populate_dict,
58 )
59 from osm_lcm.data_utils.nsd import (
60 get_ns_configuration_relation_list,
61 get_vnf_profile,
62 get_vnf_profiles,
63 )
64 from osm_lcm.data_utils.vnfd import (
65 get_relation_list,
66 get_vdu_list,
67 get_vdu_profile,
68 get_ee_sorted_initial_config_primitive_list,
69 get_ee_sorted_terminate_config_primitive_list,
70 get_kdu_list,
71 get_virtual_link_profiles,
72 get_vdu,
73 get_configuration,
74 get_vdu_index,
75 get_scaling_aspect,
76 get_number_of_instances,
77 get_juju_ee_ref,
78 get_kdu_resource_profile,
79 )
80 from osm_lcm.data_utils.list_utils import find_in_list
81 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
82 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
83 from osm_lcm.data_utils.database.vim_account import VimAccountDB
84 from n2vc.definitions import RelationEndpoint
85 from n2vc.k8s_helm_conn import K8sHelmConnector
86 from n2vc.k8s_helm3_conn import K8sHelm3Connector
87 from n2vc.k8s_juju_conn import K8sJujuConnector
88
89 from osm_common.dbbase import DbException
90 from osm_common.fsbase import FsException
91
92 from osm_lcm.data_utils.database.database import Database
93 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
94
95 from n2vc.n2vc_juju_conn import N2VCJujuConnector
96 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
97
98 from osm_lcm.lcm_helm_conn import LCMHelmConn
99 from osm_lcm.prometheus import parse_job
100
101 from copy import copy, deepcopy
102 from time import time
103 from uuid import uuid4
104
105 from random import randint
106
107 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
108
109
110 class NsLcm(LcmBase):
111 timeout_vca_on_error = (
112 5 * 60
113 ) # Time from when a charm first enters blocked/error status until it is marked as failed
114 timeout_ns_deploy = 2 * 3600 # default global timeout for deploying a NS
115 timeout_ns_terminate = 1800 # default global timeout for undeploying a NS
116 timeout_charm_delete = 10 * 60
117 timeout_primitive = 30 * 60 # timeout for primitive execution
118 timeout_progress_primitive = (
119 10 * 60
120 ) # timeout for some progress in a primitive execution
121
122 SUBOPERATION_STATUS_NOT_FOUND = -1
123 SUBOPERATION_STATUS_NEW = -2
124 SUBOPERATION_STATUS_SKIP = -3
125 task_name_deploy_vca = "Deploying VCA"
126
127 def __init__(self, msg, lcm_tasks, config, loop):
128 """
129 Init: connect to database, filesystem storage, and messaging
130 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
131 :return: None
132 """
133 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
134
135 self.db = Database().instance.db
136 self.fs = Filesystem().instance.fs
137 self.loop = loop
138 self.lcm_tasks = lcm_tasks
139 self.timeout = config["timeout"]
140 self.ro_config = config["ro_config"]
141 self.ng_ro = config["ro_config"].get("ng")
142 self.vca_config = config["VCA"].copy()
143
144 # create N2VC connector
145 self.n2vc = N2VCJujuConnector(
146 log=self.logger,
147 loop=self.loop,
148 on_update_db=self._on_update_n2vc_db,
149 fs=self.fs,
150 db=self.db,
151 )
152
153 self.conn_helm_ee = LCMHelmConn(
154 log=self.logger,
155 loop=self.loop,
156 vca_config=self.vca_config,
157 on_update_db=self._on_update_n2vc_db,
158 )
159
160 self.k8sclusterhelm2 = K8sHelmConnector(
161 kubectl_command=self.vca_config.get("kubectlpath"),
162 helm_command=self.vca_config.get("helmpath"),
163 log=self.logger,
164 on_update_db=None,
165 fs=self.fs,
166 db=self.db,
167 )
168
169 self.k8sclusterhelm3 = K8sHelm3Connector(
170 kubectl_command=self.vca_config.get("kubectlpath"),
171 helm_command=self.vca_config.get("helm3path"),
172 fs=self.fs,
173 log=self.logger,
174 db=self.db,
175 on_update_db=None,
176 )
177
178 self.k8sclusterjuju = K8sJujuConnector(
179 kubectl_command=self.vca_config.get("kubectlpath"),
180 juju_command=self.vca_config.get("jujupath"),
181 log=self.logger,
182 loop=self.loop,
183 on_update_db=self._on_update_k8s_db,
184 fs=self.fs,
185 db=self.db,
186 )
187
188 self.k8scluster_map = {
189 "helm-chart": self.k8sclusterhelm2,
190 "helm-chart-v3": self.k8sclusterhelm3,
191 "chart": self.k8sclusterhelm3,
192 "juju-bundle": self.k8sclusterjuju,
193 "juju": self.k8sclusterjuju,
194 }
195
196 self.vca_map = {
197 "lxc_proxy_charm": self.n2vc,
198 "native_charm": self.n2vc,
199 "k8s_proxy_charm": self.n2vc,
200 "helm": self.conn_helm_ee,
201 "helm-v3": self.conn_helm_ee,
202 }
203
204 # create RO client
205 self.RO = NgRoClient(self.loop, **self.ro_config)
206
207 @staticmethod
208 def increment_ip_mac(ip_mac, vm_index=1):
209 if not isinstance(ip_mac, str):
210 return ip_mac
211 try:
212 # try with ipv4 look for last dot
213 i = ip_mac.rfind(".")
214 if i > 0:
215 i += 1
216 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
217 # try with ipv6 or mac look for last colon. Operate in hex
218 i = ip_mac.rfind(":")
219 if i > 0:
220 i += 1
221 # format in hex, len can be 2 for mac or 4 for ipv6
222 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
223 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
224 )
225 except Exception:
226 pass
227 return None
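# Illustrative examples (hypothetical addresses) of the behaviour implemented above:
#   increment_ip_mac("10.0.0.4", vm_index=2)           -> "10.0.0.6"           (IPv4: last octet + 2)
#   increment_ip_mac("52:54:00:aa:bb:10", vm_index=2)  -> "52:54:00:aa:bb:12"  (MAC/IPv6: hex increment of last group)
#   increment_ip_mac(None)                             -> None                 (non-strings are returned unchanged)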
228
229 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
230
231 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
232
233 try:
234 # TODO filter RO descriptor fields...
235
236 # write to database
237 db_dict = dict()
238 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
239 db_dict["deploymentStatus"] = ro_descriptor
240 self.update_db_2("nsrs", nsrs_id, db_dict)
241
242 except Exception as e:
243 self.logger.warn(
244 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
245 )
246
247 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
248
249 # remove last dot from path (if exists)
250 if path.endswith("."):
251 path = path[:-1]
252
253 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
254 # .format(table, filter, path, updated_data))
255 try:
256
257 nsr_id = filter.get("_id")
258
259 # read ns record from database
260 nsr = self.db.get_one(table="nsrs", q_filter=filter)
261 current_ns_status = nsr.get("nsState")
262
263 # get vca status for NS
264 status_dict = await self.n2vc.get_status(
265 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
266 )
267
268 # vcaStatus
269 db_dict = dict()
270 db_dict["vcaStatus"] = status_dict
271 await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
272
273 # update configurationStatus for this VCA
274 try:
275 vca_index = int(path[path.rfind(".") + 1 :])
276
277 vca_list = deep_get(
278 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
279 )
280 vca_status = vca_list[vca_index].get("status")
281
282 configuration_status_list = nsr.get("configurationStatus")
283 config_status = configuration_status_list[vca_index].get("status")
284
285 if config_status == "BROKEN" and vca_status != "failed":
286 db_dict["configurationStatus"][vca_index] = "READY"
287 elif config_status != "BROKEN" and vca_status == "failed":
288 db_dict["configurationStatus"][vca_index] = "BROKEN"
289 except Exception as e:
290 # not update configurationStatus
291 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
292
293 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
294 # if nsState = 'DEGRADED' check if all is OK
295 is_degraded = False
296 if current_ns_status in ("READY", "DEGRADED"):
297 error_description = ""
298 # check machines
299 if status_dict.get("machines"):
300 for machine_id in status_dict.get("machines"):
301 machine = status_dict.get("machines").get(machine_id)
302 # check machine agent-status
303 if machine.get("agent-status"):
304 s = machine.get("agent-status").get("status")
305 if s != "started":
306 is_degraded = True
307 error_description += (
308 "machine {} agent-status={} ; ".format(
309 machine_id, s
310 )
311 )
312 # check machine instance status
313 if machine.get("instance-status"):
314 s = machine.get("instance-status").get("status")
315 if s != "running":
316 is_degraded = True
317 error_description += (
318 "machine {} instance-status={} ; ".format(
319 machine_id, s
320 )
321 )
322 # check applications
323 if status_dict.get("applications"):
324 for app_id in status_dict.get("applications"):
325 app = status_dict.get("applications").get(app_id)
326 # check application status
327 if app.get("status"):
328 s = app.get("status").get("status")
329 if s != "active":
330 is_degraded = True
331 error_description += (
332 "application {} status={} ; ".format(app_id, s)
333 )
334
335 if error_description:
336 db_dict["errorDescription"] = error_description
337 if current_ns_status == "READY" and is_degraded:
338 db_dict["nsState"] = "DEGRADED"
339 if current_ns_status == "DEGRADED" and not is_degraded:
340 db_dict["nsState"] = "READY"
341
342 # write to database
343 self.update_db_2("nsrs", nsr_id, db_dict)
344
345 except (asyncio.CancelledError, asyncio.TimeoutError):
346 raise
347 except Exception as e:
348 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
349
350 async def _on_update_k8s_db(
351 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
352 ):
353 """
354 Update the VCA status in the NSR record
355 :param cluster_uuid: UUID of a k8s cluster
356 :param kdu_instance: The unique name of the KDU instance
357 :param filter: To get nsr_id
358 :return: none
359 """
360
361 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
362 # .format(cluster_uuid, kdu_instance, filter))
363
364 try:
365 nsr_id = filter.get("_id")
366
367 # get vca status for NS
368 vca_status = await self.k8sclusterjuju.status_kdu(
369 cluster_uuid,
370 kdu_instance,
371 complete_status=True,
372 yaml_format=False,
373 vca_id=vca_id,
374 )
375 # vcaStatus
376 db_dict = dict()
377 db_dict["vcaStatus"] = {nsr_id: vca_status}
378
379 await self.k8sclusterjuju.update_vca_status(
380 db_dict["vcaStatus"],
381 kdu_instance,
382 vca_id=vca_id,
383 )
384
385 # write to database
386 self.update_db_2("nsrs", nsr_id, db_dict)
387
388 except (asyncio.CancelledError, asyncio.TimeoutError):
389 raise
390 except Exception as e:
391 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
392
393 @staticmethod
394 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
395 try:
396 env = Environment(undefined=StrictUndefined)
397 template = env.from_string(cloud_init_text)
398 return template.render(additional_params or {})
399 except UndefinedError as e:
400 raise LcmException(
401 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
402 "file, must be provided in the instantiation parameters inside the "
403 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
404 )
405 except (TemplateError, TemplateNotFound) as e:
406 raise LcmException(
407 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
408 vnfd_id, vdu_id, e
409 )
410 )
411
412 def _get_vdu_cloud_init_content(self, vdu, vnfd):
413 cloud_init_content = cloud_init_file = None
414 try:
415 if vdu.get("cloud-init-file"):
416 base_folder = vnfd["_admin"]["storage"]
417 if base_folder["pkg-dir"]:
418 cloud_init_file = "{}/{}/cloud_init/{}".format(
419 base_folder["folder"],
420 base_folder["pkg-dir"],
421 vdu["cloud-init-file"],
422 )
423 else:
424 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
425 base_folder["folder"],
426 vdu["cloud-init-file"],
427 )
428 with self.fs.file_open(cloud_init_file, "r") as ci_file:
429 cloud_init_content = ci_file.read()
430 elif vdu.get("cloud-init"):
431 cloud_init_content = vdu["cloud-init"]
432
433 return cloud_init_content
434 except FsException as e:
435 raise LcmException(
436 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
437 vnfd["id"], vdu["id"], cloud_init_file, e
438 )
439 )
440
441 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
442 vdur = next(
443 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
444 )
445 additional_params = vdur.get("additionalParams")
446 return parse_yaml_strings(additional_params)
447
448 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
449 """
450 Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
451 :param vnfd: input vnfd
452 :param new_id: overrides vnf id if provided
453 :param additionalParams: Instantiation params for VNFs provided
454 :param nsrId: Id of the NSR
455 :return: copy of vnfd
456 """
457 vnfd_RO = deepcopy(vnfd)
458 # remove keys not used by RO: configuration, monitoring, scaling and internal keys
459 vnfd_RO.pop("_id", None)
460 vnfd_RO.pop("_admin", None)
461 vnfd_RO.pop("monitoring-param", None)
462 vnfd_RO.pop("scaling-group-descriptor", None)
463 vnfd_RO.pop("kdu", None)
464 vnfd_RO.pop("k8s-cluster", None)
465 if new_id:
466 vnfd_RO["id"] = new_id
467
468 # cloud-init and cloud-init-file are not passed to RO; remove them from each vdu
469 for vdu in get_iterable(vnfd_RO, "vdu"):
470 vdu.pop("cloud-init-file", None)
471 vdu.pop("cloud-init", None)
472 return vnfd_RO
473
474 @staticmethod
475 def ip_profile_2_RO(ip_profile):
476 RO_ip_profile = deepcopy(ip_profile)
477 if "dns-server" in RO_ip_profile:
478 if isinstance(RO_ip_profile["dns-server"], list):
479 RO_ip_profile["dns-address"] = []
480 for ds in RO_ip_profile.pop("dns-server"):
481 RO_ip_profile["dns-address"].append(ds["address"])
482 else:
483 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
484 if RO_ip_profile.get("ip-version") == "ipv4":
485 RO_ip_profile["ip-version"] = "IPv4"
486 if RO_ip_profile.get("ip-version") == "ipv6":
487 RO_ip_profile["ip-version"] = "IPv6"
488 if "dhcp-params" in RO_ip_profile:
489 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
490 return RO_ip_profile
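# Illustrative example (hypothetical values) of the OSM -> RO ip-profile mapping above:
#   input:  {"ip-version": "ipv4", "dns-server": [{"address": "8.8.8.8"}], "dhcp-params": {"enabled": True}}
#   output: {"ip-version": "IPv4", "dns-address": ["8.8.8.8"], "dhcp": {"enabled": True}}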
491
492 def _get_ro_vim_id_for_vim_account(self, vim_account):
493 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
494 if db_vim["_admin"]["operationalState"] != "ENABLED":
495 raise LcmException(
496 "VIM={} is not available. operationalState={}".format(
497 vim_account, db_vim["_admin"]["operationalState"]
498 )
499 )
500 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
501 return RO_vim_id
502
503 def get_ro_wim_id_for_wim_account(self, wim_account):
504 if isinstance(wim_account, str):
505 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
506 if db_wim["_admin"]["operationalState"] != "ENABLED":
507 raise LcmException(
508 "WIM={} is not available. operationalState={}".format(
509 wim_account, db_wim["_admin"]["operationalState"]
510 )
511 )
512 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
513 return RO_wim_id
514 else:
515 return wim_account
516
517 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
518
519 db_vdu_push_list = []
520 db_update = {"_admin.modified": time()}
521 if vdu_create:
522 for vdu_id, vdu_count in vdu_create.items():
523 vdur = next(
524 (
525 vdur
526 for vdur in reversed(db_vnfr["vdur"])
527 if vdur["vdu-id-ref"] == vdu_id
528 ),
529 None,
530 )
531 if not vdur:
532 raise LcmException(
533 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
534 vdu_id
535 )
536 )
537
538 for count in range(vdu_count):
539 vdur_copy = deepcopy(vdur)
540 vdur_copy["status"] = "BUILD"
541 vdur_copy["status-detailed"] = None
542 vdur_copy["ip-address"] = None
543 vdur_copy["_id"] = str(uuid4())
544 vdur_copy["count-index"] += count + 1
545 vdur_copy["id"] = "{}-{}".format(
546 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
547 )
548 vdur_copy.pop("vim_info", None)
549 for iface in vdur_copy["interfaces"]:
550 if iface.get("fixed-ip"):
551 iface["ip-address"] = self.increment_ip_mac(
552 iface["ip-address"], count + 1
553 )
554 else:
555 iface.pop("ip-address", None)
556 if iface.get("fixed-mac"):
557 iface["mac-address"] = self.increment_ip_mac(
558 iface["mac-address"], count + 1
559 )
560 else:
561 iface.pop("mac-address", None)
562 iface.pop(
563 "mgmt_vnf", None
564 ) # only the first vdu can be the management vdu of the vnf
565 db_vdu_push_list.append(vdur_copy)
566 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
567 if vdu_delete:
568 for vdu_id, vdu_count in vdu_delete.items():
569 if mark_delete:
570 indexes_to_delete = [
571 iv[0]
572 for iv in enumerate(db_vnfr["vdur"])
573 if iv[1]["vdu-id-ref"] == vdu_id
574 ]
575 db_update.update(
576 {
577 "vdur.{}.status".format(i): "DELETING"
578 for i in indexes_to_delete[-vdu_count:]
579 }
580 )
581 else:
582 # they must be deleted one by one because common.db does not allow bulk deletion
583 vdus_to_delete = [
584 v
585 for v in reversed(db_vnfr["vdur"])
586 if v["vdu-id-ref"] == vdu_id
587 ]
588 for vdu in vdus_to_delete[:vdu_count]:
589 self.db.set_one(
590 "vnfrs",
591 {"_id": db_vnfr["_id"]},
592 None,
593 pull={"vdur": {"_id": vdu["_id"]}},
594 )
595 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
596 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
597 # modify passed dictionary db_vnfr
598 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
599 db_vnfr["vdur"] = db_vnfr_["vdur"]
600
601 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
602 """
603 Updates database nsr with the RO info for the created vld
604 :param ns_update_nsr: dictionary to be filled with the updated info
605 :param db_nsr: content of db_nsr. This is also modified
606 :param nsr_desc_RO: nsr descriptor from RO
607 :return: Nothing, LcmException is raised on errors
608 """
609
610 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
611 for net_RO in get_iterable(nsr_desc_RO, "nets"):
612 if vld["id"] != net_RO.get("ns_net_osm_id"):
613 continue
614 vld["vim-id"] = net_RO.get("vim_net_id")
615 vld["name"] = net_RO.get("vim_name")
616 vld["status"] = net_RO.get("status")
617 vld["status-detailed"] = net_RO.get("error_msg")
618 ns_update_nsr["vld.{}".format(vld_index)] = vld
619 break
620 else:
621 raise LcmException(
622 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
623 )
624
625 def set_vnfr_at_error(self, db_vnfrs, error_text):
626 try:
627 for db_vnfr in db_vnfrs.values():
628 vnfr_update = {"status": "ERROR"}
629 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
630 if "status" not in vdur:
631 vdur["status"] = "ERROR"
632 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
633 if error_text:
634 vdur["status-detailed"] = str(error_text)
635 vnfr_update[
636 "vdur.{}.status-detailed".format(vdu_index)
637 ] = "ERROR"
638 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
639 except DbException as e:
640 self.logger.error("Cannot update vnf. {}".format(e))
641
642 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
643 """
644 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
645 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
646 :param nsr_desc_RO: nsr descriptor from RO
647 :return: Nothing, LcmException is raised on errors
648 """
649 for vnf_index, db_vnfr in db_vnfrs.items():
650 for vnf_RO in nsr_desc_RO["vnfs"]:
651 if vnf_RO["member_vnf_index"] != vnf_index:
652 continue
653 vnfr_update = {}
654 if vnf_RO.get("ip_address"):
655 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
656 "ip_address"
657 ].split(";")[0]
658 elif not db_vnfr.get("ip-address"):
659 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
660 raise LcmExceptionNoMgmtIP(
661 "ns member_vnf_index '{}' has no IP address".format(
662 vnf_index
663 )
664 )
665
666 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
667 vdur_RO_count_index = 0
668 if vdur.get("pdu-type"):
669 continue
670 for vdur_RO in get_iterable(vnf_RO, "vms"):
671 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
672 continue
673 if vdur["count-index"] != vdur_RO_count_index:
674 vdur_RO_count_index += 1
675 continue
676 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
677 if vdur_RO.get("ip_address"):
678 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
679 else:
680 vdur["ip-address"] = None
681 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
682 vdur["name"] = vdur_RO.get("vim_name")
683 vdur["status"] = vdur_RO.get("status")
684 vdur["status-detailed"] = vdur_RO.get("error_msg")
685 for ifacer in get_iterable(vdur, "interfaces"):
686 for interface_RO in get_iterable(vdur_RO, "interfaces"):
687 if ifacer["name"] == interface_RO.get("internal_name"):
688 ifacer["ip-address"] = interface_RO.get(
689 "ip_address"
690 )
691 ifacer["mac-address"] = interface_RO.get(
692 "mac_address"
693 )
694 break
695 else:
696 raise LcmException(
697 "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
698 "from VIM info".format(
699 vnf_index, vdur["vdu-id-ref"], ifacer["name"]
700 )
701 )
702 vnfr_update["vdur.{}".format(vdu_index)] = vdur
703 break
704 else:
705 raise LcmException(
706 "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
707 "VIM info".format(
708 vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
709 )
710 )
711
712 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
713 for net_RO in get_iterable(nsr_desc_RO, "nets"):
714 if vld["id"] != net_RO.get("vnf_net_osm_id"):
715 continue
716 vld["vim-id"] = net_RO.get("vim_net_id")
717 vld["name"] = net_RO.get("vim_name")
718 vld["status"] = net_RO.get("status")
719 vld["status-detailed"] = net_RO.get("error_msg")
720 vnfr_update["vld.{}".format(vld_index)] = vld
721 break
722 else:
723 raise LcmException(
724 "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
725 vnf_index, vld["id"]
726 )
727 )
728
729 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
730 break
731
732 else:
733 raise LcmException(
734 "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
735 vnf_index
736 )
737 )
738
739 def _get_ns_config_info(self, nsr_id):
740 """
741 Generates a mapping between vnf,vdu elements and the N2VC id
742 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
743 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
744 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
745 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
746 """
747 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
748 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
749 mapping = {}
750 ns_config_info = {"osm-config-mapping": mapping}
751 for vca in vca_deployed_list:
752 if not vca["member-vnf-index"]:
753 continue
754 if not vca["vdu_id"]:
755 mapping[vca["member-vnf-index"]] = vca["application"]
756 else:
757 mapping[
758 "{}.{}.{}".format(
759 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
760 )
761 ] = vca["application"]
762 return ns_config_info
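# Example shape of the returned mapping (illustrative values):
#   {"osm-config-mapping": {
#       "1": "app-vnf-1",               # vnf-level configuration
#       "1.mgmtVM.0": "app-vnf-1-vdu",  # vdu-level: <member-vnf-index>.<vdu-id>.<replica>
#   }}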
763
764 async def _instantiate_ng_ro(
765 self,
766 logging_text,
767 nsr_id,
768 nsd,
769 db_nsr,
770 db_nslcmop,
771 db_vnfrs,
772 db_vnfds,
773 n2vc_key_list,
774 stage,
775 start_deploy,
776 timeout_ns_deploy,
777 ):
778
779 db_vims = {}
780
781 def get_vim_account(vim_account_id):
782 nonlocal db_vims
783 if vim_account_id in db_vims:
784 return db_vims[vim_account_id]
785 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
786 db_vims[vim_account_id] = db_vim
787 return db_vim
788
789 # modify target_vld info with instantiation parameters
790 def parse_vld_instantiation_params(
791 target_vim, target_vld, vld_params, target_sdn
792 ):
793 if vld_params.get("ip-profile"):
794 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
795 "ip-profile"
796 ]
797 if vld_params.get("provider-network"):
798 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
799 "provider-network"
800 ]
801 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
802 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
803 "provider-network"
804 ]["sdn-ports"]
805 if vld_params.get("wimAccountId"):
806 target_wim = "wim:{}".format(vld_params["wimAccountId"])
807 target_vld["vim_info"][target_wim] = {}
808 for param in ("vim-network-name", "vim-network-id"):
809 if vld_params.get(param):
810 if isinstance(vld_params[param], dict):
811 for vim, vim_net in vld_params[param].items():
812 other_target_vim = "vim:" + vim
813 populate_dict(
814 target_vld["vim_info"],
815 (other_target_vim, param.replace("-", "_")),
816 vim_net,
817 )
818 else: # isinstance str
819 target_vld["vim_info"][target_vim][
820 param.replace("-", "_")
821 ] = vld_params[param]
822 if vld_params.get("common_id"):
823 target_vld["common_id"] = vld_params.get("common_id")
824
825 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
826 def update_ns_vld_target(target, ns_params):
827 for vnf_params in ns_params.get("vnf", ()):
828 if vnf_params.get("vimAccountId"):
829 target_vnf = next(
830 (
831 vnfr
832 for vnfr in db_vnfrs.values()
833 if vnf_params["member-vnf-index"]
834 == vnfr["member-vnf-index-ref"]
835 ),
836 None,
837 )
838 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
839 for a_index, a_vld in enumerate(target["ns"]["vld"]):
840 target_vld = find_in_list(
841 get_iterable(vdur, "interfaces"),
842 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
843 )
844 if target_vld:
845 if vnf_params.get("vimAccountId") not in a_vld.get(
846 "vim_info", {}
847 ):
848 target["ns"]["vld"][a_index].get("vim_info").update(
849 {
850 "vim:{}".format(vnf_params["vimAccountId"]): {
851 "vim_network_name": ""
852 }
853 }
854 )
855
856 nslcmop_id = db_nslcmop["_id"]
857 target = {
858 "name": db_nsr["name"],
859 "ns": {"vld": []},
860 "vnf": [],
861 "image": deepcopy(db_nsr["image"]),
862 "flavor": deepcopy(db_nsr["flavor"]),
863 "action_id": nslcmop_id,
864 "cloud_init_content": {},
865 }
866 for image in target["image"]:
867 image["vim_info"] = {}
868 for flavor in target["flavor"]:
869 flavor["vim_info"] = {}
870 if db_nsr.get("affinity-or-anti-affinity-group"):
871 target["affinity-or-anti-affinity-group"] = deepcopy(db_nsr["affinity-or-anti-affinity-group"])
872 for affinity_or_anti_affinity_group in target["affinity-or-anti-affinity-group"]:
873 affinity_or_anti_affinity_group["vim_info"] = {}
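# For reference (illustrative content; the exact fields come from the NSR), each copied
# group entry keeps its descriptor data and gets an empty "vim_info" dict, e.g.
#   {"id": "0", "name": "anti-aff-group", "type": "anti-affinity", ..., "vim_info": {}}
# The per-VIM entry under "vim_info" is filled later, when each vdur that references
# the group is processed (see the "Affinity groups" block further below).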
874
875 if db_nslcmop.get("lcmOperationType") != "instantiate":
876 # get parameters of instantiation:
877 db_nslcmop_instantiate = self.db.get_list(
878 "nslcmops",
879 {
880 "nsInstanceId": db_nslcmop["nsInstanceId"],
881 "lcmOperationType": "instantiate",
882 },
883 )[-1]
884 ns_params = db_nslcmop_instantiate.get("operationParams")
885 else:
886 ns_params = db_nslcmop.get("operationParams")
887 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
888 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
889
890 cp2target = {}
891 for vld_index, vld in enumerate(db_nsr.get("vld")):
892 target_vim = "vim:{}".format(ns_params["vimAccountId"])
893 target_vld = {
894 "id": vld["id"],
895 "name": vld["name"],
896 "mgmt-network": vld.get("mgmt-network", False),
897 "type": vld.get("type"),
898 "vim_info": {
899 target_vim: {
900 "vim_network_name": vld.get("vim-network-name"),
901 "vim_account_id": ns_params["vimAccountId"],
902 }
903 },
904 }
905 # check if this network needs SDN assist
906 if vld.get("pci-interfaces"):
907 db_vim = get_vim_account(ns_params["vimAccountId"])
908 sdnc_id = db_vim["config"].get("sdn-controller")
909 if sdnc_id:
910 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
911 target_sdn = "sdn:{}".format(sdnc_id)
912 target_vld["vim_info"][target_sdn] = {
913 "sdn": True,
914 "target_vim": target_vim,
915 "vlds": [sdn_vld],
916 "type": vld.get("type"),
917 }
918
919 nsd_vnf_profiles = get_vnf_profiles(nsd)
920 for nsd_vnf_profile in nsd_vnf_profiles:
921 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
922 if cp["virtual-link-profile-id"] == vld["id"]:
923 cp2target[
924 "member_vnf:{}.{}".format(
925 cp["constituent-cpd-id"][0][
926 "constituent-base-element-id"
927 ],
928 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
929 )
930 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
931
932 # check at nsd descriptor, if there is an ip-profile
933 vld_params = {}
934 nsd_vlp = find_in_list(
935 get_virtual_link_profiles(nsd),
936 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
937 == vld["id"],
938 )
939 if (
940 nsd_vlp
941 and nsd_vlp.get("virtual-link-protocol-data")
942 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
943 ):
944 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
945 "l3-protocol-data"
946 ]
947 ip_profile_dest_data = {}
948 if "ip-version" in ip_profile_source_data:
949 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
950 "ip-version"
951 ]
952 if "cidr" in ip_profile_source_data:
953 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
954 "cidr"
955 ]
956 if "gateway-ip" in ip_profile_source_data:
957 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
958 "gateway-ip"
959 ]
960 if "dhcp-enabled" in ip_profile_source_data:
961 ip_profile_dest_data["dhcp-params"] = {
962 "enabled": ip_profile_source_data["dhcp-enabled"]
963 }
964 vld_params["ip-profile"] = ip_profile_dest_data
965
966 # update vld_params with instantiation params
967 vld_instantiation_params = find_in_list(
968 get_iterable(ns_params, "vld"),
969 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
970 )
971 if vld_instantiation_params:
972 vld_params.update(vld_instantiation_params)
973 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
974 target["ns"]["vld"].append(target_vld)
975 # Update the target ns_vld if the vnf vim_account is overridden by instantiation params
976 update_ns_vld_target(target, ns_params)
977
978 for vnfr in db_vnfrs.values():
979 vnfd = find_in_list(
980 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
981 )
982 vnf_params = find_in_list(
983 get_iterable(ns_params, "vnf"),
984 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
985 )
986 target_vnf = deepcopy(vnfr)
987 target_vim = "vim:{}".format(vnfr["vim-account-id"])
988 for vld in target_vnf.get("vld", ()):
989 # check if connected to a ns.vld, to fill target
990 vnf_cp = find_in_list(
991 vnfd.get("int-virtual-link-desc", ()),
992 lambda cpd: cpd.get("id") == vld["id"],
993 )
994 if vnf_cp:
995 ns_cp = "member_vnf:{}.{}".format(
996 vnfr["member-vnf-index-ref"], vnf_cp["id"]
997 )
998 if cp2target.get(ns_cp):
999 vld["target"] = cp2target[ns_cp]
1000
1001 vld["vim_info"] = {
1002 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1003 }
1004 # check if this network needs SDN assist
1005 target_sdn = None
1006 if vld.get("pci-interfaces"):
1007 db_vim = get_vim_account(vnfr["vim-account-id"])
1008 sdnc_id = db_vim["config"].get("sdn-controller")
1009 if sdnc_id:
1010 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1011 target_sdn = "sdn:{}".format(sdnc_id)
1012 vld["vim_info"][target_sdn] = {
1013 "sdn": True,
1014 "target_vim": target_vim,
1015 "vlds": [sdn_vld],
1016 "type": vld.get("type"),
1017 }
1018
1019 # check at vnfd descriptor, if there is an ip-profile
1020 vld_params = {}
1021 vnfd_vlp = find_in_list(
1022 get_virtual_link_profiles(vnfd),
1023 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1024 )
1025 if (
1026 vnfd_vlp
1027 and vnfd_vlp.get("virtual-link-protocol-data")
1028 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1029 ):
1030 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1031 "l3-protocol-data"
1032 ]
1033 ip_profile_dest_data = {}
1034 if "ip-version" in ip_profile_source_data:
1035 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1036 "ip-version"
1037 ]
1038 if "cidr" in ip_profile_source_data:
1039 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1040 "cidr"
1041 ]
1042 if "gateway-ip" in ip_profile_source_data:
1043 ip_profile_dest_data[
1044 "gateway-address"
1045 ] = ip_profile_source_data["gateway-ip"]
1046 if "dhcp-enabled" in ip_profile_source_data:
1047 ip_profile_dest_data["dhcp-params"] = {
1048 "enabled": ip_profile_source_data["dhcp-enabled"]
1049 }
1050
1051 vld_params["ip-profile"] = ip_profile_dest_data
1052 # update vld_params with instantiation params
1053 if vnf_params:
1054 vld_instantiation_params = find_in_list(
1055 get_iterable(vnf_params, "internal-vld"),
1056 lambda i_vld: i_vld["name"] == vld["id"],
1057 )
1058 if vld_instantiation_params:
1059 vld_params.update(vld_instantiation_params)
1060 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1061
1062 vdur_list = []
1063 for vdur in target_vnf.get("vdur", ()):
1064 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1065 continue # This vdu must not be created
1066 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1067
1068 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1069
1070 if ssh_keys_all:
1071 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1072 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1073 if (
1074 vdu_configuration
1075 and vdu_configuration.get("config-access")
1076 and vdu_configuration.get("config-access").get("ssh-access")
1077 ):
1078 vdur["ssh-keys"] = ssh_keys_all
1079 vdur["ssh-access-required"] = vdu_configuration[
1080 "config-access"
1081 ]["ssh-access"]["required"]
1082 elif (
1083 vnf_configuration
1084 and vnf_configuration.get("config-access")
1085 and vnf_configuration.get("config-access").get("ssh-access")
1086 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1087 ):
1088 vdur["ssh-keys"] = ssh_keys_all
1089 vdur["ssh-access-required"] = vnf_configuration[
1090 "config-access"
1091 ]["ssh-access"]["required"]
1092 elif ssh_keys_instantiation and find_in_list(
1093 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1094 ):
1095 vdur["ssh-keys"] = ssh_keys_instantiation
1096
1097 self.logger.debug("NS > vdur > {}".format(vdur))
1098
1099 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1100 # cloud-init
1101 if vdud.get("cloud-init-file"):
1102 vdur["cloud-init"] = "{}:file:{}".format(
1103 vnfd["_id"], vdud.get("cloud-init-file")
1104 )
1105 # read the file and put its content at target.cloud_init_content, so ng_ro does not need to use the shared package storage
1106 if vdur["cloud-init"] not in target["cloud_init_content"]:
1107 base_folder = vnfd["_admin"]["storage"]
1108 if base_folder["pkg-dir"]:
1109 cloud_init_file = "{}/{}/cloud_init/{}".format(
1110 base_folder["folder"],
1111 base_folder["pkg-dir"],
1112 vdud.get("cloud-init-file"),
1113 )
1114 else:
1115 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1116 base_folder["folder"],
1117 vdud.get("cloud-init-file"),
1118 )
1119 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1120 target["cloud_init_content"][
1121 vdur["cloud-init"]
1122 ] = ci_file.read()
1123 elif vdud.get("cloud-init"):
1124 vdur["cloud-init"] = "{}:vdu:{}".format(
1125 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1126 )
1127 # put content at target.cloud_init_content, so ng_ro does not need to read the vnfd descriptor
1128 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1129 "cloud-init"
1130 ]
1131 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1132 deploy_params_vdu = self._format_additional_params(
1133 vdur.get("additionalParams") or {}
1134 )
1135 deploy_params_vdu["OSM"] = get_osm_params(
1136 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1137 )
1138 vdur["additionalParams"] = deploy_params_vdu
1139
1140 # flavor
1141 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1142 if target_vim not in ns_flavor["vim_info"]:
1143 ns_flavor["vim_info"][target_vim] = {}
1144
1145 # deal with images
1146 # in case alternative images are provided, check whether one should be applied
1147 # for this vim_type and, if so, switch the vdu image accordingly
1148 ns_image_id = int(vdur["ns-image-id"])
1149 if vdur.get("alt-image-ids"):
1150 db_vim = get_vim_account(vnfr["vim-account-id"])
1151 vim_type = db_vim["vim_type"]
1152 for alt_image_id in vdur.get("alt-image-ids"):
1153 ns_alt_image = target["image"][int(alt_image_id)]
1154 if vim_type == ns_alt_image.get("vim-type"):
1155 # must use alternative image
1156 self.logger.debug(
1157 "use alternative image id: {}".format(alt_image_id)
1158 )
1159 ns_image_id = alt_image_id
1160 vdur["ns-image-id"] = ns_image_id
1161 break
1162 ns_image = target["image"][int(ns_image_id)]
1163 if target_vim not in ns_image["vim_info"]:
1164 ns_image["vim_info"][target_vim] = {}
1165
1166 # Affinity groups
1167 if vdur.get("affinity-or-anti-affinity-group-id"):
1168 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1169 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1170 if target_vim not in ns_ags["vim_info"]:
1171 ns_ags["vim_info"][target_vim] = {}
1172
1173 vdur["vim_info"] = {target_vim: {}}
1174 # instantiation parameters
1175 # if vnf_params:
1176 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1177 # vdud["id"]), None)
1178 vdur_list.append(vdur)
1179 target_vnf["vdur"] = vdur_list
1180 target["vnf"].append(target_vnf)
1181
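# At this point "target" is the complete deployment request for NG-RO; its top-level
# keys are "name", "ns", "vnf", "image", "flavor", "action_id", "cloud_init_content"
# and, when the NSR defines them, "affinity-or-anti-affinity-group".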
1182 desc = await self.RO.deploy(nsr_id, target)
1183 self.logger.debug("RO return > {}".format(desc))
1184 action_id = desc["action_id"]
1185 await self._wait_ng_ro(
1186 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1187 )
1188
1189 # Updating NSR
1190 db_nsr_update = {
1191 "_admin.deployed.RO.operational-status": "running",
1192 "detailed-status": " ".join(stage),
1193 }
1194 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1195 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1196 self._write_op_status(nslcmop_id, stage)
1197 self.logger.debug(
1198 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1199 )
1200 return
1201
1202 async def _wait_ng_ro(
1203 self,
1204 nsr_id,
1205 action_id,
1206 nslcmop_id=None,
1207 start_time=None,
1208 timeout=600,
1209 stage=None,
1210 ):
1211 detailed_status_old = None
1212 db_nsr_update = {}
1213 start_time = start_time or time()
1214 while time() <= start_time + timeout:
1215 desc_status = await self.RO.status(nsr_id, action_id)
1216 self.logger.debug("Wait NG RO > {}".format(desc_status))
1217 if desc_status["status"] == "FAILED":
1218 raise NgRoException(desc_status["details"])
1219 elif desc_status["status"] == "BUILD":
1220 if stage:
1221 stage[2] = "VIM: ({})".format(desc_status["details"])
1222 elif desc_status["status"] == "DONE":
1223 if stage:
1224 stage[2] = "Deployed at VIM"
1225 break
1226 else:
1227 assert False, "ROclient.check_ns_status returns unknown {}".format(
1228 desc_status["status"]
1229 )
1230 if stage and nslcmop_id and stage[2] != detailed_status_old:
1231 detailed_status_old = stage[2]
1232 db_nsr_update["detailed-status"] = " ".join(stage)
1233 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1234 self._write_op_status(nslcmop_id, stage)
1235 await asyncio.sleep(15, loop=self.loop)
1236 else: # timeout_ns_deploy
1237 raise NgRoException("Timeout waiting ns to deploy")
1238
1239 async def _terminate_ng_ro(
1240 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1241 ):
1242 db_nsr_update = {}
1243 failed_detail = []
1244 action_id = None
1245 start_deploy = time()
1246 try:
1247 target = {
1248 "ns": {"vld": []},
1249 "vnf": [],
1250 "image": [],
1251 "flavor": [],
1252 "action_id": nslcmop_id,
1253 }
1254 desc = await self.RO.deploy(nsr_id, target)
1255 action_id = desc["action_id"]
1256 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1257 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1258 self.logger.debug(
1259 logging_text
1260 + "ns terminate action at RO. action_id={}".format(action_id)
1261 )
1262
1263 # wait until done
1264 delete_timeout = 20 * 60 # 20 minutes
1265 await self._wait_ng_ro(
1266 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
1267 )
1268
1269 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1270 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1271 # delete all nsr
1272 await self.RO.delete(nsr_id)
1273 except Exception as e:
1274 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1275 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1276 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1277 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1278 self.logger.debug(
1279 logging_text + "RO_action_id={} already deleted".format(action_id)
1280 )
1281 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1282 failed_detail.append("delete conflict: {}".format(e))
1283 self.logger.debug(
1284 logging_text
1285 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1286 )
1287 else:
1288 failed_detail.append("delete error: {}".format(e))
1289 self.logger.error(
1290 logging_text
1291 + "RO_action_id={} delete error: {}".format(action_id, e)
1292 )
1293
1294 if failed_detail:
1295 stage[2] = "Error deleting from VIM"
1296 else:
1297 stage[2] = "Deleted from VIM"
1298 db_nsr_update["detailed-status"] = " ".join(stage)
1299 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1300 self._write_op_status(nslcmop_id, stage)
1301
1302 if failed_detail:
1303 raise LcmException("; ".join(failed_detail))
1304 return
1305
1306 async def instantiate_RO(
1307 self,
1308 logging_text,
1309 nsr_id,
1310 nsd,
1311 db_nsr,
1312 db_nslcmop,
1313 db_vnfrs,
1314 db_vnfds,
1315 n2vc_key_list,
1316 stage,
1317 ):
1318 """
1319 Instantiate at RO
1320 :param logging_text: prefix text to use at logging
1321 :param nsr_id: nsr identity
1322 :param nsd: database content of ns descriptor
1323 :param db_nsr: database content of ns record
1324 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1325 :param db_vnfrs:
1326 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1327 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1328 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1329 :return: None or exception
1330 """
1331 try:
1332 start_deploy = time()
1333 ns_params = db_nslcmop.get("operationParams")
1334 if ns_params and ns_params.get("timeout_ns_deploy"):
1335 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1336 else:
1337 timeout_ns_deploy = self.timeout.get(
1338 "ns_deploy", self.timeout_ns_deploy
1339 )
1340
1341 # Check for and optionally request placement optimization. Database will be updated if placement activated
1342 stage[2] = "Waiting for Placement."
1343 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1344 # after placement, update ns_params["vimAccountId"] if the current value is not used by any vnfr
1345 for vnfr in db_vnfrs.values():
1346 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1347 break
1348 else:
1349 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1350
1351 return await self._instantiate_ng_ro(
1352 logging_text,
1353 nsr_id,
1354 nsd,
1355 db_nsr,
1356 db_nslcmop,
1357 db_vnfrs,
1358 db_vnfds,
1359 n2vc_key_list,
1360 stage,
1361 start_deploy,
1362 timeout_ns_deploy,
1363 )
1364 except Exception as e:
1365 stage[2] = "ERROR deploying at VIM"
1366 self.set_vnfr_at_error(db_vnfrs, str(e))
1367 self.logger.error(
1368 "Error deploying at VIM {}".format(e),
1369 exc_info=not isinstance(
1370 e,
1371 (
1372 ROclient.ROClientException,
1373 LcmException,
1374 DbException,
1375 NgRoException,
1376 ),
1377 ),
1378 )
1379 raise
1380
1381 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1382 """
1383 Wait for kdu to be up, get ip address
1384 :param logging_text: prefix use for logging
1385 :param nsr_id:
1386 :param vnfr_id:
1387 :param kdu_name:
1388 :return: IP address
1389 """
1390
1391 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1392 nb_tries = 0
1393
1394 while nb_tries < 360:
1395 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1396 kdur = next(
1397 (
1398 x
1399 for x in get_iterable(db_vnfr, "kdur")
1400 if x.get("kdu-name") == kdu_name
1401 ),
1402 None,
1403 )
1404 if not kdur:
1405 raise LcmException(
1406 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1407 )
1408 if kdur.get("status"):
1409 if kdur["status"] in ("READY", "ENABLED"):
1410 return kdur.get("ip-address")
1411 else:
1412 raise LcmException(
1413 "target KDU={} is in error state".format(kdu_name)
1414 )
1415
1416 await asyncio.sleep(10, loop=self.loop)
1417 nb_tries += 1
1418 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1419
1420 async def wait_vm_up_insert_key_ro(
1421 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1422 ):
1423 """
1424 Wait for ip address at RO, and optionally, insert public key in virtual machine
1425 :param logging_text: prefix use for logging
1426 :param nsr_id:
1427 :param vnfr_id:
1428 :param vdu_id:
1429 :param vdu_index:
1430 :param pub_key: public ssh key to inject, None to skip
1431 :param user: user to apply the public ssh key
1432 :return: IP address
1433 """
1434
1435 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1436 ro_nsr_id = None
1437 ip_address = None
1438 nb_tries = 0
1439 target_vdu_id = None
1440 ro_retries = 0
1441
1442 while True:
1443
1444 ro_retries += 1
1445 if ro_retries >= 360: # 1 hour
1446 raise LcmException(
1447 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1448 )
1449
1450 await asyncio.sleep(10, loop=self.loop)
1451
1452 # get ip address
1453 if not target_vdu_id:
1454 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1455
1456 if not vdu_id: # for the VNF case
1457 if db_vnfr.get("status") == "ERROR":
1458 raise LcmException(
1459 "Cannot inject ssh-key because target VNF is in error state"
1460 )
1461 ip_address = db_vnfr.get("ip-address")
1462 if not ip_address:
1463 continue
1464 vdur = next(
1465 (
1466 x
1467 for x in get_iterable(db_vnfr, "vdur")
1468 if x.get("ip-address") == ip_address
1469 ),
1470 None,
1471 )
1472 else: # VDU case
1473 vdur = next(
1474 (
1475 x
1476 for x in get_iterable(db_vnfr, "vdur")
1477 if x.get("vdu-id-ref") == vdu_id
1478 and x.get("count-index") == vdu_index
1479 ),
1480 None,
1481 )
1482
1483 if (
1484 not vdur and len(db_vnfr.get("vdur", ())) == 1
1485 ): # If only one, this should be the target vdu
1486 vdur = db_vnfr["vdur"][0]
1487 if not vdur:
1488 raise LcmException(
1489 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1490 vnfr_id, vdu_id, vdu_index
1491 )
1492 )
1493 # New generation RO stores information at "vim_info"
1494 ng_ro_status = None
1495 target_vim = None
1496 if vdur.get("vim_info"):
1497 target_vim = next(
1498 t for t in vdur["vim_info"]
1499 ) # there should be only one key
1500 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1501 if (
1502 vdur.get("pdu-type")
1503 or vdur.get("status") == "ACTIVE"
1504 or ng_ro_status == "ACTIVE"
1505 ):
1506 ip_address = vdur.get("ip-address")
1507 if not ip_address:
1508 continue
1509 target_vdu_id = vdur["vdu-id-ref"]
1510 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1511 raise LcmException(
1512 "Cannot inject ssh-key because target VM is in error state"
1513 )
1514
1515 if not target_vdu_id:
1516 continue
1517
1518 # inject public key into machine
1519 if pub_key and user:
1520 self.logger.debug(logging_text + "Inserting RO key")
1521 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1522 if vdur.get("pdu-type"):
1523 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1524 return ip_address
1525 try:
1526 ro_vm_id = "{}-{}".format(
1527 db_vnfr["member-vnf-index-ref"], target_vdu_id
1528 ) # TODO add vdu_index
1529 if self.ng_ro:
1530 target = {
1531 "action": {
1532 "action": "inject_ssh_key",
1533 "key": pub_key,
1534 "user": user,
1535 },
1536 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1537 }
1538 desc = await self.RO.deploy(nsr_id, target)
1539 action_id = desc["action_id"]
1540 await self._wait_ng_ro(nsr_id, action_id, timeout=600)
1541 break
1542 else:
1543 # wait until NS is deployed at RO
1544 if not ro_nsr_id:
1545 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1546 ro_nsr_id = deep_get(
1547 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1548 )
1549 if not ro_nsr_id:
1550 continue
1551 result_dict = await self.RO.create_action(
1552 item="ns",
1553 item_id_name=ro_nsr_id,
1554 descriptor={
1555 "add_public_key": pub_key,
1556 "vms": [ro_vm_id],
1557 "user": user,
1558 },
1559 )
1560 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1561 if not result_dict or not isinstance(result_dict, dict):
1562 raise LcmException(
1563 "Unknown response from RO when injecting key"
1564 )
1565 for result in result_dict.values():
1566 if result.get("vim_result") == 200:
1567 break
1568 else:
1569 raise ROclient.ROClientException(
1570 "error injecting key: {}".format(
1571 result.get("description")
1572 )
1573 )
1574 break
1575 except NgRoException as e:
1576 raise LcmException(
1577 "Reaching max tries injecting key. Error: {}".format(e)
1578 )
1579 except ROclient.ROClientException as e:
1580 if not nb_tries:
1581 self.logger.debug(
1582 logging_text
1583 + "error injecting key: {}. Retrying until {} seconds".format(
1584 e, 20 * 10
1585 )
1586 )
1587 nb_tries += 1
1588 if nb_tries >= 20:
1589 raise LcmException(
1590 "Reaching max tries injecting key. Error: {}".format(e)
1591 )
1592 else:
1593 break
1594
1595 return ip_address
1596
1597 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1598 """
1599 Wait until dependent VCA deployments have finished. The NS waits for VNFs and VDUs; VNFs wait for VDUs
1600 """
1601 my_vca = vca_deployed_list[vca_index]
1602 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1603 # vdu or kdu: no dependencies
1604 return
1605 timeout = 300
1606 while timeout >= 0:
1607 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1608 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1609 configuration_status_list = db_nsr["configurationStatus"]
1610 for index, vca_deployed in enumerate(configuration_status_list):
1611 if index == vca_index:
1612 # myself
1613 continue
1614 if not my_vca.get("member-vnf-index") or (
1615 vca_deployed.get("member-vnf-index")
1616 == my_vca.get("member-vnf-index")
1617 ):
1618 internal_status = configuration_status_list[index].get("status")
1619 if internal_status == "READY":
1620 continue
1621 elif internal_status == "BROKEN":
1622 raise LcmException(
1623 "Configuration aborted because dependent charm/s has failed"
1624 )
1625 else:
1626 break
1627 else:
1628 # no dependencies, return
1629 return
1630 await asyncio.sleep(10)
1631 timeout -= 1
1632
1633 raise LcmException("Configuration aborted because dependent charm/s timeout")
1634
1635 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1636 vca_id = None
1637 if db_vnfr:
1638 vca_id = deep_get(db_vnfr, ("vca-id",))
1639 elif db_nsr:
1640 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1641 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1642 return vca_id
1643
1644 async def instantiate_N2VC(
1645 self,
1646 logging_text,
1647 vca_index,
1648 nsi_id,
1649 db_nsr,
1650 db_vnfr,
1651 vdu_id,
1652 kdu_name,
1653 vdu_index,
1654 config_descriptor,
1655 deploy_params,
1656 base_folder,
1657 nslcmop_id,
1658 stage,
1659 vca_type,
1660 vca_name,
1661 ee_config_descriptor,
1662 ):
1663 nsr_id = db_nsr["_id"]
1664 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1665 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1666 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1667 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1668 db_dict = {
1669 "collection": "nsrs",
1670 "filter": {"_id": nsr_id},
1671 "path": db_update_entry,
1672 }
1673 step = ""
1674 try:
1675
1676 element_type = "NS"
1677 element_under_configuration = nsr_id
1678
1679 vnfr_id = None
1680 if db_vnfr:
1681 vnfr_id = db_vnfr["_id"]
1682 osm_config["osm"]["vnf_id"] = vnfr_id
1683
1684 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1685
1686 if vca_type == "native_charm":
1687 index_number = 0
1688 else:
1689 index_number = vdu_index or 0
1690
1691 if vnfr_id:
1692 element_type = "VNF"
1693 element_under_configuration = vnfr_id
1694 namespace += ".{}-{}".format(vnfr_id, index_number)
1695 if vdu_id:
1696 namespace += ".{}-{}".format(vdu_id, index_number)
1697 element_type = "VDU"
1698 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1699 osm_config["osm"]["vdu_id"] = vdu_id
1700 elif kdu_name:
1701 namespace += ".{}".format(kdu_name)
1702 element_type = "KDU"
1703 element_under_configuration = kdu_name
1704 osm_config["osm"]["kdu_name"] = kdu_name
1705
1706 # Get artifact path
1707 if base_folder["pkg-dir"]:
1708 artifact_path = "{}/{}/{}/{}".format(
1709 base_folder["folder"],
1710 base_folder["pkg-dir"],
1711 "charms"
1712 if vca_type
1713 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1714 else "helm-charts",
1715 vca_name,
1716 )
1717 else:
1718 artifact_path = "{}/Scripts/{}/{}/".format(
1719 base_folder["folder"],
1720 "charms"
1721 if vca_type
1722 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1723 else "helm-charts",
1724 vca_name,
1725 )
1726
1727 self.logger.debug("Artifact path > {}".format(artifact_path))
1728
1729 # get initial_config_primitive_list that applies to this element
1730 initial_config_primitive_list = config_descriptor.get(
1731 "initial-config-primitive"
1732 )
1733
1734 self.logger.debug(
1735 "Initial config primitive list > {}".format(
1736 initial_config_primitive_list
1737 )
1738 )
1739
1740 # add config if not present for NS charm
1741 ee_descriptor_id = ee_config_descriptor.get("id")
1742 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1743 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1744 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1745 )
1746
1747 self.logger.debug(
1748 "Initial config primitive list #2 > {}".format(
1749 initial_config_primitive_list
1750 )
1751 )
1752 # n2vc_redesign STEP 3.1
1753 # find old ee_id if exists
1754 ee_id = vca_deployed.get("ee_id")
1755
1756 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1757 # create or register execution environment in VCA
1758 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1759
1760 self._write_configuration_status(
1761 nsr_id=nsr_id,
1762 vca_index=vca_index,
1763 status="CREATING",
1764 element_under_configuration=element_under_configuration,
1765 element_type=element_type,
1766 )
1767
1768 step = "create execution environment"
1769 self.logger.debug(logging_text + step)
1770
1771 ee_id = None
1772 credentials = None
1773 if vca_type == "k8s_proxy_charm":
1774 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1775 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1776 namespace=namespace,
1777 artifact_path=artifact_path,
1778 db_dict=db_dict,
1779 vca_id=vca_id,
1780 )
1781 elif vca_type == "helm" or vca_type == "helm-v3":
1782 ee_id, credentials = await self.vca_map[
1783 vca_type
1784 ].create_execution_environment(
1785 namespace=namespace,
1786 reuse_ee_id=ee_id,
1787 db_dict=db_dict,
1788 config=osm_config,
1789 artifact_path=artifact_path,
1790 vca_type=vca_type,
1791 )
1792 else:
1793 ee_id, credentials = await self.vca_map[
1794 vca_type
1795 ].create_execution_environment(
1796 namespace=namespace,
1797 reuse_ee_id=ee_id,
1798 db_dict=db_dict,
1799 vca_id=vca_id,
1800 )
1801
1802 elif vca_type == "native_charm":
1803 step = "Waiting to VM being up and getting IP address"
1804 self.logger.debug(logging_text + step)
1805 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1806 logging_text,
1807 nsr_id,
1808 vnfr_id,
1809 vdu_id,
1810 vdu_index,
1811 user=None,
1812 pub_key=None,
1813 )
1814 credentials = {"hostname": rw_mgmt_ip}
1815 # get username
1816 username = deep_get(
1817 config_descriptor, ("config-access", "ssh-access", "default-user")
1818 )
1819 # TODO remove this when the IM changes regarding config-access:ssh-access:default-user are
1820 # merged. Meanwhile, get the username from the initial-config-primitive
1821 if not username and initial_config_primitive_list:
1822 for config_primitive in initial_config_primitive_list:
1823 for param in config_primitive.get("parameter", ()):
1824 if param["name"] == "ssh-username":
1825 username = param["value"]
1826 break
1827 if not username:
1828 raise LcmException(
1829 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1830 "'config-access.ssh-access.default-user'"
1831 )
1832 credentials["username"] = username
1833 # n2vc_redesign STEP 3.2
1834
1835 self._write_configuration_status(
1836 nsr_id=nsr_id,
1837 vca_index=vca_index,
1838 status="REGISTERING",
1839 element_under_configuration=element_under_configuration,
1840 element_type=element_type,
1841 )
1842
1843 step = "register execution environment {}".format(credentials)
1844 self.logger.debug(logging_text + step)
1845 ee_id = await self.vca_map[vca_type].register_execution_environment(
1846 credentials=credentials,
1847 namespace=namespace,
1848 db_dict=db_dict,
1849 vca_id=vca_id,
1850 )
1851
1852 # for compatibility with MON/POL modules, they need the model and application name at the database
1853 # TODO ask MON/POL whether they still need to assume the format "model_name.application_name"
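# e.g. a (hypothetical) ee_id "mymodel.myapp" would be stored below as model "mymodel" and application "myapp"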
1854 ee_id_parts = ee_id.split(".")
1855 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1856 if len(ee_id_parts) >= 2:
1857 model_name = ee_id_parts[0]
1858 application_name = ee_id_parts[1]
1859 db_nsr_update[db_update_entry + "model"] = model_name
1860 db_nsr_update[db_update_entry + "application"] = application_name
1861
1862 # n2vc_redesign STEP 3.3
1863 step = "Install configuration Software"
1864
1865 self._write_configuration_status(
1866 nsr_id=nsr_id,
1867 vca_index=vca_index,
1868 status="INSTALLING SW",
1869 element_under_configuration=element_under_configuration,
1870 element_type=element_type,
1871 other_update=db_nsr_update,
1872 )
1873
1874 # TODO check if already done
1875 self.logger.debug(logging_text + step)
1876 config = None
1877 if vca_type == "native_charm":
1878 config_primitive = next(
1879 (p for p in initial_config_primitive_list if p["name"] == "config"),
1880 None,
1881 )
1882 if config_primitive:
1883 config = self._map_primitive_params(
1884 config_primitive, {}, deploy_params
1885 )
1886 num_units = 1
1887 if vca_type == "lxc_proxy_charm":
1888 if element_type == "NS":
1889 num_units = db_nsr.get("config-units") or 1
1890 elif element_type == "VNF":
1891 num_units = db_vnfr.get("config-units") or 1
1892 elif element_type == "VDU":
1893 for v in db_vnfr["vdur"]:
1894 if vdu_id == v["vdu-id-ref"]:
1895 num_units = v.get("config-units") or 1
1896 break
1897 if vca_type != "k8s_proxy_charm":
1898 await self.vca_map[vca_type].install_configuration_sw(
1899 ee_id=ee_id,
1900 artifact_path=artifact_path,
1901 db_dict=db_dict,
1902 config=config,
1903 num_units=num_units,
1904 vca_id=vca_id,
1905 vca_type=vca_type,
1906 )
1907
1908 # write in db flag of configuration_sw already installed
1909 self.update_db_2(
1910 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1911 )
1912
1913 # add relations for this VCA (wait for other peers related with this VCA)
1914 await self._add_vca_relations(
1915 logging_text=logging_text,
1916 nsr_id=nsr_id,
1917 vca_type=vca_type,
1918 vca_index=vca_index,
1919 )
1920
1921 # if SSH access is required, then get the execution environment SSH public key;
1922 # if native charm, we have already waited for the VM to be UP
1923 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1924 pub_key = None
1925 user = None
1926 # self.logger.debug("get ssh key block")
1927 if deep_get(
1928 config_descriptor, ("config-access", "ssh-access", "required")
1929 ):
1930 # self.logger.debug("ssh key needed")
1931 # Needed to inject a ssh key
1932 user = deep_get(
1933 config_descriptor,
1934 ("config-access", "ssh-access", "default-user"),
1935 )
1936 step = "Install configuration Software, getting public ssh key"
1937 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1938 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1939 )
1940
1941 step = "Insert public key into VM user={} ssh_key={}".format(
1942 user, pub_key
1943 )
1944 else:
1945 # self.logger.debug("no need to get ssh key")
1946 step = "Waiting to VM being up and getting IP address"
1947 self.logger.debug(logging_text + step)
1948
1949 # n2vc_redesign STEP 5.1
1950 # wait for RO (ip-address) and insert pub_key into the VM
1951 if vnfr_id:
1952 if kdu_name:
1953 rw_mgmt_ip = await self.wait_kdu_up(
1954 logging_text, nsr_id, vnfr_id, kdu_name
1955 )
1956 else:
1957 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1958 logging_text,
1959 nsr_id,
1960 vnfr_id,
1961 vdu_id,
1962 vdu_index,
1963 user=user,
1964 pub_key=pub_key,
1965 )
1966 else:
1967 rw_mgmt_ip = None # This is for a NS configuration
1968
1969 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
1970
1971 # store rw_mgmt_ip in deploy params for later replacement
1972 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1973
1974 # n2vc_redesign STEP 6 Execute initial config primitive
1975 step = "execute initial config primitive"
1976
1977 # wait for dependent primitives execution (NS -> VNF -> VDU)
1978 if initial_config_primitive_list:
1979 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1980
1981 # stage, depending on the element type: vdu, kdu, vnf or ns
1982 my_vca = vca_deployed_list[vca_index]
1983 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1984 # VDU or KDU
1985 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
1986 elif my_vca.get("member-vnf-index"):
1987 # VNF
1988 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
1989 else:
1990 # NS
1991 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
1992
1993 self._write_configuration_status(
1994 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
1995 )
1996
1997 self._write_op_status(op_id=nslcmop_id, stage=stage)
1998
1999 check_if_terminated_needed = True
2000 for initial_config_primitive in initial_config_primitive_list:
2001 # add ns_config_info to the deploy params when this VCA is a NS execution environment
2002 if not vca_deployed["member-vnf-index"]:
2003 deploy_params["ns_config_info"] = json.dumps(
2004 self._get_ns_config_info(nsr_id)
2005 )
2006 # TODO check if already done
2007 primitive_params_ = self._map_primitive_params(
2008 initial_config_primitive, {}, deploy_params
2009 )
2010
2011 step = "execute primitive '{}' params '{}'".format(
2012 initial_config_primitive["name"], primitive_params_
2013 )
2014 self.logger.debug(logging_text + step)
2015 await self.vca_map[vca_type].exec_primitive(
2016 ee_id=ee_id,
2017 primitive_name=initial_config_primitive["name"],
2018 params_dict=primitive_params_,
2019 db_dict=db_dict,
2020 vca_id=vca_id,
2021 vca_type=vca_type,
2022 )
2023 # Once some primitive has been executed, check and write at db if terminate-config-primitives will need to be executed
2024 if check_if_terminated_needed:
2025 if config_descriptor.get("terminate-config-primitive"):
2026 self.update_db_2(
2027 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2028 )
2029 check_if_terminated_needed = False
2030
2031 # TODO register in database that primitive is done
2032
2033 # STEP 7 Configure metrics
2034 if vca_type == "helm" or vca_type == "helm-v3":
2035 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2036 ee_id=ee_id,
2037 artifact_path=artifact_path,
2038 ee_config_descriptor=ee_config_descriptor,
2039 vnfr_id=vnfr_id,
2040 nsr_id=nsr_id,
2041 target_ip=rw_mgmt_ip,
2042 )
2043 if prometheus_jobs:
2044 self.update_db_2(
2045 "nsrs",
2046 nsr_id,
2047 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2048 )
2049
2050 for job in prometheus_jobs:
2051 self.db.set_one(
2052 "prometheus_jobs",
2053 {"job_name": job["job_name"]},
2054 job,
2055 upsert=True,
2056 fail_on_empty=False,
2057 )
2058
2059 step = "instantiated at VCA"
2060 self.logger.debug(logging_text + step)
2061
2062 self._write_configuration_status(
2063 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2064 )
2065
2066 except Exception as e: # TODO not use Exception but N2VC exception
2067 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2068 if not isinstance(
2069 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2070 ):
2071 self.logger.error(
2072 "Exception while {} : {}".format(step, e), exc_info=True
2073 )
2074 self._write_configuration_status(
2075 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2076 )
2077 raise LcmException("{} {}".format(step, e)) from e
2078
2079 def _write_ns_status(
2080 self,
2081 nsr_id: str,
2082 ns_state: str,
2083 current_operation: str,
2084 current_operation_id: str,
2085 error_description: str = None,
2086 error_detail: str = None,
2087 other_update: dict = None,
2088 ):
2089 """
2090 Update db_nsr fields.
2091 :param nsr_id:
2092 :param ns_state:
2093 :param current_operation:
2094 :param current_operation_id:
2095 :param error_description:
2096 :param error_detail:
2097 :param other_update: Other required changes at the database, if provided (the dict is extended and used for the update)
2098 :return:
2099 """
2100 try:
2101 db_dict = other_update or {}
2102 db_dict[
2103 "_admin.nslcmop"
2104 ] = current_operation_id # for backward compatibility
2105 db_dict["_admin.current-operation"] = current_operation_id
2106 db_dict["_admin.operation-type"] = (
2107 current_operation if current_operation != "IDLE" else None
2108 )
2109 db_dict["currentOperation"] = current_operation
2110 db_dict["currentOperationID"] = current_operation_id
2111 db_dict["errorDescription"] = error_description
2112 db_dict["errorDetail"] = error_detail
2113
2114 if ns_state:
2115 db_dict["nsState"] = ns_state
2116 self.update_db_2("nsrs", nsr_id, db_dict)
2117 except DbException as e:
2118 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2119
2120 def _write_op_status(
2121 self,
2122 op_id: str,
2123 stage: list = None,
2124 error_message: str = None,
2125 queuePosition: int = 0,
2126 operation_state: str = None,
2127 other_update: dict = None,
2128 ):
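"""
Update the nslcmops operation record with the current stage and status.
:param op_id: nslcmop identifier
:param stage: list of strings (typically [stage, step, VIM progress]); stage[0] is stored as
    'stage' and the joined list as 'detailed-status'. A non-list value is stored as 'stage'.
:param error_message: stored as 'errorMessage' if provided
:param queuePosition: stored as 'queuePosition'
:param operation_state: stored as 'operationState' if provided; 'statusEnteredTime' is refreshed in that case
:param other_update: other required changes at the database, if provided
:return: None. Database errors are caught and only logged.
"""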
2129 try:
2130 db_dict = other_update or {}
2131 db_dict["queuePosition"] = queuePosition
2132 if isinstance(stage, list):
2133 db_dict["stage"] = stage[0]
2134 db_dict["detailed-status"] = " ".join(stage)
2135 elif stage is not None:
2136 db_dict["stage"] = str(stage)
2137
2138 if error_message is not None:
2139 db_dict["errorMessage"] = error_message
2140 if operation_state is not None:
2141 db_dict["operationState"] = operation_state
2142 db_dict["statusEnteredTime"] = time()
2143 self.update_db_2("nslcmops", op_id, db_dict)
2144 except DbException as e:
2145 self.logger.warn(
2146 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2147 )
2148
2149 def _write_all_config_status(self, db_nsr: dict, status: str):
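"""Set the given status on every non-empty entry of db_nsr['configurationStatus']. Database errors are only logged."""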
2150 try:
2151 nsr_id = db_nsr["_id"]
2152 # configurationStatus
2153 config_status = db_nsr.get("configurationStatus")
2154 if config_status:
2155 db_nsr_update = {
2156 "configurationStatus.{}.status".format(index): status
2157 for index, v in enumerate(config_status)
2158 if v
2159 }
2160 # update status
2161 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2162
2163 except DbException as e:
2164 self.logger.warn(
2165 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2166 )
2167
2168 def _write_configuration_status(
2169 self,
2170 nsr_id: str,
2171 vca_index: int,
2172 status: str = None,
2173 element_under_configuration: str = None,
2174 element_type: str = None,
2175 other_update: dict = None,
2176 ):
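"""
Update the 'configurationStatus.<vca_index>' entry of the nsr record. Only the provided
fields (status, elementUnderConfiguration, elementType) are written; other_update may carry
additional database changes. Database errors are caught and only logged.
"""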
2177
2178 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2179 # .format(vca_index, status))
2180
2181 try:
2182 db_path = "configurationStatus.{}.".format(vca_index)
2183 db_dict = other_update or {}
2184 if status:
2185 db_dict[db_path + "status"] = status
2186 if element_under_configuration:
2187 db_dict[
2188 db_path + "elementUnderConfiguration"
2189 ] = element_under_configuration
2190 if element_type:
2191 db_dict[db_path + "elementType"] = element_type
2192 self.update_db_2("nsrs", nsr_id, db_dict)
2193 except DbException as e:
2194 self.logger.warn(
2195 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2196 status, nsr_id, vca_index, e
2197 )
2198 )
2199
2200 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2201 """
2202 Checks and computes the placement (VIM account where to deploy). If it is decided by an external tool, it
2203 sends the request via kafka and waits until the result is written at the database (nslcmops _admin.pla).
2204 Database is used because the result can be obtained from a different LCM worker in case of HA.
2205 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2206 :param db_nslcmop: database content of nslcmop
2207 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2208 :return: True if some modification is done. Modifies the database vnfrs and the db_vnfrs parameter with the
2209 computed 'vim-account-id'
2210 """
2211 modified = False
2212 nslcmop_id = db_nslcmop["_id"]
2213 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2214 if placement_engine == "PLA":
2215 self.logger.debug(
2216 logging_text + "Invoke and wait for placement optimization"
2217 )
2218 await self.msg.aiowrite(
2219 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2220 )
2221 db_poll_interval = 5
2222 wait = db_poll_interval * 10
2223 pla_result = None
2224 while not pla_result and wait >= 0:
2225 await asyncio.sleep(db_poll_interval)
2226 wait -= db_poll_interval
2227 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2228 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2229
2230 if not pla_result:
2231 raise LcmException(
2232 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2233 )
2234
2235 for pla_vnf in pla_result["vnf"]:
2236 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2237 if not pla_vnf.get("vimAccountId") or not vnfr:
2238 continue
2239 modified = True
2240 self.db.set_one(
2241 "vnfrs",
2242 {"_id": vnfr["_id"]},
2243 {"vim-account-id": pla_vnf["vimAccountId"]},
2244 )
2245 # Modifies db_vnfrs
2246 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2247 return modified
2248
2249 def update_nsrs_with_pla_result(self, params):
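"""Store the placement result received from PLA at the nslcmops record ('_admin.pla')."""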
2250 try:
2251 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2252 self.update_db_2(
2253 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2254 )
2255 except Exception as e:
2256 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2257
2258 async def instantiate(self, nsr_id, nslcmop_id):
2259 """
2260
2261 :param nsr_id: ns instance to deploy
2262 :param nslcmop_id: operation to run
2263 :return:
2264 """
2265
2266 # Try to lock HA task here
2267 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2268 if not task_is_locked_by_me:
2269 self.logger.debug(
2270 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2271 )
2272 return
2273
2274 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2275 self.logger.debug(logging_text + "Enter")
2276
2277 # get all needed from database
2278
2279 # database nsrs record
2280 db_nsr = None
2281
2282 # database nslcmops record
2283 db_nslcmop = None
2284
2285 # update operation on nsrs
2286 db_nsr_update = {}
2287 # update operation on nslcmops
2288 db_nslcmop_update = {}
2289
2290 nslcmop_operation_state = None
2291 db_vnfrs = {} # vnf's info indexed by member-index
2292 # n2vc_info = {}
2293 tasks_dict_info = {} # from task to info text
2294 exc = None
2295 error_list = []
2296 stage = [
2297 "Stage 1/5: preparation of the environment.",
2298 "Waiting for previous operations to terminate.",
2299 "",
2300 ]
2301 # ^ stage, step, VIM progress
2302 try:
2303 # wait for any previous tasks in process
2304 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2305
2306 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2307 stage[1] = "Reading from database."
2308 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2309 db_nsr_update["detailed-status"] = "creating"
2310 db_nsr_update["operational-status"] = "init"
2311 self._write_ns_status(
2312 nsr_id=nsr_id,
2313 ns_state="BUILDING",
2314 current_operation="INSTANTIATING",
2315 current_operation_id=nslcmop_id,
2316 other_update=db_nsr_update,
2317 )
2318 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2319
2320 # read from db: operation
2321 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2322 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2323 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2324 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2325 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2326 )
2327 ns_params = db_nslcmop.get("operationParams")
2328 if ns_params and ns_params.get("timeout_ns_deploy"):
2329 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2330 else:
2331 timeout_ns_deploy = self.timeout.get(
2332 "ns_deploy", self.timeout_ns_deploy
2333 )
2334
2335 # read from db: ns
2336 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2337 self.logger.debug(logging_text + stage[1])
2338 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2339 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2340 self.logger.debug(logging_text + stage[1])
2341 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2342 self.fs.sync(db_nsr["nsd-id"])
2343 db_nsr["nsd"] = nsd
2344 # nsr_name = db_nsr["name"] # TODO short-name??
2345
2346 # read from db: vnf's of this ns
2347 stage[1] = "Getting vnfrs from db."
2348 self.logger.debug(logging_text + stage[1])
2349 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2350
2351 # read from db: vnfd's for every vnf
2352 db_vnfds = [] # every vnfd data
2353
2354 # for each vnf in ns, read vnfd
2355 for vnfr in db_vnfrs_list:
2356 if vnfr.get("kdur"):
2357 kdur_list = []
2358 for kdur in vnfr["kdur"]:
2359 if kdur.get("additionalParams"):
2360 kdur["additionalParams"] = json.loads(kdur["additionalParams"])
2361 kdur_list.append(kdur)
2362 vnfr["kdur"] = kdur_list
2363
2364 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2365 vnfd_id = vnfr["vnfd-id"]
2366 vnfd_ref = vnfr["vnfd-ref"]
2367 self.fs.sync(vnfd_id)
2368
2369 # if we do not have this vnfd yet, read it from db
2370 if not find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id):
2371 # read from db
2372 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2373 vnfd_id, vnfd_ref
2374 )
2375 self.logger.debug(logging_text + stage[1])
2376 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2377
2378 # store vnfd
2379 db_vnfds.append(vnfd)
2380
2381 # Get or generates the _admin.deployed.VCA list
2382 vca_deployed_list = None
2383 if db_nsr["_admin"].get("deployed"):
2384 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2385 if vca_deployed_list is None:
2386 vca_deployed_list = []
2387 configuration_status_list = []
2388 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2389 db_nsr_update["configurationStatus"] = configuration_status_list
2390 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2391 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2392 elif isinstance(vca_deployed_list, dict):
2393 # maintain backward compatibility. Change a dict to list at database
2394 vca_deployed_list = list(vca_deployed_list.values())
2395 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2396 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2397
2398 if not isinstance(
2399 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2400 ):
2401 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2402 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2403
2404 # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2405 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2406 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2407 self.db.set_list(
2408 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2409 )
2410
2411 # n2vc_redesign STEP 2 Deploy Network Scenario
2412 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2413 self._write_op_status(op_id=nslcmop_id, stage=stage)
2414
2415 stage[1] = "Deploying KDUs."
2416 # self.logger.debug(logging_text + "Before deploy_kdus")
2417 # Call deploy_kdus in case the "vdu:kdu" param exists
2418 await self.deploy_kdus(
2419 logging_text=logging_text,
2420 nsr_id=nsr_id,
2421 nslcmop_id=nslcmop_id,
2422 db_vnfrs=db_vnfrs,
2423 db_vnfds=db_vnfds,
2424 task_instantiation_info=tasks_dict_info,
2425 )
2426
2427 stage[1] = "Getting VCA public key."
2428 # n2vc_redesign STEP 1 Get VCA public ssh-key
2429 # feature 1429. Add n2vc public key to needed VMs
2430 n2vc_key = self.n2vc.get_public_key()
2431 n2vc_key_list = [n2vc_key]
2432 if self.vca_config.get("public_key"):
2433 n2vc_key_list.append(self.vca_config["public_key"])
2434
2435 stage[1] = "Deploying NS at VIM."
2436 task_ro = asyncio.ensure_future(
2437 self.instantiate_RO(
2438 logging_text=logging_text,
2439 nsr_id=nsr_id,
2440 nsd=nsd,
2441 db_nsr=db_nsr,
2442 db_nslcmop=db_nslcmop,
2443 db_vnfrs=db_vnfrs,
2444 db_vnfds=db_vnfds,
2445 n2vc_key_list=n2vc_key_list,
2446 stage=stage,
2447 )
2448 )
2449 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2450 tasks_dict_info[task_ro] = "Deploying at VIM"
2451
2452 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2453 stage[1] = "Deploying Execution Environments."
2454 self.logger.debug(logging_text + stage[1])
2455
2456 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2457 for vnf_profile in get_vnf_profiles(nsd):
2458 vnfd_id = vnf_profile["vnfd-id"]
2459 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2460 member_vnf_index = str(vnf_profile["id"])
2461 db_vnfr = db_vnfrs[member_vnf_index]
2462 base_folder = vnfd["_admin"]["storage"]
2463 vdu_id = None
2464 vdu_index = 0
2465 vdu_name = None
2466 kdu_name = None
2467
2468 # Get additional parameters
2469 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2470 if db_vnfr.get("additionalParamsForVnf"):
2471 deploy_params.update(
2472 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2473 )
2474
2475 descriptor_config = get_configuration(vnfd, vnfd["id"])
2476 if descriptor_config:
2477 self._deploy_n2vc(
2478 logging_text=logging_text
2479 + "member_vnf_index={} ".format(member_vnf_index),
2480 db_nsr=db_nsr,
2481 db_vnfr=db_vnfr,
2482 nslcmop_id=nslcmop_id,
2483 nsr_id=nsr_id,
2484 nsi_id=nsi_id,
2485 vnfd_id=vnfd_id,
2486 vdu_id=vdu_id,
2487 kdu_name=kdu_name,
2488 member_vnf_index=member_vnf_index,
2489 vdu_index=vdu_index,
2490 vdu_name=vdu_name,
2491 deploy_params=deploy_params,
2492 descriptor_config=descriptor_config,
2493 base_folder=base_folder,
2494 task_instantiation_info=tasks_dict_info,
2495 stage=stage,
2496 )
2497
2498 # Deploy charms for each VDU that supports one.
2499 for vdud in get_vdu_list(vnfd):
2500 vdu_id = vdud["id"]
2501 descriptor_config = get_configuration(vnfd, vdu_id)
2502 vdur = find_in_list(
2503 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2504 )
2505
2506 if vdur.get("additionalParams"):
2507 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2508 else:
2509 deploy_params_vdu = deploy_params
2510 deploy_params_vdu["OSM"] = get_osm_params(
2511 db_vnfr, vdu_id, vdu_count_index=0
2512 )
2513 vdud_count = get_number_of_instances(vnfd, vdu_id)
2514
2515 self.logger.debug("VDUD > {}".format(vdud))
2516 self.logger.debug(
2517 "Descriptor config > {}".format(descriptor_config)
2518 )
2519 if descriptor_config:
2520 vdu_name = None
2521 kdu_name = None
2522 for vdu_index in range(vdud_count):
2523 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2524 self._deploy_n2vc(
2525 logging_text=logging_text
2526 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2527 member_vnf_index, vdu_id, vdu_index
2528 ),
2529 db_nsr=db_nsr,
2530 db_vnfr=db_vnfr,
2531 nslcmop_id=nslcmop_id,
2532 nsr_id=nsr_id,
2533 nsi_id=nsi_id,
2534 vnfd_id=vnfd_id,
2535 vdu_id=vdu_id,
2536 kdu_name=kdu_name,
2537 member_vnf_index=member_vnf_index,
2538 vdu_index=vdu_index,
2539 vdu_name=vdu_name,
2540 deploy_params=deploy_params_vdu,
2541 descriptor_config=descriptor_config,
2542 base_folder=base_folder,
2543 task_instantiation_info=tasks_dict_info,
2544 stage=stage,
2545 )
2546 for kdud in get_kdu_list(vnfd):
2547 kdu_name = kdud["name"]
2548 descriptor_config = get_configuration(vnfd, kdu_name)
2549 if descriptor_config:
2550 vdu_id = None
2551 vdu_index = 0
2552 vdu_name = None
2553 kdur = next(
2554 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2555 )
2556 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2557 if kdur.get("additionalParams"):
2558 deploy_params_kdu = parse_yaml_strings(
2559 kdur["additionalParams"]
2560 )
2561
2562 self._deploy_n2vc(
2563 logging_text=logging_text,
2564 db_nsr=db_nsr,
2565 db_vnfr=db_vnfr,
2566 nslcmop_id=nslcmop_id,
2567 nsr_id=nsr_id,
2568 nsi_id=nsi_id,
2569 vnfd_id=vnfd_id,
2570 vdu_id=vdu_id,
2571 kdu_name=kdu_name,
2572 member_vnf_index=member_vnf_index,
2573 vdu_index=vdu_index,
2574 vdu_name=vdu_name,
2575 deploy_params=deploy_params_kdu,
2576 descriptor_config=descriptor_config,
2577 base_folder=base_folder,
2578 task_instantiation_info=tasks_dict_info,
2579 stage=stage,
2580 )
2581
2582 # Check if this NS has a charm configuration
2583 descriptor_config = nsd.get("ns-configuration")
2584 if descriptor_config and descriptor_config.get("juju"):
2585 vnfd_id = None
2586 db_vnfr = None
2587 member_vnf_index = None
2588 vdu_id = None
2589 kdu_name = None
2590 vdu_index = 0
2591 vdu_name = None
2592
2593 # Get additional parameters
2594 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2595 if db_nsr.get("additionalParamsForNs"):
2596 deploy_params.update(
2597 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2598 )
2599 base_folder = nsd["_admin"]["storage"]
2600 self._deploy_n2vc(
2601 logging_text=logging_text,
2602 db_nsr=db_nsr,
2603 db_vnfr=db_vnfr,
2604 nslcmop_id=nslcmop_id,
2605 nsr_id=nsr_id,
2606 nsi_id=nsi_id,
2607 vnfd_id=vnfd_id,
2608 vdu_id=vdu_id,
2609 kdu_name=kdu_name,
2610 member_vnf_index=member_vnf_index,
2611 vdu_index=vdu_index,
2612 vdu_name=vdu_name,
2613 deploy_params=deploy_params,
2614 descriptor_config=descriptor_config,
2615 base_folder=base_folder,
2616 task_instantiation_info=tasks_dict_info,
2617 stage=stage,
2618 )
2619
2620 # the rest of the work is done in the finally block
2621
2622 except (
2623 ROclient.ROClientException,
2624 DbException,
2625 LcmException,
2626 N2VCException,
2627 ) as e:
2628 self.logger.error(
2629 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2630 )
2631 exc = e
2632 except asyncio.CancelledError:
2633 self.logger.error(
2634 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2635 )
2636 exc = "Operation was cancelled"
2637 except Exception as e:
2638 exc = traceback.format_exc()
2639 self.logger.critical(
2640 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2641 exc_info=True,
2642 )
2643 finally:
2644 if exc:
2645 error_list.append(str(exc))
2646 try:
2647 # wait for pending tasks
2648 if tasks_dict_info:
2649 stage[1] = "Waiting for instantiate pending tasks."
2650 self.logger.debug(logging_text + stage[1])
2651 error_list += await self._wait_for_tasks(
2652 logging_text,
2653 tasks_dict_info,
2654 timeout_ns_deploy,
2655 stage,
2656 nslcmop_id,
2657 nsr_id=nsr_id,
2658 )
2659 stage[1] = stage[2] = ""
2660 except asyncio.CancelledError:
2661 error_list.append("Cancelled")
2662 # TODO cancel all tasks
2663 except Exception as exc:
2664 error_list.append(str(exc))
2665
2666 # update operation-status
2667 db_nsr_update["operational-status"] = "running"
2668 # let's begin with VCA 'configured' status (later we can change it)
2669 db_nsr_update["config-status"] = "configured"
2670 for task, task_name in tasks_dict_info.items():
2671 if not task.done() or task.cancelled() or task.exception():
2672 if task_name.startswith(self.task_name_deploy_vca):
2673 # A N2VC task is pending
2674 db_nsr_update["config-status"] = "failed"
2675 else:
2676 # RO or KDU task is pending
2677 db_nsr_update["operational-status"] = "failed"
2678
2679 # update status at database
2680 if error_list:
2681 error_detail = ". ".join(error_list)
2682 self.logger.error(logging_text + error_detail)
2683 error_description_nslcmop = "{} Detail: {}".format(
2684 stage[0], error_detail
2685 )
2686 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2687 nslcmop_id, stage[0]
2688 )
2689
2690 db_nsr_update["detailed-status"] = (
2691 error_description_nsr + " Detail: " + error_detail
2692 )
2693 db_nslcmop_update["detailed-status"] = error_detail
2694 nslcmop_operation_state = "FAILED"
2695 ns_state = "BROKEN"
2696 else:
2697 error_detail = None
2698 error_description_nsr = error_description_nslcmop = None
2699 ns_state = "READY"
2700 db_nsr_update["detailed-status"] = "Done"
2701 db_nslcmop_update["detailed-status"] = "Done"
2702 nslcmop_operation_state = "COMPLETED"
2703
2704 if db_nsr:
2705 self._write_ns_status(
2706 nsr_id=nsr_id,
2707 ns_state=ns_state,
2708 current_operation="IDLE",
2709 current_operation_id=None,
2710 error_description=error_description_nsr,
2711 error_detail=error_detail,
2712 other_update=db_nsr_update,
2713 )
2714 self._write_op_status(
2715 op_id=nslcmop_id,
2716 stage="",
2717 error_message=error_description_nslcmop,
2718 operation_state=nslcmop_operation_state,
2719 other_update=db_nslcmop_update,
2720 )
2721
2722 if nslcmop_operation_state:
2723 try:
2724 await self.msg.aiowrite(
2725 "ns",
2726 "instantiated",
2727 {
2728 "nsr_id": nsr_id,
2729 "nslcmop_id": nslcmop_id,
2730 "operationState": nslcmop_operation_state,
2731 },
2732 loop=self.loop,
2733 )
2734 except Exception as e:
2735 self.logger.error(
2736 logging_text + "kafka_write notification Exception {}".format(e)
2737 )
2738
2739 self.logger.debug(logging_text + "Exit")
2740 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2741
2742 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
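"""Return the vnfd with the given id, reading it from the database only once and caching it."""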
2743 if vnfd_id not in cached_vnfds:
2744 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2745 return cached_vnfds[vnfd_id]
2746
2747 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
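"""Return the vnfr of the given member-vnf-index within the ns, caching it to avoid repeated database reads."""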
2748 if vnf_profile_id not in cached_vnfrs:
2749 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2750 "vnfrs",
2751 {
2752 "member-vnf-index-ref": vnf_profile_id,
2753 "nsr-id-ref": nsr_id,
2754 },
2755 )
2756 return cached_vnfrs[vnf_profile_id]
2757
2758 def _is_deployed_vca_in_relation(
2759 self, vca: DeployedVCA, relation: Relation
2760 ) -> bool:
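"""Return True if the deployed VCA matches any non-KDU endpoint (provider or requirer) of the relation."""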
2761 found = False
2762 for endpoint in (relation.provider, relation.requirer):
2763 if endpoint["kdu-resource-profile-id"]:
2764 continue
2765 found = (
2766 vca.vnf_profile_id == endpoint.vnf_profile_id
2767 and vca.vdu_profile_id == endpoint.vdu_profile_id
2768 and vca.execution_environment_ref == endpoint.execution_environment_ref
2769 )
2770 if found:
2771 break
2772 return found
2773
2774 def _update_ee_relation_data_with_implicit_data(
2775 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2776 ):
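"""
Validate the relation endpoint data and, for VNF/VDU level endpoints that do not declare an
explicit 'execution-environment-ref', fill it with the juju execution environment found in
the corresponding vnfd. Raises if no execution environment can be found.
"""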
2777 ee_relation_data = safe_get_ee_relation(
2778 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2779 )
2780 ee_relation_level = EELevel.get_level(ee_relation_data)
2781 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2782 "execution-environment-ref"
2783 ]:
2784 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2785 vnfd_id = vnf_profile["vnfd-id"]
2786 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2787 entity_id = (
2788 vnfd_id
2789 if ee_relation_level == EELevel.VNF
2790 else ee_relation_data["vdu-profile-id"]
2791 )
2792 ee = get_juju_ee_ref(db_vnfd, entity_id)
2793 if not ee:
2794 raise Exception(
2795 f"not execution environments found for ee_relation {ee_relation_data}"
2796 )
2797 ee_relation_data["execution-environment-ref"] = ee["id"]
2798 return ee_relation_data
2799
2800 def _get_ns_relations(
2801 self,
2802 nsr_id: str,
2803 nsd: Dict[str, Any],
2804 vca: DeployedVCA,
2805 cached_vnfds: Dict[str, Any],
2806 ) -> List[Relation]:
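"""Return the NS level relations (nsd ns-configuration) in which the given deployed VCA takes part."""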
2807 relations = []
2808 db_ns_relations = get_ns_configuration_relation_list(nsd)
2809 for r in db_ns_relations:
2810 provider_dict = None
2811 requirer_dict = None
2812 if all(key in r for key in ("provider", "requirer")):
2813 provider_dict = r["provider"]
2814 requirer_dict = r["requirer"]
2815 elif "entities" in r:
2816 provider_id = r["entities"][0]["id"]
2817 provider_dict = {
2818 "nsr-id": nsr_id,
2819 "endpoint": r["entities"][0]["endpoint"],
2820 }
2821 if provider_id != nsd["id"]:
2822 provider_dict["vnf-profile-id"] = provider_id
2823 requirer_id = r["entities"][1]["id"]
2824 requirer_dict = {
2825 "nsr-id": nsr_id,
2826 "endpoint": r["entities"][1]["endpoint"],
2827 }
2828 if requirer_id != nsd["id"]:
2829 requirer_dict["vnf-profile-id"] = requirer_id
2830 else:
2831 raise Exception(
2832 "provider/requirer or entities must be included in the relation."
2833 )
2834 relation_provider = self._update_ee_relation_data_with_implicit_data(
2835 nsr_id, nsd, provider_dict, cached_vnfds
2836 )
2837 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2838 nsr_id, nsd, requirer_dict, cached_vnfds
2839 )
2840 provider = EERelation(relation_provider)
2841 requirer = EERelation(relation_requirer)
2842 relation = Relation(r["name"], provider, requirer)
2843 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2844 if vca_in_relation:
2845 relations.append(relation)
2846 return relations
2847
2848 def _get_vnf_relations(
2849 self,
2850 nsr_id: str,
2851 nsd: Dict[str, Any],
2852 vca: DeployedVCA,
2853 cached_vnfds: Dict[str, Any],
2854 ) -> List[Relation]:
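"""Return the VNF level relations (vnfd) in which the given deployed VCA takes part."""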
2855 relations = []
2856 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2857 vnf_profile_id = vnf_profile["id"]
2858 vnfd_id = vnf_profile["vnfd-id"]
2859 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2860 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2861 for r in db_vnf_relations:
2862 provider_dict = None
2863 requirer_dict = None
2864 if all(key in r for key in ("provider", "requirer")):
2865 provider_dict = r["provider"]
2866 requirer_dict = r["requirer"]
2867 elif "entities" in r:
2868 provider_id = r["entities"][0]["id"]
2869 provider_dict = {
2870 "nsr-id": nsr_id,
2871 "vnf-profile-id": vnf_profile_id,
2872 "endpoint": r["entities"][0]["endpoint"],
2873 }
2874 if provider_id != vnfd_id:
2875 provider_dict["vdu-profile-id"] = provider_id
2876 requirer_id = r["entities"][1]["id"]
2877 requirer_dict = {
2878 "nsr-id": nsr_id,
2879 "vnf-profile-id": vnf_profile_id,
2880 "endpoint": r["entities"][1]["endpoint"],
2881 }
2882 if requirer_id != vnfd_id:
2883 requirer_dict["vdu-profile-id"] = requirer_id
2884 else:
2885 raise Exception(
2886 "provider/requirer or entities must be included in the relation."
2887 )
2888 relation_provider = self._update_ee_relation_data_with_implicit_data(
2889 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2890 )
2891 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2892 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2893 )
2894 provider = EERelation(relation_provider)
2895 requirer = EERelation(relation_requirer)
2896 relation = Relation(r["name"], provider, requirer)
2897 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2898 if vca_in_relation:
2899 relations.append(relation)
2900 return relations
2901
2902 def _get_kdu_resource_data(
2903 self,
2904 ee_relation: EERelation,
2905 db_nsr: Dict[str, Any],
2906 cached_vnfds: Dict[str, Any],
2907 ) -> DeployedK8sResource:
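"""Return the deployed KDU entry referenced by the relation endpoint, enriched with the 'resource-name' of its kdu-resource-profile."""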
2908 nsd = get_nsd(db_nsr)
2909 vnf_profiles = get_vnf_profiles(nsd)
2910 vnfd_id = find_in_list(
2911 vnf_profiles,
2912 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
2913 )["vnfd-id"]
2914 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2915 kdu_resource_profile = get_kdu_resource_profile(
2916 db_vnfd, ee_relation.kdu_resource_profile_id
2917 )
2918 kdu_name = kdu_resource_profile["kdu-name"]
2919 deployed_kdu, _ = get_deployed_kdu(
2920 db_nsr.get("_admin", ()).get("deployed", ()),
2921 kdu_name,
2922 ee_relation.vnf_profile_id,
2923 )
2924 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
2925 return deployed_kdu
2926
2927 def _get_deployed_component(
2928 self,
2929 ee_relation: EERelation,
2930 db_nsr: Dict[str, Any],
2931 cached_vnfds: Dict[str, Any],
2932 ) -> DeployedComponent:
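"""Return the DeployedVCA or DeployedK8sResource that matches the relation endpoint, or None if it is not deployed yet."""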
2933 nsr_id = db_nsr["_id"]
2934 deployed_component = None
2935 ee_level = EELevel.get_level(ee_relation)
2936 if ee_level == EELevel.NS:
2937 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
2938 if vca:
2939 deployed_component = DeployedVCA(nsr_id, vca)
2940 elif ee_level == EELevel.VNF:
2941 vca = get_deployed_vca(
2942 db_nsr,
2943 {
2944 "vdu_id": None,
2945 "member-vnf-index": ee_relation.vnf_profile_id,
2946 "ee_descriptor_id": ee_relation.execution_environment_ref,
2947 },
2948 )
2949 if vca:
2950 deployed_component = DeployedVCA(nsr_id, vca)
2951 elif ee_level == EELevel.VDU:
2952 vca = get_deployed_vca(
2953 db_nsr,
2954 {
2955 "vdu_id": ee_relation.vdu_profile_id,
2956 "member-vnf-index": ee_relation.vnf_profile_id,
2957 "ee_descriptor_id": ee_relation.execution_environment_ref,
2958 },
2959 )
2960 if vca:
2961 deployed_component = DeployedVCA(nsr_id, vca)
2962 elif ee_level == EELevel.KDU:
2963 kdu_resource_data = self._get_kdu_resource_data(
2964 ee_relation, db_nsr, cached_vnfds
2965 )
2966 if kdu_resource_data:
2967 deployed_component = DeployedK8sResource(kdu_resource_data)
2968 return deployed_component
2969
2970 async def _add_relation(
2971 self,
2972 relation: Relation,
2973 vca_type: str,
2974 db_nsr: Dict[str, Any],
2975 cached_vnfds: Dict[str, Any],
2976 cached_vnfrs: Dict[str, Any],
2977 ) -> bool:
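"""
Add the relation at the VCA when both endpoints are already deployed and have their
configuration software installed. Return True if the relation was added (so the caller can
remove it from the pending list), False to retry later.
"""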
2978 deployed_provider = self._get_deployed_component(
2979 relation.provider, db_nsr, cached_vnfds
2980 )
2981 deployed_requirer = self._get_deployed_component(
2982 relation.requirer, db_nsr, cached_vnfds
2983 )
2984 if (
2985 deployed_provider
2986 and deployed_requirer
2987 and deployed_provider.config_sw_installed
2988 and deployed_requirer.config_sw_installed
2989 ):
2990 provider_db_vnfr = (
2991 self._get_vnfr(
2992 relation.provider.nsr_id,
2993 relation.provider.vnf_profile_id,
2994 cached_vnfrs,
2995 )
2996 if relation.provider.vnf_profile_id
2997 else None
2998 )
2999 requirer_db_vnfr = (
3000 self._get_vnfr(
3001 relation.requirer.nsr_id,
3002 relation.requirer.vnf_profile_id,
3003 cached_vnfrs,
3004 )
3005 if relation.requirer.vnf_profile_id
3006 else None
3007 )
3008 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3009 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3010 provider_relation_endpoint = RelationEndpoint(
3011 deployed_provider.ee_id,
3012 provider_vca_id,
3013 relation.provider.endpoint,
3014 )
3015 requirer_relation_endpoint = RelationEndpoint(
3016 deployed_requirer.ee_id,
3017 requirer_vca_id,
3018 relation.requirer.endpoint,
3019 )
3020 await self.vca_map[vca_type].add_relation(
3021 provider=provider_relation_endpoint,
3022 requirer=requirer_relation_endpoint,
3023 )
3024 # remove entry from relations list
3025 return True
3026 return False
3027
3028 async def _add_vca_relations(
3029 self,
3030 logging_text,
3031 nsr_id,
3032 vca_type: str,
3033 vca_index: int,
3034 timeout: int = 3600,
3035 ) -> bool:
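"""Find the NS and VNF relations of this VCA, wait for the related peers and add the relations. Return True on success, False on timeout or error."""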
3036
3037 # steps:
3038 # 1. find all relations for this VCA
3039 # 2. wait for the other related peers
3040 # 3. add relations
3041
3042 try:
3043 # STEP 1: find all relations for this VCA
3044
3045 # read nsr record
3046 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3047 nsd = get_nsd(db_nsr)
3048
3049 # this VCA data
3050 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3051 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3052
3053 cached_vnfds = {}
3054 cached_vnfrs = {}
3055 relations = []
3056 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3057 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3058
3059 # if no relations, terminate
3060 if not relations:
3061 self.logger.debug(logging_text + " No relations")
3062 return True
3063
3064 self.logger.debug(logging_text + " adding relations {}".format(relations))
3065
3066 # add all relations
3067 start = time()
3068 while True:
3069 # check timeout
3070 now = time()
3071 if now - start >= timeout:
3072 self.logger.error(logging_text + " : timeout adding relations")
3073 return False
3074
3075 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3076 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3077
3078 # for each relation, find the VCA's related
3079 for relation in relations.copy():
3080 added = await self._add_relation(
3081 relation,
3082 vca_type,
3083 db_nsr,
3084 cached_vnfds,
3085 cached_vnfrs,
3086 )
3087 if added:
3088 relations.remove(relation)
3089
3090 if not relations:
3091 self.logger.debug("Relations added")
3092 break
3093 await asyncio.sleep(5.0)
3094
3095 return True
3096
3097 except Exception as e:
3098 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3099 return False
3100
3101 async def _install_kdu(
3102 self,
3103 nsr_id: str,
3104 nsr_db_path: str,
3105 vnfr_data: dict,
3106 kdu_index: int,
3107 kdud: dict,
3108 vnfd: dict,
3109 k8s_instance_info: dict,
3110 k8params: dict = None,
3111 timeout: int = 600,
3112 vca_id: str = None,
3113 ):
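"""
Instantiate a KDU at the target K8s cluster, update the kdur status, services and ip-address
at the vnfr, and run its initial-config-primitive list when the KDU has no juju execution
environment. Return the kdu_instance name; on error the status is written at the database and
the exception is re-raised.
"""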
3114
3115 try:
3116 k8sclustertype = k8s_instance_info["k8scluster-type"]
3117 # Instantiate kdu
3118 db_dict_install = {
3119 "collection": "nsrs",
3120 "filter": {"_id": nsr_id},
3121 "path": nsr_db_path,
3122 }
3123
3124 if k8s_instance_info.get("kdu-deployment-name"):
3125 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3126 else:
3127 kdu_instance = self.k8scluster_map[
3128 k8sclustertype
3129 ].generate_kdu_instance_name(
3130 db_dict=db_dict_install,
3131 kdu_model=k8s_instance_info["kdu-model"],
3132 kdu_name=k8s_instance_info["kdu-name"],
3133 )
3134 self.update_db_2(
3135 "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
3136 )
3137 await self.k8scluster_map[k8sclustertype].install(
3138 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3139 kdu_model=k8s_instance_info["kdu-model"],
3140 atomic=True,
3141 params=k8params,
3142 db_dict=db_dict_install,
3143 timeout=timeout,
3144 kdu_name=k8s_instance_info["kdu-name"],
3145 namespace=k8s_instance_info["namespace"],
3146 kdu_instance=kdu_instance,
3147 vca_id=vca_id,
3148 )
3149 self.update_db_2(
3150 "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
3151 )
3152
3153 # Obtain services in order to get the management service ip
3154 services = await self.k8scluster_map[k8sclustertype].get_services(
3155 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3156 kdu_instance=kdu_instance,
3157 namespace=k8s_instance_info["namespace"],
3158 )
3159
3160 # Obtain management service info (if exists)
3161 vnfr_update_dict = {}
3162 kdu_config = get_configuration(vnfd, kdud["name"])
3163 if kdu_config:
3164 target_ee_list = kdu_config.get("execution-environment-list", [])
3165 else:
3166 target_ee_list = []
3167
3168 if services:
3169 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3170 mgmt_services = [
3171 service
3172 for service in kdud.get("service", [])
3173 if service.get("mgmt-service")
3174 ]
3175 for mgmt_service in mgmt_services:
3176 for service in services:
3177 if service["name"].startswith(mgmt_service["name"]):
3178 # Mgmt service found, obtain the service ip
3179 ip = service.get("external_ip", service.get("cluster_ip"))
3180 if isinstance(ip, list) and len(ip) == 1:
3181 ip = ip[0]
3182
3183 vnfr_update_dict[
3184 "kdur.{}.ip-address".format(kdu_index)
3185 ] = ip
3186
3187 # Check if the mgmt ip at the vnf must also be updated
3188 service_external_cp = mgmt_service.get(
3189 "external-connection-point-ref"
3190 )
3191 if service_external_cp:
3192 if (
3193 deep_get(vnfd, ("mgmt-interface", "cp"))
3194 == service_external_cp
3195 ):
3196 vnfr_update_dict["ip-address"] = ip
3197
3198 if find_in_list(
3199 target_ee_list,
3200 lambda ee: ee.get(
3201 "external-connection-point-ref", ""
3202 )
3203 == service_external_cp,
3204 ):
3205 vnfr_update_dict[
3206 "kdur.{}.ip-address".format(kdu_index)
3207 ] = ip
3208 break
3209 else:
3210 self.logger.warn(
3211 "Mgmt service name: {} not found".format(
3212 mgmt_service["name"]
3213 )
3214 )
3215
3216 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3217 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3218
3219 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3220 if (
3221 kdu_config
3222 and kdu_config.get("initial-config-primitive")
3223 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3224 ):
3225 initial_config_primitive_list = kdu_config.get(
3226 "initial-config-primitive"
3227 )
3228 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3229
3230 for initial_config_primitive in initial_config_primitive_list:
3231 primitive_params_ = self._map_primitive_params(
3232 initial_config_primitive, {}, {}
3233 )
3234
3235 await asyncio.wait_for(
3236 self.k8scluster_map[k8sclustertype].exec_primitive(
3237 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3238 kdu_instance=kdu_instance,
3239 primitive_name=initial_config_primitive["name"],
3240 params=primitive_params_,
3241 db_dict=db_dict_install,
3242 vca_id=vca_id,
3243 ),
3244 timeout=timeout,
3245 )
3246
3247 except Exception as e:
3248 # Prepare update db with error and raise exception
3249 try:
3250 self.update_db_2(
3251 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3252 )
3253 self.update_db_2(
3254 "vnfrs",
3255 vnfr_data.get("_id"),
3256 {"kdur.{}.status".format(kdu_index): "ERROR"},
3257 )
3258 except Exception:
3259 # ignore to keep original exception
3260 pass
3261 # reraise original error
3262 raise
3263
3264 return kdu_instance
3265
3266 async def deploy_kdus(
3267 self,
3268 logging_text,
3269 nsr_id,
3270 nslcmop_id,
3271 db_vnfrs,
3272 db_vnfds,
3273 task_instantiation_info,
3274 ):
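"""
Launch one _install_kdu task per kdur found in the vnf records: resolve the target K8s
cluster (initializing helm-v3 clusters for backward compatibility if needed), synchronize its
helm repos once, write the K8s instance info at the nsr and register the task in
task_instantiation_info.
"""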
3275 # Launch kdus if present in the descriptor
3276
3277 k8scluster_id_2_uuic = {
3278 "helm-chart-v3": {},
3279 "helm-chart": {},
3280 "juju-bundle": {},
3281 }
3282
3283 async def _get_cluster_id(cluster_id, cluster_type):
3284 nonlocal k8scluster_id_2_uuic
3285 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3286 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3287
3288 # check if the K8s cluster is being created; wait for any previous related tasks in process
3289 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3290 "k8scluster", cluster_id
3291 )
3292 if task_dependency:
3293 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3294 task_name, cluster_id
3295 )
3296 self.logger.debug(logging_text + text)
3297 await asyncio.wait(task_dependency, timeout=3600)
3298
3299 db_k8scluster = self.db.get_one(
3300 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3301 )
3302 if not db_k8scluster:
3303 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3304
3305 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3306 if not k8s_id:
3307 if cluster_type == "helm-chart-v3":
3308 try:
3309 # backward compatibility for existing clusters that have not been initialized for helm v3
3310 k8s_credentials = yaml.safe_dump(
3311 db_k8scluster.get("credentials")
3312 )
3313 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3314 k8s_credentials, reuse_cluster_uuid=cluster_id
3315 )
3316 db_k8scluster_update = {}
3317 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3318 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3319 db_k8scluster_update[
3320 "_admin.helm-chart-v3.created"
3321 ] = uninstall_sw
3322 db_k8scluster_update[
3323 "_admin.helm-chart-v3.operationalState"
3324 ] = "ENABLED"
3325 self.update_db_2(
3326 "k8sclusters", cluster_id, db_k8scluster_update
3327 )
3328 except Exception as e:
3329 self.logger.error(
3330 logging_text
3331 + "error initializing helm-v3 cluster: {}".format(str(e))
3332 )
3333 raise LcmException(
3334 "K8s cluster '{}' has not been initialized for '{}'".format(
3335 cluster_id, cluster_type
3336 )
3337 )
3338 else:
3339 raise LcmException(
3340 "K8s cluster '{}' has not been initialized for '{}'".format(
3341 cluster_id, cluster_type
3342 )
3343 )
3344 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3345 return k8s_id
3346
3347 logging_text += "Deploy kdus: "
3348 step = ""
3349 try:
3350 db_nsr_update = {"_admin.deployed.K8s": []}
3351 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3352
3353 index = 0
3354 updated_cluster_list = []
3355 updated_v3_cluster_list = []
3356
3357 for vnfr_data in db_vnfrs.values():
3358 vca_id = self.get_vca_id(vnfr_data, {})
3359 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3360 # Step 0: Prepare and set parameters
3361 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3362 vnfd_id = vnfr_data.get("vnfd-id")
3363 vnfd_with_id = find_in_list(
3364 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3365 )
3366 kdud = next(
3367 kdud
3368 for kdud in vnfd_with_id["kdu"]
3369 if kdud["name"] == kdur["kdu-name"]
3370 )
3371 namespace = kdur.get("k8s-namespace")
3372 kdu_deployment_name = kdur.get("kdu-deployment-name")
3373 if kdur.get("helm-chart"):
3374 kdumodel = kdur["helm-chart"]
3375 # Default version is helm3; if helm-version is v2, use helm-chart (v2)
3376 k8sclustertype = "helm-chart-v3"
3377 self.logger.debug("kdur: {}".format(kdur))
3378 if (
3379 kdur.get("helm-version")
3380 and kdur.get("helm-version") == "v2"
3381 ):
3382 k8sclustertype = "helm-chart"
3383 elif kdur.get("juju-bundle"):
3384 kdumodel = kdur["juju-bundle"]
3385 k8sclustertype = "juju-bundle"
3386 else:
3387 raise LcmException(
3388 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3389 "juju-bundle. Maybe an old NBI version is running".format(
3390 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3391 )
3392 )
3393 # check if kdumodel is a file and exists
3394 try:
3395 vnfd_with_id = find_in_list(
3396 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3397 )
3398 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3399 if storage: # may not be present if the vnfd has no artifacts
3400 # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
3401 if storage["pkg-dir"]:
3402 filename = "{}/{}/{}s/{}".format(
3403 storage["folder"],
3404 storage["pkg-dir"],
3405 k8sclustertype,
3406 kdumodel,
3407 )
3408 else:
3409 filename = "{}/Scripts/{}s/{}".format(
3410 storage["folder"],
3411 k8sclustertype,
3412 kdumodel,
3413 )
3414 if self.fs.file_exists(
3415 filename, mode="file"
3416 ) or self.fs.file_exists(filename, mode="dir"):
3417 kdumodel = self.fs.path + filename
3418 except (asyncio.TimeoutError, asyncio.CancelledError):
3419 raise
3420 except Exception: # it is not a file
3421 pass
3422
3423 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3424 step = "Synchronize repos for k8s cluster '{}'".format(
3425 k8s_cluster_id
3426 )
3427 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3428
3429 # Synchronize repos
3430 if (
3431 k8sclustertype == "helm-chart"
3432 and cluster_uuid not in updated_cluster_list
3433 ) or (
3434 k8sclustertype == "helm-chart-v3"
3435 and cluster_uuid not in updated_v3_cluster_list
3436 ):
3437 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3438 self.k8scluster_map[k8sclustertype].synchronize_repos(
3439 cluster_uuid=cluster_uuid
3440 )
3441 )
3442 if del_repo_list or added_repo_dict:
3443 if k8sclustertype == "helm-chart":
3444 unset = {
3445 "_admin.helm_charts_added." + item: None
3446 for item in del_repo_list
3447 }
3448 updated = {
3449 "_admin.helm_charts_added." + item: name
3450 for item, name in added_repo_dict.items()
3451 }
3452 updated_cluster_list.append(cluster_uuid)
3453 elif k8sclustertype == "helm-chart-v3":
3454 unset = {
3455 "_admin.helm_charts_v3_added." + item: None
3456 for item in del_repo_list
3457 }
3458 updated = {
3459 "_admin.helm_charts_v3_added." + item: name
3460 for item, name in added_repo_dict.items()
3461 }
3462 updated_v3_cluster_list.append(cluster_uuid)
3463 self.logger.debug(
3464 logging_text + "repos synchronized on k8s cluster "
3465 "'{}' to_delete: {}, to_add: {}".format(
3466 k8s_cluster_id, del_repo_list, added_repo_dict
3467 )
3468 )
3469 self.db.set_one(
3470 "k8sclusters",
3471 {"_id": k8s_cluster_id},
3472 updated,
3473 unset=unset,
3474 )
3475
3476 # Instantiate kdu
3477 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3478 vnfr_data["member-vnf-index-ref"],
3479 kdur["kdu-name"],
3480 k8s_cluster_id,
3481 )
3482 k8s_instance_info = {
3483 "kdu-instance": None,
3484 "k8scluster-uuid": cluster_uuid,
3485 "k8scluster-type": k8sclustertype,
3486 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3487 "kdu-name": kdur["kdu-name"],
3488 "kdu-model": kdumodel,
3489 "namespace": namespace,
3490 "kdu-deployment-name": kdu_deployment_name,
3491 }
3492 db_path = "_admin.deployed.K8s.{}".format(index)
3493 db_nsr_update[db_path] = k8s_instance_info
3494 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3495 vnfd_with_id = find_in_list(
3496 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3497 )
3498 task = asyncio.ensure_future(
3499 self._install_kdu(
3500 nsr_id,
3501 db_path,
3502 vnfr_data,
3503 kdu_index,
3504 kdud,
3505 vnfd_with_id,
3506 k8s_instance_info,
3507 k8params=desc_params,
3508 timeout=600,
3509 vca_id=vca_id,
3510 )
3511 )
3512 self.lcm_tasks.register(
3513 "ns",
3514 nsr_id,
3515 nslcmop_id,
3516 "instantiate_KDU-{}".format(index),
3517 task,
3518 )
3519 task_instantiation_info[task] = "Deploying KDU {}".format(
3520 kdur["kdu-name"]
3521 )
3522
3523 index += 1
3524
3525 except (LcmException, asyncio.CancelledError):
3526 raise
3527 except Exception as e:
3528 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3529 if isinstance(e, (N2VCException, DbException)):
3530 self.logger.error(logging_text + msg)
3531 else:
3532 self.logger.critical(logging_text + msg, exc_info=True)
3533 raise LcmException(msg)
3534 finally:
3535 if db_nsr_update:
3536 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3537
3538 def _deploy_n2vc(
3539 self,
3540 logging_text,
3541 db_nsr,
3542 db_vnfr,
3543 nslcmop_id,
3544 nsr_id,
3545 nsi_id,
3546 vnfd_id,
3547 vdu_id,
3548 kdu_name,
3549 member_vnf_index,
3550 vdu_index,
3551 vdu_name,
3552 deploy_params,
3553 descriptor_config,
3554 base_folder,
3555 task_instantiation_info,
3556 stage,
3557 ):
3558 # launch instantiate_N2VC in an asyncio task and register the task object
3559 # Look for the information of this charm at database <nsrs>._admin.deployed.VCA;
3560 # if not found, create one entry and update the database
3561 # fill db_nsr._admin.deployed.VCA.<index>
3562
3563 self.logger.debug(
3564 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
3565 )
3566 if "execution-environment-list" in descriptor_config:
3567 ee_list = descriptor_config.get("execution-environment-list", [])
3568 elif "juju" in descriptor_config:
3569 ee_list = [descriptor_config] # ns charms
3570 else: # other types such as scripts are not supported
3571 ee_list = []
3572
3573 for ee_item in ee_list:
3574 self.logger.debug(
3575 logging_text
3576 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3577 ee_item.get("juju"), ee_item.get("helm-chart")
3578 )
3579 )
3580 ee_descriptor_id = ee_item.get("id")
3581 if ee_item.get("juju"):
3582 vca_name = ee_item["juju"].get("charm")
3583 vca_type = (
3584 "lxc_proxy_charm"
3585 if ee_item["juju"].get("charm") is not None
3586 else "native_charm"
3587 )
3588 if ee_item["juju"].get("cloud") == "k8s":
3589 vca_type = "k8s_proxy_charm"
3590 elif ee_item["juju"].get("proxy") is False:
3591 vca_type = "native_charm"
3592 elif ee_item.get("helm-chart"):
3593 vca_name = ee_item["helm-chart"]
3594 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
3595 vca_type = "helm"
3596 else:
3597 vca_type = "helm-v3"
3598 else:
3599 self.logger.debug(
3600                     logging_text + "skipping: configuration is neither a juju charm nor a helm chart"
3601 )
3602 continue
3603
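# Illustrative mapping from execution-environment descriptor fragments to vca_type,
# following the checks above (hypothetical descriptor values):
#     {"juju": {"charm": "mycharm"}}                        -> "lxc_proxy_charm"
#     {"juju": {"charm": "mycharm", "cloud": "k8s"}}        -> "k8s_proxy_charm"
#     {"juju": {"charm": "mycharm", "proxy": False}}        -> "native_charm"
#     {"helm-chart": "mychart", "helm-version": "v2"}       -> "helm"
#     {"helm-chart": "mychart"}                             -> "helm-v3"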
3604 vca_index = -1
3605 for vca_index, vca_deployed in enumerate(
3606 db_nsr["_admin"]["deployed"]["VCA"]
3607 ):
3608 if not vca_deployed:
3609 continue
3610 if (
3611 vca_deployed.get("member-vnf-index") == member_vnf_index
3612 and vca_deployed.get("vdu_id") == vdu_id
3613 and vca_deployed.get("kdu_name") == kdu_name
3614 and vca_deployed.get("vdu_count_index", 0) == vdu_index
3615 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
3616 ):
3617 break
3618 else:
3619 # not found, create one.
3620 target = (
3621 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
3622 )
3623 if vdu_id:
3624 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
3625 elif kdu_name:
3626 target += "/kdu/{}".format(kdu_name)
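# Examples of the resulting target_element string (hypothetical identifiers):
#     "ns"                  -> NS-level charm
#     "vnf/1"               -> VNF-level charm of member-vnf-index "1"
#     "vnf/1/vdu/mgmtVM/0"  -> charm of the first instance of vdu "mgmtVM"
#     "vnf/1/kdu/ldap"      -> charm of kdu "ldap"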
3627 vca_deployed = {
3628 "target_element": target,
3629 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
3630 "member-vnf-index": member_vnf_index,
3631 "vdu_id": vdu_id,
3632 "kdu_name": kdu_name,
3633 "vdu_count_index": vdu_index,
3634 "operational-status": "init", # TODO revise
3635 "detailed-status": "", # TODO revise
3636 "step": "initial-deploy", # TODO revise
3637 "vnfd_id": vnfd_id,
3638 "vdu_name": vdu_name,
3639 "type": vca_type,
3640 "ee_descriptor_id": ee_descriptor_id,
3641 }
3642 vca_index += 1
3643
3644 # create VCA and configurationStatus in db
3645 db_dict = {
3646 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
3647 "configurationStatus.{}".format(vca_index): dict(),
3648 }
3649 self.update_db_2("nsrs", nsr_id, db_dict)
3650
3651 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
3652
3653 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
3654 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
3655 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
3656
3657 # Launch task
3658 task_n2vc = asyncio.ensure_future(
3659 self.instantiate_N2VC(
3660 logging_text=logging_text,
3661 vca_index=vca_index,
3662 nsi_id=nsi_id,
3663 db_nsr=db_nsr,
3664 db_vnfr=db_vnfr,
3665 vdu_id=vdu_id,
3666 kdu_name=kdu_name,
3667 vdu_index=vdu_index,
3668 deploy_params=deploy_params,
3669 config_descriptor=descriptor_config,
3670 base_folder=base_folder,
3671 nslcmop_id=nslcmop_id,
3672 stage=stage,
3673 vca_type=vca_type,
3674 vca_name=vca_name,
3675 ee_config_descriptor=ee_item,
3676 )
3677 )
3678 self.lcm_tasks.register(
3679 "ns",
3680 nsr_id,
3681 nslcmop_id,
3682 "instantiate_N2VC-{}".format(vca_index),
3683 task_n2vc,
3684 )
3685 task_instantiation_info[
3686 task_n2vc
3687 ] = self.task_name_deploy_vca + " {}.{}".format(
3688 member_vnf_index or "", vdu_id or ""
3689 )
3690
3691 @staticmethod
3692 def _create_nslcmop(nsr_id, operation, params):
3693 """
3694         Creates an nslcmop content to be stored in the database.
3695 :param nsr_id: internal id of the instance
3696 :param operation: instantiate, terminate, scale, action, ...
3697 :param params: user parameters for the operation
3698 :return: dictionary following SOL005 format
3699 """
3700 # Raise exception if invalid arguments
3701 if not (nsr_id and operation and params):
3702 raise LcmException(
3703 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3704 )
3705 now = time()
3706 _id = str(uuid4())
3707 nslcmop = {
3708 "id": _id,
3709 "_id": _id,
3710 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3711 "operationState": "PROCESSING",
3712 "statusEnteredTime": now,
3713 "nsInstanceId": nsr_id,
3714 "lcmOperationType": operation,
3715 "startTime": now,
3716 "isAutomaticInvocation": False,
3717 "operationParams": params,
3718 "isCancelPending": False,
3719 "links": {
3720 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3721 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3722 },
3723 }
3724 return nslcmop
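# Usage sketch (hypothetical identifiers, not part of the original code):
#     op = NsLcm._create_nslcmop(nsr_id, "instantiate", {"nsName": "demo"})
#     op["operationState"]  -> "PROCESSING"
#     op["links"]["self"]   -> "/osm/nslcm/v1/ns_lcm_op_occs/" + op["_id"]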
3725
3726 def _format_additional_params(self, params):
3727 params = params or {}
3728 for key, value in params.items():
3729 if str(value).startswith("!!yaml "):
3730 params[key] = yaml.safe_load(value[7:])
3731 return params
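# Illustrative behaviour (hypothetical values): a string value prefixed with
# "!!yaml " is parsed, so {"ports": "!!yaml [80, 443]"} becomes {"ports": [80, 443]};
# any other value is left untouched.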
3732
3733 def _get_terminate_primitive_params(self, seq, vnf_index):
3734 primitive = seq.get("name")
3735 primitive_params = {}
3736 params = {
3737 "member_vnf_index": vnf_index,
3738 "primitive": primitive,
3739 "primitive_params": primitive_params,
3740 }
3741 desc_params = {}
3742 return self._map_primitive_params(seq, params, desc_params)
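# Sketch (hypothetical sequence): for seq = {"name": "cleanup"} with no declared
# parameters, the mapped result is an empty dict; parameters declared in the
# terminate primitive are resolved by _map_primitive_params() from their
# default-values or from the fixed params built above.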
3743
3744 # sub-operations
3745
3746 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3747 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3748 if op.get("operationState") == "COMPLETED":
3749 # b. Skip sub-operation
3750 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3751 return self.SUBOPERATION_STATUS_SKIP
3752 else:
3753 # c. retry executing sub-operation
3754 # The sub-operation exists, and operationState != 'COMPLETED'
3755 # Update operationState = 'PROCESSING' to indicate a retry.
3756 operationState = "PROCESSING"
3757 detailed_status = "In progress"
3758 self._update_suboperation_status(
3759 db_nslcmop, op_index, operationState, detailed_status
3760 )
3761 # Return the sub-operation index
3762 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3763 # with arguments extracted from the sub-operation
3764 return op_index
3765
3766 # Find a sub-operation where all keys in a matching dictionary must match
3767 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3768 def _find_suboperation(self, db_nslcmop, match):
3769 if db_nslcmop and match:
3770 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3771 for i, op in enumerate(op_list):
3772 if all(op.get(k) == match[k] for k in match):
3773 return i
3774 return self.SUBOPERATION_STATUS_NOT_FOUND
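# Usage sketch (hypothetical values): a typical match dictionary for a pre-scale
# sub-operation could be
#     {"member_vnf_index": "1", "primitive": "touch", "lcmOperationType": "PRE-SCALE"}
# and the index of the first sub-operation whose keys all match is returned.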
3775
3776 # Update status for a sub-operation given its index
3777 def _update_suboperation_status(
3778 self, db_nslcmop, op_index, operationState, detailed_status
3779 ):
3780 # Update DB for HA tasks
3781 q_filter = {"_id": db_nslcmop["_id"]}
3782 update_dict = {
3783 "_admin.operations.{}.operationState".format(op_index): operationState,
3784 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3785 }
3786 self.db.set_one(
3787 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3788 )
3789
3790 # Add sub-operation, return the index of the added sub-operation
3791 # Optionally, set operationState, detailed-status, and operationType
3792 # Status and type are currently set for 'scale' sub-operations:
3793 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3794 # 'detailed-status' : status message
3795 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3796 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3797 def _add_suboperation(
3798 self,
3799 db_nslcmop,
3800 vnf_index,
3801 vdu_id,
3802 vdu_count_index,
3803 vdu_name,
3804 primitive,
3805 mapped_primitive_params,
3806 operationState=None,
3807 detailed_status=None,
3808 operationType=None,
3809 RO_nsr_id=None,
3810 RO_scaling_info=None,
3811 ):
3812 if not db_nslcmop:
3813 return self.SUBOPERATION_STATUS_NOT_FOUND
3814 # Get the "_admin.operations" list, if it exists
3815 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3816 op_list = db_nslcmop_admin.get("operations")
3817 # Create or append to the "_admin.operations" list
3818 new_op = {
3819 "member_vnf_index": vnf_index,
3820 "vdu_id": vdu_id,
3821 "vdu_count_index": vdu_count_index,
3822 "primitive": primitive,
3823 "primitive_params": mapped_primitive_params,
3824 }
3825 if operationState:
3826 new_op["operationState"] = operationState
3827 if detailed_status:
3828 new_op["detailed-status"] = detailed_status
3829 if operationType:
3830 new_op["lcmOperationType"] = operationType
3831 if RO_nsr_id:
3832 new_op["RO_nsr_id"] = RO_nsr_id
3833 if RO_scaling_info:
3834 new_op["RO_scaling_info"] = RO_scaling_info
3835 if not op_list:
3836 # No existing operations, create key 'operations' with current operation as first list element
3837 db_nslcmop_admin.update({"operations": [new_op]})
3838 op_list = db_nslcmop_admin.get("operations")
3839 else:
3840 # Existing operations, append operation to list
3841 op_list.append(new_op)
3842
3843 db_nslcmop_update = {"_admin.operations": op_list}
3844 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3845 op_index = len(op_list) - 1
3846 return op_index
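# Sketch of the stored entry (hypothetical values, assuming operationState="PROCESSING"
# and detailed_status="In progress" were passed): adding the first sub-operation leaves
# db_nslcmop["_admin"]["operations"] as
#     [{"member_vnf_index": "1", "vdu_id": None, "vdu_count_index": None,
#       "primitive": "touch", "primitive_params": {}, "operationState": "PROCESSING",
#       "detailed-status": "In progress"}]
# and returns op_index 0.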
3847
3848 # Helper methods for scale() sub-operations
3849
3850 # pre-scale/post-scale:
3851 # Check for 3 different cases:
3852 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3853 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3854 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3855 def _check_or_add_scale_suboperation(
3856 self,
3857 db_nslcmop,
3858 vnf_index,
3859 vnf_config_primitive,
3860 primitive_params,
3861 operationType,
3862 RO_nsr_id=None,
3863 RO_scaling_info=None,
3864 ):
3865 # Find this sub-operation
3866 if RO_nsr_id and RO_scaling_info:
3867 operationType = "SCALE-RO"
3868 match = {
3869 "member_vnf_index": vnf_index,
3870 "RO_nsr_id": RO_nsr_id,
3871 "RO_scaling_info": RO_scaling_info,
3872 }
3873 else:
3874 match = {
3875 "member_vnf_index": vnf_index,
3876 "primitive": vnf_config_primitive,
3877 "primitive_params": primitive_params,
3878 "lcmOperationType": operationType,
3879 }
3880 op_index = self._find_suboperation(db_nslcmop, match)
3881 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3882 # a. New sub-operation
3883 # The sub-operation does not exist, add it.
3884 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3885             # The following parameters are set to None for all kinds of scaling:
3886 vdu_id = None
3887 vdu_count_index = None
3888 vdu_name = None
3889 if RO_nsr_id and RO_scaling_info:
3890 vnf_config_primitive = None
3891 primitive_params = None
3892 else:
3893 RO_nsr_id = None
3894 RO_scaling_info = None
3895 # Initial status for sub-operation
3896 operationState = "PROCESSING"
3897 detailed_status = "In progress"
3898 # Add sub-operation for pre/post-scaling (zero or more operations)
3899 self._add_suboperation(
3900 db_nslcmop,
3901 vnf_index,
3902 vdu_id,
3903 vdu_count_index,
3904 vdu_name,
3905 vnf_config_primitive,
3906 primitive_params,
3907 operationState,
3908 detailed_status,
3909 operationType,
3910 RO_nsr_id,
3911 RO_scaling_info,
3912 )
3913 return self.SUBOPERATION_STATUS_NEW
3914 else:
3915 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3916 # or op_index (operationState != 'COMPLETED')
3917 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
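# Summary of the possible return values, as implemented above:
#     SUBOPERATION_STATUS_NEW  -> the sub-operation did not exist and was just added
#     SUBOPERATION_STATUS_SKIP -> it exists and is already COMPLETED, so it is skipped
#     op_index (>= 0)          -> it exists but is not COMPLETED, so it will be retried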
3918
3919 # Function to return execution_environment id
3920
3921 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3922 # TODO vdu_index_count
3923 for vca in vca_deployed_list:
3924 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3925 return vca["ee_id"]
3926
3927 async def destroy_N2VC(
3928 self,
3929 logging_text,
3930 db_nslcmop,
3931 vca_deployed,
3932 config_descriptor,
3933 vca_index,
3934 destroy_ee=True,
3935 exec_primitives=True,
3936 scaling_in=False,
3937 vca_id: str = None,
3938 ):
3939 """
3940         Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
3941 :param logging_text:
3942 :param db_nslcmop:
3943         :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
3944 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
3945 :param vca_index: index in the database _admin.deployed.VCA
3946         :param destroy_ee: False to skip destroying it here, because all of them will be destroyed at once later
3947         :param exec_primitives: False to skip executing terminate primitives, because the configuration was not
3948             completed or did not execute properly
3949 :param scaling_in: True destroys the application, False destroys the model
3950 :return: None or exception
3951 """
3952
3953 self.logger.debug(
3954 logging_text
3955 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
3956 vca_index, vca_deployed, config_descriptor, destroy_ee
3957 )
3958 )
3959
3960 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
3961
3962 # execute terminate_primitives
3963 if exec_primitives:
3964 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
3965 config_descriptor.get("terminate-config-primitive"),
3966 vca_deployed.get("ee_descriptor_id"),
3967 )
3968 vdu_id = vca_deployed.get("vdu_id")
3969 vdu_count_index = vca_deployed.get("vdu_count_index")
3970 vdu_name = vca_deployed.get("vdu_name")
3971 vnf_index = vca_deployed.get("member-vnf-index")
3972 if terminate_primitives and vca_deployed.get("needed_terminate"):
3973 for seq in terminate_primitives:
3974 # For each sequence in list, get primitive and call _ns_execute_primitive()
3975 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
3976 vnf_index, seq.get("name")
3977 )
3978 self.logger.debug(logging_text + step)
3979 # Create the primitive for each sequence, i.e. "primitive": "touch"
3980 primitive = seq.get("name")
3981 mapped_primitive_params = self._get_terminate_primitive_params(
3982 seq, vnf_index
3983 )
3984
3985 # Add sub-operation
3986 self._add_suboperation(
3987 db_nslcmop,
3988 vnf_index,
3989 vdu_id,
3990 vdu_count_index,
3991 vdu_name,
3992 primitive,
3993 mapped_primitive_params,
3994 )
3995 # Sub-operations: Call _ns_execute_primitive() instead of action()
3996 try:
3997 result, result_detail = await self._ns_execute_primitive(
3998 vca_deployed["ee_id"],
3999 primitive,
4000 mapped_primitive_params,
4001 vca_type=vca_type,
4002 vca_id=vca_id,
4003 )
4004 except LcmException:
4005                         # this happens when the VCA is not deployed. In this case there is nothing to terminate
4006 continue
4007 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
4008 if result not in result_ok:
4009 raise LcmException(
4010 "terminate_primitive {} for vnf_member_index={} fails with "
4011 "error {}".format(seq.get("name"), vnf_index, result_detail)
4012 )
4013                 # mark that this VCA does not need to be terminated
4014 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
4015 vca_index
4016 )
4017 self.update_db_2(
4018 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
4019 )
4020
4021 # Delete Prometheus Jobs if any
4022 # This uses NSR_ID, so it will destroy any jobs under this index
4023 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
4024
4025 if destroy_ee:
4026 await self.vca_map[vca_type].delete_execution_environment(
4027 vca_deployed["ee_id"],
4028 scaling_in=scaling_in,
4029 vca_type=vca_type,
4030 vca_id=vca_id,
4031 )
4032
4033 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4034 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4035 namespace = "." + db_nsr["_id"]
4036 try:
4037 await self.n2vc.delete_namespace(
4038 namespace=namespace,
4039 total_timeout=self.timeout_charm_delete,
4040 vca_id=vca_id,
4041 )
4042 except N2VCNotFound: # already deleted. Skip
4043 pass
4044 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4045
4046 async def _terminate_RO(
4047 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4048 ):
4049 """
4050 Terminates a deployment from RO
4051 :param logging_text:
4052 :param nsr_deployed: db_nsr._admin.deployed
4053 :param nsr_id:
4054 :param nslcmop_id:
4055         :param stage: list of strings with the content to write to db_nslcmop.detailed-status.
4056             This method updates only index 2, but it writes the concatenated content of the list to the database
4057 :return:
4058 """
4059 db_nsr_update = {}
4060 failed_detail = []
4061 ro_nsr_id = ro_delete_action = None
4062 if nsr_deployed and nsr_deployed.get("RO"):
4063 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
4064 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
4065 try:
4066 if ro_nsr_id:
4067 stage[2] = "Deleting ns from VIM."
4068 db_nsr_update["detailed-status"] = " ".join(stage)
4069 self._write_op_status(nslcmop_id, stage)
4070 self.logger.debug(logging_text + stage[2])
4071 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4072 self._write_op_status(nslcmop_id, stage)
4073 desc = await self.RO.delete("ns", ro_nsr_id)
4074 ro_delete_action = desc["action_id"]
4075 db_nsr_update[
4076 "_admin.deployed.RO.nsr_delete_action_id"
4077 ] = ro_delete_action
4078 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4079 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4080 if ro_delete_action:
4081 # wait until NS is deleted from VIM
4082                 stage[2] = "Waiting for ns to be deleted from VIM."
4083 detailed_status_old = None
4084 self.logger.debug(
4085 logging_text
4086 + stage[2]
4087 + " RO_id={} ro_delete_action={}".format(
4088 ro_nsr_id, ro_delete_action
4089 )
4090 )
4091 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4092 self._write_op_status(nslcmop_id, stage)
4093
4094 delete_timeout = 20 * 60 # 20 minutes
4095 while delete_timeout > 0:
4096 desc = await self.RO.show(
4097 "ns",
4098 item_id_name=ro_nsr_id,
4099 extra_item="action",
4100 extra_item_id=ro_delete_action,
4101 )
4102
4103 # deploymentStatus
4104 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4105
4106 ns_status, ns_status_info = self.RO.check_action_status(desc)
4107 if ns_status == "ERROR":
4108 raise ROclient.ROClientException(ns_status_info)
4109 elif ns_status == "BUILD":
4110 stage[2] = "Deleting from VIM {}".format(ns_status_info)
4111 elif ns_status == "ACTIVE":
4112 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4113 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4114 break
4115 else:
4116 assert (
4117 False
4118 ), "ROclient.check_action_status returns unknown {}".format(
4119 ns_status
4120 )
4121 if stage[2] != detailed_status_old:
4122 detailed_status_old = stage[2]
4123 db_nsr_update["detailed-status"] = " ".join(stage)
4124 self._write_op_status(nslcmop_id, stage)
4125 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4126 await asyncio.sleep(5, loop=self.loop)
4127 delete_timeout -= 5
4128 else: # delete_timeout <= 0:
4129 raise ROclient.ROClientException(
4130                         "Timeout waiting for ns to be deleted from VIM"
4131 )
4132
4133 except Exception as e:
4134 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4135 if (
4136 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4137 ): # not found
4138 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4139 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4140 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4141 self.logger.debug(
4142 logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
4143 )
4144 elif (
4145 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4146 ): # conflict
4147 failed_detail.append("delete conflict: {}".format(e))
4148 self.logger.debug(
4149 logging_text
4150 + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
4151 )
4152 else:
4153 failed_detail.append("delete error: {}".format(e))
4154 self.logger.error(
4155 logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
4156 )
4157
4158 # Delete nsd
4159 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
4160 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
4161 try:
4162 stage[2] = "Deleting nsd from RO."
4163 db_nsr_update["detailed-status"] = " ".join(stage)
4164 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4165 self._write_op_status(nslcmop_id, stage)
4166 await self.RO.delete("nsd", ro_nsd_id)
4167 self.logger.debug(
4168 logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
4169 )
4170 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4171 except Exception as e:
4172 if (
4173 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4174 ): # not found
4175 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4176 self.logger.debug(
4177 logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
4178 )
4179 elif (
4180 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4181 ): # conflict
4182 failed_detail.append(
4183 "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
4184 )
4185 self.logger.debug(logging_text + failed_detail[-1])
4186 else:
4187 failed_detail.append(
4188 "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
4189 )
4190 self.logger.error(logging_text + failed_detail[-1])
4191
4192 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
4193 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
4194 if not vnf_deployed or not vnf_deployed["id"]:
4195 continue
4196 try:
4197 ro_vnfd_id = vnf_deployed["id"]
4198 stage[
4199 2
4200 ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
4201 vnf_deployed["member-vnf-index"], ro_vnfd_id
4202 )
4203 db_nsr_update["detailed-status"] = " ".join(stage)
4204 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4205 self._write_op_status(nslcmop_id, stage)
4206 await self.RO.delete("vnfd", ro_vnfd_id)
4207 self.logger.debug(
4208 logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
4209 )
4210 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
4211 except Exception as e:
4212 if (
4213 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4214 ): # not found
4215 db_nsr_update[
4216 "_admin.deployed.RO.vnfd.{}.id".format(index)
4217 ] = None
4218 self.logger.debug(
4219 logging_text
4220 + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
4221 )
4222 elif (
4223 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4224 ): # conflict
4225 failed_detail.append(
4226 "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
4227 )
4228 self.logger.debug(logging_text + failed_detail[-1])
4229 else:
4230 failed_detail.append(
4231 "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
4232 )
4233 self.logger.error(logging_text + failed_detail[-1])
4234
4235 if failed_detail:
4236 stage[2] = "Error deleting from VIM"
4237 else:
4238 stage[2] = "Deleted from VIM"
4239 db_nsr_update["detailed-status"] = " ".join(stage)
4240 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4241 self._write_op_status(nslcmop_id, stage)
4242
4243 if failed_detail:
4244 raise LcmException("; ".join(failed_detail))
4245
4246 async def terminate(self, nsr_id, nslcmop_id):
4247 # Try to lock HA task here
4248 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4249 if not task_is_locked_by_me:
4250 return
4251
4252 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4253 self.logger.debug(logging_text + "Enter")
4254 timeout_ns_terminate = self.timeout_ns_terminate
4255 db_nsr = None
4256 db_nslcmop = None
4257 operation_params = None
4258 exc = None
4259         error_list = []  # accumulates all failure messages
4260         db_nslcmop_update = {}
4261         autoremove = False  # autoremove after termination
4262 tasks_dict_info = {}
4263 db_nsr_update = {}
4264 stage = [
4265 "Stage 1/3: Preparing task.",
4266 "Waiting for previous operations to terminate.",
4267 "",
4268 ]
4269 # ^ contains [stage, step, VIM-status]
4270 try:
4271 # wait for any previous tasks in process
4272 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4273
4274 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4275 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4276 operation_params = db_nslcmop.get("operationParams") or {}
4277 if operation_params.get("timeout_ns_terminate"):
4278 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4279 stage[1] = "Getting nsr={} from db.".format(nsr_id)
4280 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4281
4282 db_nsr_update["operational-status"] = "terminating"
4283 db_nsr_update["config-status"] = "terminating"
4284 self._write_ns_status(
4285 nsr_id=nsr_id,
4286 ns_state="TERMINATING",
4287 current_operation="TERMINATING",
4288 current_operation_id=nslcmop_id,
4289 other_update=db_nsr_update,
4290 )
4291 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4292 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4293 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4294 return
4295
4296 stage[1] = "Getting vnf descriptors from db."
4297 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4298 db_vnfrs_dict = {
4299 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4300 }
4301 db_vnfds_from_id = {}
4302 db_vnfds_from_member_index = {}
4303 # Loop over VNFRs
4304 for vnfr in db_vnfrs_list:
4305 vnfd_id = vnfr["vnfd-id"]
4306 if vnfd_id not in db_vnfds_from_id:
4307 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4308 db_vnfds_from_id[vnfd_id] = vnfd
4309 db_vnfds_from_member_index[
4310 vnfr["member-vnf-index-ref"]
4311 ] = db_vnfds_from_id[vnfd_id]
4312
4313 # Destroy individual execution environments when there are terminating primitives.
4314             # The rest of the EEs will be deleted at once
4315 # TODO - check before calling _destroy_N2VC
4316 # if not operation_params.get("skip_terminate_primitives"):#
4317 # or not vca.get("needed_terminate"):
4318 stage[0] = "Stage 2/3 execute terminating primitives."
4319 self.logger.debug(logging_text + stage[0])
4320             stage[1] = "Looking for execution environments that need terminate primitives."
4321 self.logger.debug(logging_text + stage[1])
4322
4323 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4324 config_descriptor = None
4325 vca_member_vnf_index = vca.get("member-vnf-index")
4326 vca_id = self.get_vca_id(
4327 db_vnfrs_dict.get(vca_member_vnf_index)
4328 if vca_member_vnf_index
4329 else None,
4330 db_nsr,
4331 )
4332 if not vca or not vca.get("ee_id"):
4333 continue
4334 if not vca.get("member-vnf-index"):
4335 # ns
4336 config_descriptor = db_nsr.get("ns-configuration")
4337 elif vca.get("vdu_id"):
4338 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4339 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4340 elif vca.get("kdu_name"):
4341 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4342 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4343 else:
4344 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4345 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4346 vca_type = vca.get("type")
4347 exec_terminate_primitives = not operation_params.get(
4348 "skip_terminate_primitives"
4349 ) and vca.get("needed_terminate")
4350 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4351 # pending native charms
4352                 destroy_ee = vca_type in ("helm", "helm-v3", "native_charm")
4355 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4356 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4357 task = asyncio.ensure_future(
4358 self.destroy_N2VC(
4359 logging_text,
4360 db_nslcmop,
4361 vca,
4362 config_descriptor,
4363 vca_index,
4364 destroy_ee,
4365 exec_terminate_primitives,
4366 vca_id=vca_id,
4367 )
4368 )
4369 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4370
4371 # wait for pending tasks of terminate primitives
4372 if tasks_dict_info:
4373 self.logger.debug(
4374 logging_text
4375 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4376 )
4377 error_list = await self._wait_for_tasks(
4378 logging_text,
4379 tasks_dict_info,
4380 min(self.timeout_charm_delete, timeout_ns_terminate),
4381 stage,
4382 nslcmop_id,
4383 )
4384 tasks_dict_info.clear()
4385 if error_list:
4386 return # raise LcmException("; ".join(error_list))
4387
4388 # remove All execution environments at once
4389 stage[0] = "Stage 3/3 delete all."
4390
4391 if nsr_deployed.get("VCA"):
4392 stage[1] = "Deleting all execution environments."
4393 self.logger.debug(logging_text + stage[1])
4394 vca_id = self.get_vca_id({}, db_nsr)
4395 task_delete_ee = asyncio.ensure_future(
4396 asyncio.wait_for(
4397 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4398 timeout=self.timeout_charm_delete,
4399 )
4400 )
4401 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4402 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4403
4404 # Delete from k8scluster
4405 stage[1] = "Deleting KDUs."
4406 self.logger.debug(logging_text + stage[1])
4407 # print(nsr_deployed)
4408 for kdu in get_iterable(nsr_deployed, "K8s"):
4409 if not kdu or not kdu.get("kdu-instance"):
4410 continue
4411 kdu_instance = kdu.get("kdu-instance")
4412 if kdu.get("k8scluster-type") in self.k8scluster_map:
4413 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4414 vca_id = self.get_vca_id({}, db_nsr)
4415 task_delete_kdu_instance = asyncio.ensure_future(
4416 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4417 cluster_uuid=kdu.get("k8scluster-uuid"),
4418 kdu_instance=kdu_instance,
4419 vca_id=vca_id,
4420 )
4421 )
4422 else:
4423 self.logger.error(
4424 logging_text
4425 + "Unknown k8s deployment type {}".format(
4426 kdu.get("k8scluster-type")
4427 )
4428 )
4429 continue
4430 tasks_dict_info[
4431 task_delete_kdu_instance
4432 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4433
4434 # remove from RO
4435 stage[1] = "Deleting ns from VIM."
4436 if self.ng_ro:
4437 task_delete_ro = asyncio.ensure_future(
4438 self._terminate_ng_ro(
4439 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4440 )
4441 )
4442 else:
4443 task_delete_ro = asyncio.ensure_future(
4444 self._terminate_RO(
4445 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4446 )
4447 )
4448 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4449
4450             # the rest of the cleanup is done in the finally block
4451
4452 except (
4453 ROclient.ROClientException,
4454 DbException,
4455 LcmException,
4456 N2VCException,
4457 ) as e:
4458 self.logger.error(logging_text + "Exit Exception {}".format(e))
4459 exc = e
4460 except asyncio.CancelledError:
4461 self.logger.error(
4462 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4463 )
4464 exc = "Operation was cancelled"
4465 except Exception as e:
4466 exc = traceback.format_exc()
4467 self.logger.critical(
4468 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4469 exc_info=True,
4470 )
4471 finally:
4472 if exc:
4473 error_list.append(str(exc))
4474 try:
4475 # wait for pending tasks
4476 if tasks_dict_info:
4477 stage[1] = "Waiting for terminate pending tasks."
4478 self.logger.debug(logging_text + stage[1])
4479 error_list += await self._wait_for_tasks(
4480 logging_text,
4481 tasks_dict_info,
4482 timeout_ns_terminate,
4483 stage,
4484 nslcmop_id,
4485 )
4486 stage[1] = stage[2] = ""
4487 except asyncio.CancelledError:
4488 error_list.append("Cancelled")
4489                 # TODO cancel all tasks
4490 except Exception as exc:
4491 error_list.append(str(exc))
4492 # update status at database
4493 if error_list:
4494 error_detail = "; ".join(error_list)
4495 # self.logger.error(logging_text + error_detail)
4496 error_description_nslcmop = "{} Detail: {}".format(
4497 stage[0], error_detail
4498 )
4499 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4500 nslcmop_id, stage[0]
4501 )
4502
4503 db_nsr_update["operational-status"] = "failed"
4504 db_nsr_update["detailed-status"] = (
4505 error_description_nsr + " Detail: " + error_detail
4506 )
4507 db_nslcmop_update["detailed-status"] = error_detail
4508 nslcmop_operation_state = "FAILED"
4509 ns_state = "BROKEN"
4510 else:
4511 error_detail = None
4512 error_description_nsr = error_description_nslcmop = None
4513 ns_state = "NOT_INSTANTIATED"
4514 db_nsr_update["operational-status"] = "terminated"
4515 db_nsr_update["detailed-status"] = "Done"
4516 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4517 db_nslcmop_update["detailed-status"] = "Done"
4518 nslcmop_operation_state = "COMPLETED"
4519
4520 if db_nsr:
4521 self._write_ns_status(
4522 nsr_id=nsr_id,
4523 ns_state=ns_state,
4524 current_operation="IDLE",
4525 current_operation_id=None,
4526 error_description=error_description_nsr,
4527 error_detail=error_detail,
4528 other_update=db_nsr_update,
4529 )
4530 self._write_op_status(
4531 op_id=nslcmop_id,
4532 stage="",
4533 error_message=error_description_nslcmop,
4534 operation_state=nslcmop_operation_state,
4535 other_update=db_nslcmop_update,
4536 )
4537 if ns_state == "NOT_INSTANTIATED":
4538 try:
4539 self.db.set_list(
4540 "vnfrs",
4541 {"nsr-id-ref": nsr_id},
4542 {"_admin.nsState": "NOT_INSTANTIATED"},
4543 )
4544 except DbException as e:
4545 self.logger.warn(
4546 logging_text
4547 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4548 nsr_id, e
4549 )
4550 )
4551 if operation_params:
4552 autoremove = operation_params.get("autoremove", False)
4553 if nslcmop_operation_state:
4554 try:
4555 await self.msg.aiowrite(
4556 "ns",
4557 "terminated",
4558 {
4559 "nsr_id": nsr_id,
4560 "nslcmop_id": nslcmop_id,
4561 "operationState": nslcmop_operation_state,
4562 "autoremove": autoremove,
4563 },
4564 loop=self.loop,
4565 )
4566 except Exception as e:
4567 self.logger.error(
4568 logging_text + "kafka_write notification Exception {}".format(e)
4569 )
4570
4571 self.logger.debug(logging_text + "Exit")
4572 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4573
4574 async def _wait_for_tasks(
4575 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4576 ):
4577 time_start = time()
4578 error_detail_list = []
4579 error_list = []
4580 pending_tasks = list(created_tasks_info.keys())
4581 num_tasks = len(pending_tasks)
4582 num_done = 0
4583 stage[1] = "{}/{}.".format(num_done, num_tasks)
4584 self._write_op_status(nslcmop_id, stage)
4585 while pending_tasks:
4586 new_error = None
4587 _timeout = timeout + time_start - time()
4588 done, pending_tasks = await asyncio.wait(
4589 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4590 )
4591 num_done += len(done)
4592 if not done: # Timeout
4593 for task in pending_tasks:
4594 new_error = created_tasks_info[task] + ": Timeout"
4595 error_detail_list.append(new_error)
4596 error_list.append(new_error)
4597 break
4598 for task in done:
4599 if task.cancelled():
4600 exc = "Cancelled"
4601 else:
4602 exc = task.exception()
4603 if exc:
4604 if isinstance(exc, asyncio.TimeoutError):
4605 exc = "Timeout"
4606 new_error = created_tasks_info[task] + ": {}".format(exc)
4607 error_list.append(created_tasks_info[task])
4608 error_detail_list.append(new_error)
4609 if isinstance(
4610 exc,
4611 (
4612 str,
4613 DbException,
4614 N2VCException,
4615 ROclient.ROClientException,
4616 LcmException,
4617 K8sException,
4618 NgRoException,
4619 ),
4620 ):
4621 self.logger.error(logging_text + new_error)
4622 else:
4623 exc_traceback = "".join(
4624 traceback.format_exception(None, exc, exc.__traceback__)
4625 )
4626 self.logger.error(
4627 logging_text
4628 + created_tasks_info[task]
4629 + " "
4630 + exc_traceback
4631 )
4632 else:
4633 self.logger.debug(
4634 logging_text + created_tasks_info[task] + ": Done"
4635 )
4636 stage[1] = "{}/{}.".format(num_done, num_tasks)
4637 if new_error:
4638 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4639 if nsr_id: # update also nsr
4640 self.update_db_2(
4641 "nsrs",
4642 nsr_id,
4643 {
4644 "errorDescription": "Error at: " + ", ".join(error_list),
4645 "errorDetail": ". ".join(error_detail_list),
4646 },
4647 )
4648 self._write_op_status(nslcmop_id, stage)
4649 return error_detail_list
4650
4651 @staticmethod
4652 def _map_primitive_params(primitive_desc, params, instantiation_params):
4653 """
4654         Generates the params to be provided to the charm before executing a primitive. If the user does not provide a
4655         parameter, the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
4656 :param primitive_desc: portion of VNFD/NSD that describes primitive
4657 :param params: Params provided by user
4658 :param instantiation_params: Instantiation params provided by user
4659 :return: a dictionary with the calculated params
4660 """
4661 calculated_params = {}
4662 for parameter in primitive_desc.get("parameter", ()):
4663 param_name = parameter["name"]
4664 if param_name in params:
4665 calculated_params[param_name] = params[param_name]
4666 elif "default-value" in parameter or "value" in parameter:
4667 if "value" in parameter:
4668 calculated_params[param_name] = parameter["value"]
4669 else:
4670 calculated_params[param_name] = parameter["default-value"]
4671 if (
4672 isinstance(calculated_params[param_name], str)
4673 and calculated_params[param_name].startswith("<")
4674 and calculated_params[param_name].endswith(">")
4675 ):
4676 if calculated_params[param_name][1:-1] in instantiation_params:
4677 calculated_params[param_name] = instantiation_params[
4678 calculated_params[param_name][1:-1]
4679 ]
4680 else:
4681 raise LcmException(
4682 "Parameter {} needed to execute primitive {} not provided".format(
4683 calculated_params[param_name], primitive_desc["name"]
4684 )
4685 )
4686 else:
4687 raise LcmException(
4688 "Parameter {} needed to execute primitive {} not provided".format(
4689 param_name, primitive_desc["name"]
4690 )
4691 )
4692
4693 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4694 calculated_params[param_name] = yaml.safe_dump(
4695 calculated_params[param_name], default_flow_style=True, width=256
4696 )
4697 elif isinstance(calculated_params[param_name], str) and calculated_params[
4698 param_name
4699 ].startswith("!!yaml "):
4700 calculated_params[param_name] = calculated_params[param_name][7:]
4701 if parameter.get("data-type") == "INTEGER":
4702 try:
4703 calculated_params[param_name] = int(calculated_params[param_name])
4704 except ValueError: # error converting string to int
4705 raise LcmException(
4706 "Parameter {} of primitive {} must be integer".format(
4707 param_name, primitive_desc["name"]
4708 )
4709 )
4710 elif parameter.get("data-type") == "BOOLEAN":
4711 calculated_params[param_name] = not (
4712 (str(calculated_params[param_name])).lower() == "false"
4713 )
4714
4715         # always add ns_config_info if the primitive name is config
4716 if primitive_desc["name"] == "config":
4717 if "ns_config_info" in instantiation_params:
4718 calculated_params["ns_config_info"] = instantiation_params[
4719 "ns_config_info"
4720 ]
4721 return calculated_params
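# Worked sketch (hypothetical descriptor and instantiation params):
#     primitive_desc       = {"name": "touch", "parameter": [
#                                {"name": "filename", "default-value": "<touch_filename>"}]}
#     params               = {}
#     instantiation_params = {"touch_filename": "/home/ubuntu/first-touch"}
# yields {"filename": "/home/ubuntu/first-touch"}: the default-value is taken and,
# since it is wrapped in < >, it is resolved from instantiation_params.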
4722
4723 def _look_for_deployed_vca(
4724 self,
4725 deployed_vca,
4726 member_vnf_index,
4727 vdu_id,
4728 vdu_count_index,
4729 kdu_name=None,
4730 ee_descriptor_id=None,
4731 ):
4732         # find the vca_deployed record for this action. Raise LcmException if not found or there is no ee_id.
4733 for vca in deployed_vca:
4734 if not vca:
4735 continue
4736 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4737 continue
4738 if (
4739 vdu_count_index is not None
4740 and vdu_count_index != vca["vdu_count_index"]
4741 ):
4742 continue
4743 if kdu_name and kdu_name != vca["kdu_name"]:
4744 continue
4745 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4746 continue
4747 break
4748 else:
4749 # vca_deployed not found
4750 raise LcmException(
4751 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4752 " is not deployed".format(
4753 member_vnf_index,
4754 vdu_id,
4755 vdu_count_index,
4756 kdu_name,
4757 ee_descriptor_id,
4758 )
4759 )
4760 # get ee_id
4761 ee_id = vca.get("ee_id")
4762 vca_type = vca.get(
4763 "type", "lxc_proxy_charm"
4764 ) # default value for backward compatibility - proxy charm
4765 if not ee_id:
4766 raise LcmException(
4767 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4768 "execution environment".format(
4769 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4770 )
4771 )
4772 return ee_id, vca_type
4773
4774 async def _ns_execute_primitive(
4775 self,
4776 ee_id,
4777 primitive,
4778 primitive_params,
4779 retries=0,
4780 retries_interval=30,
4781 timeout=None,
4782 vca_type=None,
4783 db_dict=None,
4784 vca_id: str = None,
4785 ) -> (str, str):
4786 try:
4787 if primitive == "config":
4788 primitive_params = {"params": primitive_params}
4789
4790 vca_type = vca_type or "lxc_proxy_charm"
4791
4792 while retries >= 0:
4793 try:
4794 output = await asyncio.wait_for(
4795 self.vca_map[vca_type].exec_primitive(
4796 ee_id=ee_id,
4797 primitive_name=primitive,
4798 params_dict=primitive_params,
4799 progress_timeout=self.timeout_progress_primitive,
4800 total_timeout=self.timeout_primitive,
4801 db_dict=db_dict,
4802 vca_id=vca_id,
4803 vca_type=vca_type,
4804 ),
4805 timeout=timeout or self.timeout_primitive,
4806 )
4807 # execution was OK
4808 break
4809 except asyncio.CancelledError:
4810 raise
4811 except Exception as e: # asyncio.TimeoutError
4812 if isinstance(e, asyncio.TimeoutError):
4813 e = "Timeout"
4814 retries -= 1
4815 if retries >= 0:
4816 self.logger.debug(
4817 "Error executing action {} on {} -> {}".format(
4818 primitive, ee_id, e
4819 )
4820 )
4821 # wait and retry
4822 await asyncio.sleep(retries_interval, loop=self.loop)
4823 else:
4824 return "FAILED", str(e)
4825
4826 return "COMPLETED", output
4827
4828 except (LcmException, asyncio.CancelledError):
4829 raise
4830 except Exception as e:
4831 return "FAIL", "Error executing action {}: {}".format(primitive, e)
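# The coroutine returns a (state, detail) tuple: ("COMPLETED", output) on success,
# ("FAILED", <error text>) once retries are exhausted, and ("FAIL", <error text>)
# for unexpected exceptions outside the retry loop.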
4832
4833 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4834 """
4835         Update the vca_status in the nsrs record with the latest Juju information
4836         :param nsr_id: Id of the nsr
4837         :param nslcmop_id: Id of the nslcmop
4838 :return: None
4839 """
4840
4841 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4842 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4843 vca_id = self.get_vca_id({}, db_nsr)
4844 if db_nsr["_admin"]["deployed"]["K8s"]:
4845 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4846 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4847 await self._on_update_k8s_db(
4848 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4849 )
4850 else:
4851 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4852 table, filter = "nsrs", {"_id": nsr_id}
4853 path = "_admin.deployed.VCA.{}.".format(vca_index)
4854 await self._on_update_n2vc_db(table, filter, path, {})
4855
4856 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4857 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4858
4859 async def action(self, nsr_id, nslcmop_id):
4860 # Try to lock HA task here
4861 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4862 if not task_is_locked_by_me:
4863 return
4864
4865 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4866 self.logger.debug(logging_text + "Enter")
4867 # get all needed from database
4868 db_nsr = None
4869 db_nslcmop = None
4870 db_nsr_update = {}
4871 db_nslcmop_update = {}
4872 nslcmop_operation_state = None
4873 error_description_nslcmop = None
4874 exc = None
4875 try:
4876 # wait for any previous tasks in process
4877 step = "Waiting for previous operations to terminate"
4878 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4879
4880 self._write_ns_status(
4881 nsr_id=nsr_id,
4882 ns_state=None,
4883 current_operation="RUNNING ACTION",
4884 current_operation_id=nslcmop_id,
4885 )
4886
4887 step = "Getting information from database"
4888 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4889 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4890 if db_nslcmop["operationParams"].get("primitive_params"):
4891 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4892 db_nslcmop["operationParams"]["primitive_params"]
4893 )
4894
4895 nsr_deployed = db_nsr["_admin"].get("deployed")
4896 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4897 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4898 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4899 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4900 primitive = db_nslcmop["operationParams"]["primitive"]
4901 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4902 timeout_ns_action = db_nslcmop["operationParams"].get(
4903 "timeout_ns_action", self.timeout_primitive
4904 )
4905
4906 if vnf_index:
4907 step = "Getting vnfr from database"
4908 db_vnfr = self.db.get_one(
4909 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4910 )
4911 if db_vnfr.get("kdur"):
4912 kdur_list = []
4913 for kdur in db_vnfr["kdur"]:
4914 if kdur.get("additionalParams"):
4915 kdur["additionalParams"] = json.loads(kdur["additionalParams"])
4916 kdur_list.append(kdur)
4917 db_vnfr["kdur"] = kdur_list
4918 step = "Getting vnfd from database"
4919 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4920
4921 # Sync filesystem before running a primitive
4922 self.fs.sync(db_vnfr["vnfd-id"])
4923             else:
4924                 step = "Getting nsd from database"
4925                 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
                db_vnfr = None  # NS-level action: keep db_vnfr defined for get_vca_id() below
4926
4927 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4928 # for backward compatibility
4929 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4930 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4931 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4932 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4933
4934 # look for primitive
4935 config_primitive_desc = descriptor_configuration = None
4936 if vdu_id:
4937 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4938 elif kdu_name:
4939 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4940 elif vnf_index:
4941 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4942 else:
4943 descriptor_configuration = db_nsd.get("ns-configuration")
4944
4945 if descriptor_configuration and descriptor_configuration.get(
4946 "config-primitive"
4947 ):
4948 for config_primitive in descriptor_configuration["config-primitive"]:
4949 if config_primitive["name"] == primitive:
4950 config_primitive_desc = config_primitive
4951 break
4952
4953 if not config_primitive_desc:
4954 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4955 raise LcmException(
4956 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4957 primitive
4958 )
4959 )
4960 primitive_name = primitive
4961 ee_descriptor_id = None
4962 else:
4963 primitive_name = config_primitive_desc.get(
4964 "execution-environment-primitive", primitive
4965 )
4966 ee_descriptor_id = config_primitive_desc.get(
4967 "execution-environment-ref"
4968 )
4969
4970 if vnf_index:
4971 if vdu_id:
4972 vdur = next(
4973 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4974 )
4975 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4976 elif kdu_name:
4977 kdur = next(
4978 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4979 )
4980 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4981 else:
4982 desc_params = parse_yaml_strings(
4983 db_vnfr.get("additionalParamsForVnf")
4984 )
4985 else:
4986 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4987 if kdu_name and get_configuration(db_vnfd, kdu_name):
4988 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4989 actions = set()
4990                 for kdu_primitive in kdu_configuration.get("initial-config-primitive", []):
4991                     actions.add(kdu_primitive["name"])
4992                 for kdu_primitive in kdu_configuration.get("config-primitive", []):
4993                     actions.add(kdu_primitive["name"])
4994                 kdu_action = primitive_name in actions
4995
4996 # TODO check if ns is in a proper status
4997 if kdu_name and (
4998 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4999 ):
5000 # kdur and desc_params already set from before
5001 if primitive_params:
5002 desc_params.update(primitive_params)
5003 # TODO Check if we will need something at vnf level
5004 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5005 if (
5006 kdu_name == kdu["kdu-name"]
5007 and kdu["member-vnf-index"] == vnf_index
5008 ):
5009 break
5010 else:
5011 raise LcmException(
5012 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5013 )
5014
5015 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5016 msg = "unknown k8scluster-type '{}'".format(
5017 kdu.get("k8scluster-type")
5018 )
5019 raise LcmException(msg)
5020
5021 db_dict = {
5022 "collection": "nsrs",
5023 "filter": {"_id": nsr_id},
5024 "path": "_admin.deployed.K8s.{}".format(index),
5025 }
5026 self.logger.debug(
5027 logging_text
5028 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5029 )
5030 step = "Executing kdu {}".format(primitive_name)
5031 if primitive_name == "upgrade":
5032 if desc_params.get("kdu_model"):
5033 kdu_model = desc_params.get("kdu_model")
5034 del desc_params["kdu_model"]
5035 else:
5036 kdu_model = kdu.get("kdu-model")
5037 parts = kdu_model.split(sep=":")
5038 if len(parts) == 2:
5039 kdu_model = parts[0]
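# e.g. a kdu-model such as "stable/openldap:1.2.3" (hypothetical value) is reduced
# to "stable/openldap", dropping the version tag before requesting the upgrade.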
5040
5041 detailed_status = await asyncio.wait_for(
5042 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5043 cluster_uuid=kdu.get("k8scluster-uuid"),
5044 kdu_instance=kdu.get("kdu-instance"),
5045 atomic=True,
5046 kdu_model=kdu_model,
5047 params=desc_params,
5048 db_dict=db_dict,
5049 timeout=timeout_ns_action,
5050 ),
5051 timeout=timeout_ns_action + 10,
5052 )
5053 self.logger.debug(
5054 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5055 )
5056 elif primitive_name == "rollback":
5057 detailed_status = await asyncio.wait_for(
5058 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5059 cluster_uuid=kdu.get("k8scluster-uuid"),
5060 kdu_instance=kdu.get("kdu-instance"),
5061 db_dict=db_dict,
5062 ),
5063 timeout=timeout_ns_action,
5064 )
5065 elif primitive_name == "status":
5066 detailed_status = await asyncio.wait_for(
5067 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5068 cluster_uuid=kdu.get("k8scluster-uuid"),
5069 kdu_instance=kdu.get("kdu-instance"),
5070 vca_id=vca_id,
5071 ),
5072 timeout=timeout_ns_action,
5073 )
5074 else:
5075 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5076 kdu["kdu-name"], nsr_id
5077 )
5078 params = self._map_primitive_params(
5079 config_primitive_desc, primitive_params, desc_params
5080 )
5081
5082 detailed_status = await asyncio.wait_for(
5083 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5084 cluster_uuid=kdu.get("k8scluster-uuid"),
5085 kdu_instance=kdu_instance,
5086 primitive_name=primitive_name,
5087 params=params,
5088 db_dict=db_dict,
5089 timeout=timeout_ns_action,
5090 vca_id=vca_id,
5091 ),
5092 timeout=timeout_ns_action,
5093 )
5094
5095 if detailed_status:
5096 nslcmop_operation_state = "COMPLETED"
5097 else:
5098 detailed_status = ""
5099 nslcmop_operation_state = "FAILED"
5100 else:
5101 ee_id, vca_type = self._look_for_deployed_vca(
5102 nsr_deployed["VCA"],
5103 member_vnf_index=vnf_index,
5104 vdu_id=vdu_id,
5105 vdu_count_index=vdu_count_index,
5106 ee_descriptor_id=ee_descriptor_id,
5107 )
5108 for vca_index, vca_deployed in enumerate(
5109 db_nsr["_admin"]["deployed"]["VCA"]
5110 ):
5111 if vca_deployed.get("member-vnf-index") == vnf_index:
5112 db_dict = {
5113 "collection": "nsrs",
5114 "filter": {"_id": nsr_id},
5115 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5116 }
5117 break
5118 (
5119 nslcmop_operation_state,
5120 detailed_status,
5121 ) = await self._ns_execute_primitive(
5122 ee_id,
5123 primitive=primitive_name,
5124 primitive_params=self._map_primitive_params(
5125 config_primitive_desc, primitive_params, desc_params
5126 ),
5127 timeout=timeout_ns_action,
5128 vca_type=vca_type,
5129 db_dict=db_dict,
5130 vca_id=vca_id,
5131 )
5132
5133 db_nslcmop_update["detailed-status"] = detailed_status
5134 error_description_nslcmop = (
5135 detailed_status if nslcmop_operation_state == "FAILED" else ""
5136 )
5137 self.logger.debug(
5138 logging_text
5139 + " task Done with result {} {}".format(
5140 nslcmop_operation_state, detailed_status
5141 )
5142 )
5143 return # database update is called inside finally
5144
5145 except (DbException, LcmException, N2VCException, K8sException) as e:
5146 self.logger.error(logging_text + "Exit Exception {}".format(e))
5147 exc = e
5148 except asyncio.CancelledError:
5149 self.logger.error(
5150 logging_text + "Cancelled Exception while '{}'".format(step)
5151 )
5152 exc = "Operation was cancelled"
5153 except asyncio.TimeoutError:
5154 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5155 exc = "Timeout"
5156 except Exception as e:
5157 exc = traceback.format_exc()
5158 self.logger.critical(
5159 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5160 exc_info=True,
5161 )
5162 finally:
5163 if exc:
5164 db_nslcmop_update[
5165 "detailed-status"
5166 ] = (
5167 detailed_status
5168 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5169 nslcmop_operation_state = "FAILED"
5170 if db_nsr:
5171 self._write_ns_status(
5172 nsr_id=nsr_id,
5173 ns_state=db_nsr[
5174 "nsState"
5175 ], # TODO check if degraded. For the moment use previous status
5176 current_operation="IDLE",
5177 current_operation_id=None,
5178 # error_description=error_description_nsr,
5179 # error_detail=error_detail,
5180 other_update=db_nsr_update,
5181 )
5182
5183 self._write_op_status(
5184 op_id=nslcmop_id,
5185 stage="",
5186 error_message=error_description_nslcmop,
5187 operation_state=nslcmop_operation_state,
5188 other_update=db_nslcmop_update,
5189 )
5190
5191 if nslcmop_operation_state:
5192 try:
5193 await self.msg.aiowrite(
5194 "ns",
5195 "actioned",
5196 {
5197 "nsr_id": nsr_id,
5198 "nslcmop_id": nslcmop_id,
5199 "operationState": nslcmop_operation_state,
5200 },
5201 loop=self.loop,
5202 )
5203 except Exception as e:
5204 self.logger.error(
5205 logging_text + "kafka_write notification Exception {}".format(e)
5206 )
5207 self.logger.debug(logging_text + "Exit")
5208 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5209 return nslcmop_operation_state, detailed_status
5210
5211 async def scale(self, nsr_id, nslcmop_id):
5212 # Try to lock HA task here
5213 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5214 if not task_is_locked_by_me:
5215 return
5216
5217 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
5218 stage = ["", "", ""]
5219 tasks_dict_info = {}
5220 # ^ stage, step, VIM progress
5221 self.logger.debug(logging_text + "Enter")
5222 # get all needed from database
5223 db_nsr = None
5224 db_nslcmop_update = {}
5225 db_nsr_update = {}
5226 exc = None
5227         # in case of error, indicates which part of the scale operation failed, in order to put the nsr in error status
5228 scale_process = None
5229 old_operational_status = ""
5230 old_config_status = ""
5231 nsi_id = None
5232 try:
5233 # wait for any previous tasks in process
5234 step = "Waiting for previous operations to terminate"
5235 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5236 self._write_ns_status(
5237 nsr_id=nsr_id,
5238 ns_state=None,
5239 current_operation="SCALING",
5240 current_operation_id=nslcmop_id,
5241 )
5242
5243 step = "Getting nslcmop from database"
5244 self.logger.debug(
5245 step + " after having waited for previous tasks to be completed"
5246 )
5247 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5248
5249 step = "Getting nsr from database"
5250 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5251 old_operational_status = db_nsr["operational-status"]
5252 old_config_status = db_nsr["config-status"]
5253
5254 step = "Parsing scaling parameters"
5255 db_nsr_update["operational-status"] = "scaling"
5256 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5257 nsr_deployed = db_nsr["_admin"].get("deployed")
5258
5259 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
5260 "scaleByStepData"
5261 ]["member-vnf-index"]
5262 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
5263 "scaleByStepData"
5264 ]["scaling-group-descriptor"]
5265 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
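# Illustrative sketch (editor's addition; the values are hypothetical): the
# operationParams consumed above are expected to look like
#   "scaleVnfData": {
#       "scaleVnfType": "SCALE_OUT",
#       "scaleByStepData": {
#           "member-vnf-index": "1",
#           "scaling-group-descriptor": "my-scaling-aspect",
#       },
#   }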
5266 # for backward compatibility
5267 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5268 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5269 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5270 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5271
5272 step = "Getting vnfr from database"
5273 db_vnfr = self.db.get_one(
5274 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5275 )
5276
5277 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5278
5279 step = "Getting vnfd from database"
5280 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5281
5282 base_folder = db_vnfd["_admin"]["storage"]
5283
5284 step = "Getting scaling-group-descriptor"
5285 scaling_descriptor = find_in_list(
5286 get_scaling_aspect(db_vnfd),
5287 lambda scale_desc: scale_desc["name"] == scaling_group,
5288 )
5289 if not scaling_descriptor:
5290 raise LcmException(
5291 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
5292 "at vnfd:scaling-group-descriptor".format(scaling_group)
5293 )
5294
5295 step = "Sending scale order to VIM"
5296 # TODO check if ns is in a proper status
5297 nb_scale_op = 0
5298 if not db_nsr["_admin"].get("scaling-group"):
5299 self.update_db_2(
5300 "nsrs",
5301 nsr_id,
5302 {
5303 "_admin.scaling-group": [
5304 {"name": scaling_group, "nb-scale-op": 0}
5305 ]
5306 },
5307 )
5308 admin_scale_index = 0
5309 else:
5310 for admin_scale_index, admin_scale_info in enumerate(
5311 db_nsr["_admin"]["scaling-group"]
5312 ):
5313 if admin_scale_info["name"] == scaling_group:
5314 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
5315 break
5316 else:  # not found: use the index one past the last element and add a new entry with this name
5317 admin_scale_index += 1
5318 db_nsr_update[
5319 "_admin.scaling-group.{}.name".format(admin_scale_index)
5320 ] = scaling_group
5321
5322 vca_scaling_info = []
5323 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
5324 if scaling_type == "SCALE_OUT":
5325 if "aspect-delta-details" not in scaling_descriptor:
5326 raise LcmException(
5327 "Aspect delta details not fount in scaling descriptor {}".format(
5328 scaling_descriptor["name"]
5329 )
5330 )
5331 # count if max-instance-count is reached
5332 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5333
5334 scaling_info["scaling_direction"] = "OUT"
5335 scaling_info["vdu-create"] = {}
5336 scaling_info["kdu-create"] = {}
5337 for delta in deltas:
5338 for vdu_delta in delta.get("vdu-delta", {}):
5339 vdud = get_vdu(db_vnfd, vdu_delta["id"])
5340 # vdu_index also provides the number of instances of the targeted vdu
5341 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5342 cloud_init_text = self._get_vdu_cloud_init_content(
5343 vdud, db_vnfd
5344 )
5345 if cloud_init_text:
5346 additional_params = (
5347 self._get_vdu_additional_params(db_vnfr, vdud["id"])
5348 or {}
5349 )
5350 cloud_init_list = []
5351
5352 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5353 max_instance_count = 10
5354 if vdu_profile and "max-number-of-instances" in vdu_profile:
5355 max_instance_count = vdu_profile.get(
5356 "max-number-of-instances", 10
5357 )
5358
5359 default_instance_num = get_number_of_instances(
5360 db_vnfd, vdud["id"]
5361 )
5362 instances_number = vdu_delta.get("number-of-instances", 1)
5363 nb_scale_op += instances_number
5364
5365 new_instance_count = nb_scale_op + default_instance_num
5366 # If the new count exceeds the max while the current vdu count is still below it,
5367 # adjust the number of instances to create accordingly
5368 if new_instance_count > max_instance_count > vdu_count:
5369 instances_number = new_instance_count - max_instance_count
5370 # otherwise the requested number of instances is kept unchanged
5371
5372
5373 if new_instance_count > max_instance_count:
5374 raise LcmException(
5375 "reached the limit of {} (max-instance-count) "
5376 "scaling-out operations for the "
5377 "scaling-group-descriptor '{}'".format(
5378 nb_scale_op, scaling_group
5379 )
5380 )
5381 for x in range(vdu_delta.get("number-of-instances", 1)):
5382 if cloud_init_text:
5383 # TODO The VDU's own IP is not available yet because db_vnfr has not been updated.
5384 additional_params["OSM"] = get_osm_params(
5385 db_vnfr, vdu_delta["id"], vdu_index + x
5386 )
5387 cloud_init_list.append(
5388 self._parse_cloud_init(
5389 cloud_init_text,
5390 additional_params,
5391 db_vnfd["id"],
5392 vdud["id"],
5393 )
5394 )
5395 vca_scaling_info.append(
5396 {
5397 "osm_vdu_id": vdu_delta["id"],
5398 "member-vnf-index": vnf_index,
5399 "type": "create",
5400 "vdu_index": vdu_index + x,
5401 }
5402 )
5403 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
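# Editor's worked example (hypothetical numbers) for the max-instance-count check above:
#   default_instance_num = 1, max_instance_count = 3, vdu-delta number-of-instances = 1
#   1st scale-out: nb_scale_op = 1, new_instance_count = 2  -> allowed
#   2nd scale-out: nb_scale_op = 2, new_instance_count = 3  -> allowed
#   3rd scale-out: nb_scale_op = 3, new_instance_count = 4  -> raises the
#       "reached the limit ... (max-instance-count)" LcmException above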
5404 for kdu_delta in delta.get("kdu-resource-delta", {}):
5405 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
5406 kdu_name = kdu_profile["kdu-name"]
5407 resource_name = kdu_profile.get("resource-name", "")
5408
5409 # The same delta may contain different kdus,
5410 # so keep a separate list for each kdu
5411 if not scaling_info["kdu-create"].get(kdu_name, None):
5412 scaling_info["kdu-create"][kdu_name] = []
5413
5414 kdur = get_kdur(db_vnfr, kdu_name)
5415 if kdur.get("helm-chart"):
5416 k8s_cluster_type = "helm-chart-v3"
5417 self.logger.debug("kdur: {}".format(kdur))
5418 if (
5419 kdur.get("helm-version")
5420 and kdur.get("helm-version") == "v2"
5421 ):
5422 k8s_cluster_type = "helm-chart"
5423 elif kdur.get("juju-bundle"):
5424 k8s_cluster_type = "juju-bundle"
5425 else:
5426 raise LcmException(
5427 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5428 "juju-bundle. Maybe an old NBI version is running".format(
5429 db_vnfr["member-vnf-index-ref"], kdu_name
5430 )
5431 )
5432
5433 max_instance_count = 10
5434 if kdu_profile and "max-number-of-instances" in kdu_profile:
5435 max_instance_count = kdu_profile.get(
5436 "max-number-of-instances", 10
5437 )
5438
5439 nb_scale_op += kdu_delta.get("number-of-instances", 1)
5440 deployed_kdu, _ = get_deployed_kdu(
5441 nsr_deployed, kdu_name, vnf_index
5442 )
5443 if deployed_kdu is None:
5444 raise LcmException(
5445 "KDU '{}' for vnf '{}' not deployed".format(
5446 kdu_name, vnf_index
5447 )
5448 )
5449 kdu_instance = deployed_kdu.get("kdu-instance")
5450 instance_num = await self.k8scluster_map[
5451 k8s_cluster_type
5452 ].get_scale_count(
5453 resource_name,
5454 kdu_instance,
5455 vca_id=vca_id,
5456 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
5457 kdu_model=deployed_kdu.get("kdu-model"),
5458 )
5459 kdu_replica_count = instance_num + kdu_delta.get(
5460 "number-of-instances", 1
5461 )
5462
5463 # If the new replica count exceeds the max while instance_num is still below it,
5464 # cap the kdu replica count at the maximum
5465 if kdu_replica_count > max_instance_count > instance_num:
5466 kdu_replica_count = max_instance_count
5467 if kdu_replica_count > max_instance_count:
5468 raise LcmException(
5469 "reached the limit of {} (max-instance-count) "
5470 "scaling-out operations for the "
5471 "scaling-group-descriptor '{}'".format(
5472 instance_num, scaling_group
5473 )
5474 )
5475
5476 for x in range(kdu_delta.get("number-of-instances", 1)):
5477 vca_scaling_info.append(
5478 {
5479 "osm_kdu_id": kdu_name,
5480 "member-vnf-index": vnf_index,
5481 "type": "create",
5482 "kdu_index": instance_num + x - 1,
5483 }
5484 )
5485 scaling_info["kdu-create"][kdu_name].append(
5486 {
5487 "member-vnf-index": vnf_index,
5488 "type": "create",
5489 "k8s-cluster-type": k8s_cluster_type,
5490 "resource-name": resource_name,
5491 "scale": kdu_replica_count,
5492 }
5493 )
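# Note (editor): "scale" above is the absolute target replica count for the kdu
# resource (current count plus the requested delta, capped at the maximum);
# _scale_kdu() further below passes it to the k8s connector's scale().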
5494 elif scaling_type == "SCALE_IN":
5495 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5496
5497 scaling_info["scaling_direction"] = "IN"
5498 scaling_info["vdu-delete"] = {}
5499 scaling_info["kdu-delete"] = {}
5500
5501 for delta in deltas:
5502 for vdu_delta in delta.get("vdu-delta", {}):
5503 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5504 min_instance_count = 0
5505 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5506 if vdu_profile and "min-number-of-instances" in vdu_profile:
5507 min_instance_count = vdu_profile["min-number-of-instances"]
5508
5509 default_instance_num = get_number_of_instances(
5510 db_vnfd, vdu_delta["id"]
5511 )
5512 instance_num = vdu_delta.get("number-of-instances", 1)
5513 nb_scale_op -= instance_num
5514
5515 new_instance_count = nb_scale_op + default_instance_num
5516
5517 if new_instance_count < min_instance_count < vdu_count:
5518 instances_number = min_instance_count - new_instance_count
5519 else:
5520 instances_number = instance_num
5521
5522 if new_instance_count < min_instance_count:
5523 raise LcmException(
5524 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5525 "scaling-group-descriptor '{}'".format(
5526 nb_scale_op, scaling_group
5527 )
5528 )
5529 for x in range(vdu_delta.get("number-of-instances", 1)):
5530 vca_scaling_info.append(
5531 {
5532 "osm_vdu_id": vdu_delta["id"],
5533 "member-vnf-index": vnf_index,
5534 "type": "delete",
5535 "vdu_index": vdu_index - 1 - x,
5536 }
5537 )
5538 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
5539 for kdu_delta in delta.get("kdu-resource-delta", {}):
5540 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
5541 kdu_name = kdu_profile["kdu-name"]
5542 resource_name = kdu_profile.get("resource-name", "")
5543
5544 if not scaling_info["kdu-delete"].get(kdu_name, None):
5545 scaling_info["kdu-delete"][kdu_name] = []
5546
5547 kdur = get_kdur(db_vnfr, kdu_name)
5548 if kdur.get("helm-chart"):
5549 k8s_cluster_type = "helm-chart-v3"
5550 self.logger.debug("kdur: {}".format(kdur))
5551 if (
5552 kdur.get("helm-version")
5553 and kdur.get("helm-version") == "v2"
5554 ):
5555 k8s_cluster_type = "helm-chart"
5556 elif kdur.get("juju-bundle"):
5557 k8s_cluster_type = "juju-bundle"
5558 else:
5559 raise LcmException(
5560 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5561 "juju-bundle. Maybe an old NBI version is running".format(
5562 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
5563 )
5564 )
5565
5566 min_instance_count = 0
5567 if kdu_profile and "min-number-of-instances" in kdu_profile:
5568 min_instance_count = kdu_profile["min-number-of-instances"]
5569
5570 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
5571 deployed_kdu, _ = get_deployed_kdu(
5572 nsr_deployed, kdu_name, vnf_index
5573 )
5574 if deployed_kdu is None:
5575 raise LcmException(
5576 "KDU '{}' for vnf '{}' not deployed".format(
5577 kdu_name, vnf_index
5578 )
5579 )
5580 kdu_instance = deployed_kdu.get("kdu-instance")
5581 instance_num = await self.k8scluster_map[
5582 k8s_cluster_type
5583 ].get_scale_count(
5584 resource_name,
5585 kdu_instance,
5586 vca_id=vca_id,
5587 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
5588 kdu_model=deployed_kdu.get("kdu-model"),
5589 )
5590 kdu_replica_count = instance_num - kdu_delta.get(
5591 "number-of-instances", 1
5592 )
5593
5594 if kdu_replica_count < min_instance_count < instance_num:
5595 kdu_replica_count = min_instance_count
5596 if kdu_replica_count < min_instance_count:
5597 raise LcmException(
5598 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5599 "scaling-group-descriptor '{}'".format(
5600 instance_num, scaling_group
5601 )
5602 )
5603
5604 for x in range(kdu_delta.get("number-of-instances", 1)):
5605 vca_scaling_info.append(
5606 {
5607 "osm_kdu_id": kdu_name,
5608 "member-vnf-index": vnf_index,
5609 "type": "delete",
5610 "kdu_index": instance_num - x - 1,
5611 }
5612 )
5613 scaling_info["kdu-delete"][kdu_name].append(
5614 {
5615 "member-vnf-index": vnf_index,
5616 "type": "delete",
5617 "k8s-cluster-type": k8s_cluster_type,
5618 "resource-name": resource_name,
5619 "scale": kdu_replica_count,
5620 }
5621 )
5622
5623 # update VDU_SCALING_INFO with the ip_addresses of the VDUs to delete
5624 vdu_delete = copy(scaling_info.get("vdu-delete"))
5625 if scaling_info["scaling_direction"] == "IN":
5626 for vdur in reversed(db_vnfr["vdur"]):
5627 if vdu_delete.get(vdur["vdu-id-ref"]):
5628 vdu_delete[vdur["vdu-id-ref"]] -= 1
5629 scaling_info["vdu"].append(
5630 {
5631 "name": vdur.get("name") or vdur.get("vdu-name"),
5632 "vdu_id": vdur["vdu-id-ref"],
5633 "interface": [],
5634 }
5635 )
5636 for interface in vdur["interfaces"]:
5637 scaling_info["vdu"][-1]["interface"].append(
5638 {
5639 "name": interface["name"],
5640 "ip_address": interface["ip-address"],
5641 "mac_address": interface.get("mac-address"),
5642 }
5643 )
5644 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
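# Editor's sketch (hypothetical values) of one entry appended to scaling_info["vdu"]
# above; this information is later exposed to config primitives as VDU_SCALE_INFO:
#   {
#       "name": "myvnf-mgmtVM-1",
#       "vdu_id": "mgmtVM",
#       "interface": [
#           {"name": "eth0", "ip_address": "10.0.0.12", "mac_address": "fa:16:3e:00:00:12"}
#       ],
#   }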
5645
5646 # PRE-SCALE BEGIN
5647 step = "Executing pre-scale vnf-config-primitive"
5648 if scaling_descriptor.get("scaling-config-action"):
5649 for scaling_config_action in scaling_descriptor[
5650 "scaling-config-action"
5651 ]:
5652 if (
5653 scaling_config_action.get("trigger") == "pre-scale-in"
5654 and scaling_type == "SCALE_IN"
5655 ) or (
5656 scaling_config_action.get("trigger") == "pre-scale-out"
5657 and scaling_type == "SCALE_OUT"
5658 ):
5659 vnf_config_primitive = scaling_config_action[
5660 "vnf-config-primitive-name-ref"
5661 ]
5662 step = db_nslcmop_update[
5663 "detailed-status"
5664 ] = "executing pre-scale scaling-config-action '{}'".format(
5665 vnf_config_primitive
5666 )
5667
5668 # look for primitive
5669 for config_primitive in (
5670 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5671 ).get("config-primitive", ()):
5672 if config_primitive["name"] == vnf_config_primitive:
5673 break
5674 else:
5675 raise LcmException(
5676 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
5677 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
5678 "primitive".format(scaling_group, vnf_config_primitive)
5679 )
5680
5681 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5682 if db_vnfr.get("additionalParamsForVnf"):
5683 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5684
5685 scale_process = "VCA"
5686 db_nsr_update["config-status"] = "configuring pre-scaling"
5687 primitive_params = self._map_primitive_params(
5688 config_primitive, {}, vnfr_params
5689 )
5690
5691 # Pre-scale retry check: Check if this sub-operation has been executed before
5692 op_index = self._check_or_add_scale_suboperation(
5693 db_nslcmop,
5694 vnf_index,
5695 vnf_config_primitive,
5696 primitive_params,
5697 "PRE-SCALE",
5698 )
5699 if op_index == self.SUBOPERATION_STATUS_SKIP:
5700 # Skip sub-operation
5701 result = "COMPLETED"
5702 result_detail = "Done"
5703 self.logger.debug(
5704 logging_text
5705 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
5706 vnf_config_primitive, result, result_detail
5707 )
5708 )
5709 else:
5710 if op_index == self.SUBOPERATION_STATUS_NEW:
5711 # New sub-operation: Get index of this sub-operation
5712 op_index = (
5713 len(db_nslcmop.get("_admin", {}).get("operations"))
5714 - 1
5715 )
5716 self.logger.debug(
5717 logging_text
5718 + "vnf_config_primitive={} New sub-operation".format(
5719 vnf_config_primitive
5720 )
5721 )
5722 else:
5723 # retry: Get registered params for this existing sub-operation
5724 op = db_nslcmop.get("_admin", {}).get("operations", [])[
5725 op_index
5726 ]
5727 vnf_index = op.get("member_vnf_index")
5728 vnf_config_primitive = op.get("primitive")
5729 primitive_params = op.get("primitive_params")
5730 self.logger.debug(
5731 logging_text
5732 + "vnf_config_primitive={} Sub-operation retry".format(
5733 vnf_config_primitive
5734 )
5735 )
5736 # Execute the primitive, either with new (first-time) or registered (retry) args
5737 ee_descriptor_id = config_primitive.get(
5738 "execution-environment-ref"
5739 )
5740 primitive_name = config_primitive.get(
5741 "execution-environment-primitive", vnf_config_primitive
5742 )
5743 ee_id, vca_type = self._look_for_deployed_vca(
5744 nsr_deployed["VCA"],
5745 member_vnf_index=vnf_index,
5746 vdu_id=None,
5747 vdu_count_index=None,
5748 ee_descriptor_id=ee_descriptor_id,
5749 )
5750 result, result_detail = await self._ns_execute_primitive(
5751 ee_id,
5752 primitive_name,
5753 primitive_params,
5754 vca_type=vca_type,
5755 vca_id=vca_id,
5756 )
5757 self.logger.debug(
5758 logging_text
5759 + "vnf_config_primitive={} Done with result {} {}".format(
5760 vnf_config_primitive, result, result_detail
5761 )
5762 )
5763 # Update operationState = COMPLETED | FAILED
5764 self._update_suboperation_status(
5765 db_nslcmop, op_index, result, result_detail
5766 )
5767
5768 if result == "FAILED":
5769 raise LcmException(result_detail)
5770 db_nsr_update["config-status"] = old_config_status
5771 scale_process = None
5772 # PRE-SCALE END
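# Editor's note on the sub-operation handling above: _check_or_add_scale_suboperation()
# returns SUBOPERATION_STATUS_SKIP when the sub-operation can be skipped (it already
# completed in a previous attempt), SUBOPERATION_STATUS_NEW when it is registered for
# the first time, or the index of an existing sub-operation on retry, in which case
# the registered primitive name and parameters are reused.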
5773
5774 db_nsr_update[
5775 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
5776 ] = nb_scale_op
5777 db_nsr_update[
5778 "_admin.scaling-group.{}.time".format(admin_scale_index)
5779 ] = time()
5780
5781 # SCALE-IN VCA - BEGIN
5782 if vca_scaling_info:
5783 step = db_nslcmop_update[
5784 "detailed-status"
5785 ] = "Deleting the execution environments"
5786 scale_process = "VCA"
5787 for vca_info in vca_scaling_info:
5788 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
5789 member_vnf_index = str(vca_info["member-vnf-index"])
5790 self.logger.debug(
5791 logging_text + "vdu info: {}".format(vca_info)
5792 )
5793 if vca_info.get("osm_vdu_id"):
5794 vdu_id = vca_info["osm_vdu_id"]
5795 vdu_index = int(vca_info["vdu_index"])
5796 stage[
5797 1
5798 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5799 member_vnf_index, vdu_id, vdu_index
5800 )
5801 stage[2] = step = "Scaling in VCA"
5802 self._write_op_status(op_id=nslcmop_id, stage=stage)
5803 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
5804 config_update = db_nsr["configurationStatus"]
5805 for vca_index, vca in enumerate(vca_update):
5806 if (
5807 (vca or vca.get("ee_id"))
5808 and vca["member-vnf-index"] == member_vnf_index
5809 and vca["vdu_count_index"] == vdu_index
5810 ):
5811 if vca.get("vdu_id"):
5812 config_descriptor = get_configuration(
5813 db_vnfd, vca.get("vdu_id")
5814 )
5815 elif vca.get("kdu_name"):
5816 config_descriptor = get_configuration(
5817 db_vnfd, vca.get("kdu_name")
5818 )
5819 else:
5820 config_descriptor = get_configuration(
5821 db_vnfd, db_vnfd["id"]
5822 )
5823 operation_params = (
5824 db_nslcmop.get("operationParams") or {}
5825 )
5826 exec_terminate_primitives = not operation_params.get(
5827 "skip_terminate_primitives"
5828 ) and vca.get("needed_terminate")
5829 task = asyncio.ensure_future(
5830 asyncio.wait_for(
5831 self.destroy_N2VC(
5832 logging_text,
5833 db_nslcmop,
5834 vca,
5835 config_descriptor,
5836 vca_index,
5837 destroy_ee=True,
5838 exec_primitives=exec_terminate_primitives,
5839 scaling_in=True,
5840 vca_id=vca_id,
5841 ),
5842 timeout=self.timeout_charm_delete,
5843 )
5844 )
5845 tasks_dict_info[task] = "Terminating VCA {}".format(
5846 vca.get("ee_id")
5847 )
5848 del vca_update[vca_index]
5849 del config_update[vca_index]
5850 # wait for pending tasks of terminate primitives
5851 if tasks_dict_info:
5852 self.logger.debug(
5853 logging_text
5854 + "Waiting for tasks {}".format(
5855 list(tasks_dict_info.keys())
5856 )
5857 )
5858 error_list = await self._wait_for_tasks(
5859 logging_text,
5860 tasks_dict_info,
5861 min(
5862 self.timeout_charm_delete, self.timeout_ns_terminate
5863 ),
5864 stage,
5865 nslcmop_id,
5866 )
5867 tasks_dict_info.clear()
5868 if error_list:
5869 raise LcmException("; ".join(error_list))
5870
5871 db_vca_and_config_update = {
5872 "_admin.deployed.VCA": vca_update,
5873 "configurationStatus": config_update,
5874 }
5875 self.update_db_2(
5876 "nsrs", db_nsr["_id"], db_vca_and_config_update
5877 )
5878 scale_process = None
5879 # SCALE-IN VCA - END
5880
5881 # SCALE RO - BEGIN
5882 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
5883 scale_process = "RO"
5884 if self.ro_config.get("ng"):
5885 await self._scale_ng_ro(
5886 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
5887 )
5888 scaling_info.pop("vdu-create", None)
5889 scaling_info.pop("vdu-delete", None)
5890
5891 scale_process = None
5892 # SCALE RO - END
5893
5894 # SCALE KDU - BEGIN
5895 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
5896 scale_process = "KDU"
5897 await self._scale_kdu(
5898 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
5899 )
5900 scaling_info.pop("kdu-create", None)
5901 scaling_info.pop("kdu-delete", None)
5902
5903 scale_process = None
5904 # SCALE KDU - END
5905
5906 if db_nsr_update:
5907 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5908
5909 # SCALE-UP VCA - BEGIN
5910 if vca_scaling_info:
5911 step = db_nslcmop_update[
5912 "detailed-status"
5913 ] = "Creating new execution environments"
5914 scale_process = "VCA"
5915 for vca_info in vca_scaling_info:
5916 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
5917 member_vnf_index = str(vca_info["member-vnf-index"])
5918 self.logger.debug(
5919 logging_text + "vdu info: {}".format(vca_info)
5920 )
5921 vnfd_id = db_vnfr["vnfd-ref"]
5922 if vca_info.get("osm_vdu_id"):
5923 vdu_index = int(vca_info["vdu_index"])
5924 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5925 if db_vnfr.get("additionalParamsForVnf"):
5926 deploy_params.update(
5927 parse_yaml_strings(
5928 db_vnfr["additionalParamsForVnf"].copy()
5929 )
5930 )
5931 descriptor_config = get_configuration(
5932 db_vnfd, db_vnfd["id"]
5933 )
5934 if descriptor_config:
5935 vdu_id = None
5936 vdu_name = None
5937 kdu_name = None
5938 self._deploy_n2vc(
5939 logging_text=logging_text
5940 + "member_vnf_index={} ".format(member_vnf_index),
5941 db_nsr=db_nsr,
5942 db_vnfr=db_vnfr,
5943 nslcmop_id=nslcmop_id,
5944 nsr_id=nsr_id,
5945 nsi_id=nsi_id,
5946 vnfd_id=vnfd_id,
5947 vdu_id=vdu_id,
5948 kdu_name=kdu_name,
5949 member_vnf_index=member_vnf_index,
5950 vdu_index=vdu_index,
5951 vdu_name=vdu_name,
5952 deploy_params=deploy_params,
5953 descriptor_config=descriptor_config,
5954 base_folder=base_folder,
5955 task_instantiation_info=tasks_dict_info,
5956 stage=stage,
5957 )
5958 vdu_id = vca_info["osm_vdu_id"]
5959 vdur = find_in_list(
5960 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
5961 )
5962 descriptor_config = get_configuration(db_vnfd, vdu_id)
5963 if vdur.get("additionalParams"):
5964 deploy_params_vdu = parse_yaml_strings(
5965 vdur["additionalParams"]
5966 )
5967 else:
5968 deploy_params_vdu = deploy_params
5969 deploy_params_vdu["OSM"] = get_osm_params(
5970 db_vnfr, vdu_id, vdu_count_index=vdu_index
5971 )
5972 if descriptor_config:
5973 vdu_name = None
5974 kdu_name = None
5975 stage[
5976 1
5977 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5978 member_vnf_index, vdu_id, vdu_index
5979 )
5980 stage[2] = step = "Scaling out VCA"
5981 self._write_op_status(op_id=nslcmop_id, stage=stage)
5982 self._deploy_n2vc(
5983 logging_text=logging_text
5984 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5985 member_vnf_index, vdu_id, vdu_index
5986 ),
5987 db_nsr=db_nsr,
5988 db_vnfr=db_vnfr,
5989 nslcmop_id=nslcmop_id,
5990 nsr_id=nsr_id,
5991 nsi_id=nsi_id,
5992 vnfd_id=vnfd_id,
5993 vdu_id=vdu_id,
5994 kdu_name=kdu_name,
5995 member_vnf_index=member_vnf_index,
5996 vdu_index=vdu_index,
5997 vdu_name=vdu_name,
5998 deploy_params=deploy_params_vdu,
5999 descriptor_config=descriptor_config,
6000 base_folder=base_folder,
6001 task_instantiation_info=tasks_dict_info,
6002 stage=stage,
6003 )
6004 # SCALE-UP VCA - END
6005 scale_process = None
6006
6007 # POST-SCALE BEGIN
6008 # execute primitive service POST-SCALING
6009 step = "Executing post-scale vnf-config-primitive"
6010 if scaling_descriptor.get("scaling-config-action"):
6011 for scaling_config_action in scaling_descriptor[
6012 "scaling-config-action"
6013 ]:
6014 if (
6015 scaling_config_action.get("trigger") == "post-scale-in"
6016 and scaling_type == "SCALE_IN"
6017 ) or (
6018 scaling_config_action.get("trigger") == "post-scale-out"
6019 and scaling_type == "SCALE_OUT"
6020 ):
6021 vnf_config_primitive = scaling_config_action[
6022 "vnf-config-primitive-name-ref"
6023 ]
6024 step = db_nslcmop_update[
6025 "detailed-status"
6026 ] = "executing post-scale scaling-config-action '{}'".format(
6027 vnf_config_primitive
6028 )
6029
6030 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6031 if db_vnfr.get("additionalParamsForVnf"):
6032 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6033
6034 # look for primitive
6035 for config_primitive in (
6036 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6037 ).get("config-primitive", ()):
6038 if config_primitive["name"] == vnf_config_primitive:
6039 break
6040 else:
6041 raise LcmException(
6042 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6043 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6044 "config-primitive".format(
6045 scaling_group, vnf_config_primitive
6046 )
6047 )
6048 scale_process = "VCA"
6049 db_nsr_update["config-status"] = "configuring post-scaling"
6050 primitive_params = self._map_primitive_params(
6051 config_primitive, {}, vnfr_params
6052 )
6053
6054 # Post-scale retry check: Check if this sub-operation has been executed before
6055 op_index = self._check_or_add_scale_suboperation(
6056 db_nslcmop,
6057 vnf_index,
6058 vnf_config_primitive,
6059 primitive_params,
6060 "POST-SCALE",
6061 )
6062 if op_index == self.SUBOPERATION_STATUS_SKIP:
6063 # Skip sub-operation
6064 result = "COMPLETED"
6065 result_detail = "Done"
6066 self.logger.debug(
6067 logging_text
6068 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6069 vnf_config_primitive, result, result_detail
6070 )
6071 )
6072 else:
6073 if op_index == self.SUBOPERATION_STATUS_NEW:
6074 # New sub-operation: Get index of this sub-operation
6075 op_index = (
6076 len(db_nslcmop.get("_admin", {}).get("operations"))
6077 - 1
6078 )
6079 self.logger.debug(
6080 logging_text
6081 + "vnf_config_primitive={} New sub-operation".format(
6082 vnf_config_primitive
6083 )
6084 )
6085 else:
6086 # retry: Get registered params for this existing sub-operation
6087 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6088 op_index
6089 ]
6090 vnf_index = op.get("member_vnf_index")
6091 vnf_config_primitive = op.get("primitive")
6092 primitive_params = op.get("primitive_params")
6093 self.logger.debug(
6094 logging_text
6095 + "vnf_config_primitive={} Sub-operation retry".format(
6096 vnf_config_primitive
6097 )
6098 )
6099 # Execute the primitive, either with new (first-time) or registered (retry) args
6100 ee_descriptor_id = config_primitive.get(
6101 "execution-environment-ref"
6102 )
6103 primitive_name = config_primitive.get(
6104 "execution-environment-primitive", vnf_config_primitive
6105 )
6106 ee_id, vca_type = self._look_for_deployed_vca(
6107 nsr_deployed["VCA"],
6108 member_vnf_index=vnf_index,
6109 vdu_id=None,
6110 vdu_count_index=None,
6111 ee_descriptor_id=ee_descriptor_id,
6112 )
6113 result, result_detail = await self._ns_execute_primitive(
6114 ee_id,
6115 primitive_name,
6116 primitive_params,
6117 vca_type=vca_type,
6118 vca_id=vca_id,
6119 )
6120 self.logger.debug(
6121 logging_text
6122 + "vnf_config_primitive={} Done with result {} {}".format(
6123 vnf_config_primitive, result, result_detail
6124 )
6125 )
6126 # Update operationState = COMPLETED | FAILED
6127 self._update_suboperation_status(
6128 db_nslcmop, op_index, result, result_detail
6129 )
6130
6131 if result == "FAILED":
6132 raise LcmException(result_detail)
6133 db_nsr_update["config-status"] = old_config_status
6134 scale_process = None
6135 # POST-SCALE END
6136
6137 db_nsr_update[
6138 "detailed-status"
6139 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6140 db_nsr_update["operational-status"] = (
6141 "running"
6142 if old_operational_status == "failed"
6143 else old_operational_status
6144 )
6145 db_nsr_update["config-status"] = old_config_status
6146 return
6147 except (
6148 ROclient.ROClientException,
6149 DbException,
6150 LcmException,
6151 NgRoException,
6152 ) as e:
6153 self.logger.error(logging_text + "Exit Exception {}".format(e))
6154 exc = e
6155 except asyncio.CancelledError:
6156 self.logger.error(
6157 logging_text + "Cancelled Exception while '{}'".format(step)
6158 )
6159 exc = "Operation was cancelled"
6160 except Exception as e:
6161 exc = traceback.format_exc()
6162 self.logger.critical(
6163 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6164 exc_info=True,
6165 )
6166 finally:
6167 self._write_ns_status(
6168 nsr_id=nsr_id,
6169 ns_state=None,
6170 current_operation="IDLE",
6171 current_operation_id=None,
6172 )
6173 if tasks_dict_info:
6174 stage[1] = "Waiting for pending scaling tasks."
6175 self.logger.debug(logging_text + stage[1])
6176 exc = await self._wait_for_tasks(
6177 logging_text,
6178 tasks_dict_info,
6179 self.timeout_ns_deploy,
6180 stage,
6181 nslcmop_id,
6182 nsr_id=nsr_id,
6183 )
6184 if exc:
6185 db_nslcmop_update[
6186 "detailed-status"
6187 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6188 nslcmop_operation_state = "FAILED"
6189 if db_nsr:
6190 db_nsr_update["operational-status"] = old_operational_status
6191 db_nsr_update["config-status"] = old_config_status
6192 db_nsr_update["detailed-status"] = ""
6193 if scale_process:
6194 if "VCA" in scale_process:
6195 db_nsr_update["config-status"] = "failed"
6196 if "RO" in scale_process:
6197 db_nsr_update["operational-status"] = "failed"
6198 db_nsr_update[
6199 "detailed-status"
6200 ] = "FAILED scaling nslcmop={} {}: {}".format(
6201 nslcmop_id, step, exc
6202 )
6203 else:
6204 error_description_nslcmop = None
6205 nslcmop_operation_state = "COMPLETED"
6206 db_nslcmop_update["detailed-status"] = "Done"
6207
6208 self._write_op_status(
6209 op_id=nslcmop_id,
6210 stage="",
6211 error_message=error_description_nslcmop,
6212 operation_state=nslcmop_operation_state,
6213 other_update=db_nslcmop_update,
6214 )
6215 if db_nsr:
6216 self._write_ns_status(
6217 nsr_id=nsr_id,
6218 ns_state=None,
6219 current_operation="IDLE",
6220 current_operation_id=None,
6221 other_update=db_nsr_update,
6222 )
6223
6224 if nslcmop_operation_state:
6225 try:
6226 msg = {
6227 "nsr_id": nsr_id,
6228 "nslcmop_id": nslcmop_id,
6229 "operationState": nslcmop_operation_state,
6230 }
6231 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
6232 except Exception as e:
6233 self.logger.error(
6234 logging_text + "kafka_write notification Exception {}".format(e)
6235 )
6236 self.logger.debug(logging_text + "Exit")
6237 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
6238
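# Editor's sketch (hypothetical values) of the scaling_info consumed by _scale_kdu()
# below, as built in scale() above:
#   {
#       "kdu-create": {
#           "my-kdu": [
#               {
#                   "member-vnf-index": "1",
#                   "type": "create",
#                   "k8s-cluster-type": "helm-chart-v3",
#                   "resource-name": "my-deployment",
#                   "scale": 3,
#               }
#           ]
#       }
#   }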
6239 async def _scale_kdu(
6240 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6241 ):
6242 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
6243 for kdu_name in _scaling_info:
6244 for kdu_scaling_info in _scaling_info[kdu_name]:
6245 deployed_kdu, index = get_deployed_kdu(
6246 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
6247 )
6248 cluster_uuid = deployed_kdu["k8scluster-uuid"]
6249 kdu_instance = deployed_kdu["kdu-instance"]
6250 kdu_model = deployed_kdu.get("kdu-model")
6251 scale = int(kdu_scaling_info["scale"])
6252 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
6253
6254 db_dict = {
6255 "collection": "nsrs",
6256 "filter": {"_id": nsr_id},
6257 "path": "_admin.deployed.K8s.{}".format(index),
6258 }
6259
6260 step = "scaling application {}".format(
6261 kdu_scaling_info["resource-name"]
6262 )
6263 self.logger.debug(logging_text + step)
6264
6265 if kdu_scaling_info["type"] == "delete":
6266 kdu_config = get_configuration(db_vnfd, kdu_name)
6267 if (
6268 kdu_config
6269 and kdu_config.get("terminate-config-primitive")
6270 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6271 ):
6272 terminate_config_primitive_list = kdu_config.get(
6273 "terminate-config-primitive"
6274 )
6275 terminate_config_primitive_list.sort(
6276 key=lambda val: int(val["seq"])
6277 )
6278
6279 for (
6280 terminate_config_primitive
6281 ) in terminate_config_primitive_list:
6282 primitive_params_ = self._map_primitive_params(
6283 terminate_config_primitive, {}, {}
6284 )
6285 step = "execute terminate config primitive"
6286 self.logger.debug(logging_text + step)
6287 await asyncio.wait_for(
6288 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6289 cluster_uuid=cluster_uuid,
6290 kdu_instance=kdu_instance,
6291 primitive_name=terminate_config_primitive["name"],
6292 params=primitive_params_,
6293 db_dict=db_dict,
6294 vca_id=vca_id,
6295 ),
6296 timeout=600,
6297 )
6298
6299 await asyncio.wait_for(
6300 self.k8scluster_map[k8s_cluster_type].scale(
6301 kdu_instance,
6302 scale,
6303 kdu_scaling_info["resource-name"],
6304 vca_id=vca_id,
6305 cluster_uuid=cluster_uuid,
6306 kdu_model=kdu_model,
6307 atomic=True,
6308 db_dict=db_dict,
6309 ),
6310 timeout=self.timeout_vca_on_error,
6311 )
6312
6313 if kdu_scaling_info["type"] == "create":
6314 kdu_config = get_configuration(db_vnfd, kdu_name)
6315 if (
6316 kdu_config
6317 and kdu_config.get("initial-config-primitive")
6318 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6319 ):
6320 initial_config_primitive_list = kdu_config.get(
6321 "initial-config-primitive"
6322 )
6323 initial_config_primitive_list.sort(
6324 key=lambda val: int(val["seq"])
6325 )
6326
6327 for initial_config_primitive in initial_config_primitive_list:
6328 primitive_params_ = self._map_primitive_params(
6329 initial_config_primitive, {}, {}
6330 )
6331 step = "execute initial config primitive"
6332 self.logger.debug(logging_text + step)
6333 await asyncio.wait_for(
6334 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6335 cluster_uuid=cluster_uuid,
6336 kdu_instance=kdu_instance,
6337 primitive_name=initial_config_primitive["name"],
6338 params=primitive_params_,
6339 db_dict=db_dict,
6340 vca_id=vca_id,
6341 ),
6342 timeout=600,
6343 )
6344
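# Editor's note (reading of the code below): _scale_ng_ro() reuses the NG-RO
# instantiation path for scaling: it marks the VDUs to create/delete in the vnfr
# via scale_vnfr(..., mark_delete=True), calls _instantiate_ng_ro() so NG-RO
# reconciles the deployment, and finally calls scale_vnfr(..., mark_delete=False)
# for the deleted VDUs once RO has processed them.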
6345 async def _scale_ng_ro(
6346 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6347 ):
6348 nsr_id = db_nslcmop["nsInstanceId"]
6349 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6350 db_vnfrs = {}
6351
6352 # read from db: vnfd's for every vnf
6353 db_vnfds = []
6354
6355 # for each vnf in ns, read vnfd
6356 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6357 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6358 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6359 # if we don't have this vnfd yet, read it from db
6360 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6361 # read from db
6362 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6363 db_vnfds.append(vnfd)
6364 n2vc_key = self.n2vc.get_public_key()
6365 n2vc_key_list = [n2vc_key]
6366 self.scale_vnfr(
6367 db_vnfr,
6368 vdu_scaling_info.get("vdu-create"),
6369 vdu_scaling_info.get("vdu-delete"),
6370 mark_delete=True,
6371 )
6372 # db_vnfr has been updated, update db_vnfrs to use it
6373 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6374 await self._instantiate_ng_ro(
6375 logging_text,
6376 nsr_id,
6377 db_nsd,
6378 db_nsr,
6379 db_nslcmop,
6380 db_vnfrs,
6381 db_vnfds,
6382 n2vc_key_list,
6383 stage=stage,
6384 start_deploy=time(),
6385 timeout_ns_deploy=self.timeout_ns_deploy,
6386 )
6387 if vdu_scaling_info.get("vdu-delete"):
6388 self.scale_vnfr(
6389 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6390 )
6391
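# Editor's sketch: a minimal 'prometheus.j2' that a package artifact could ship
# (hypothetical content), rendered by parse_job() with the variables built below:
#   - job_name: "{{ JOB_NAME }}"
#     static_configs:
#       - targets: ["{{ EXPORTER_POD_IP }}:{{ EXPORTER_POD_PORT }}"]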
6392 async def extract_prometheus_scrape_jobs(
6393 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6394 ):
6395 # look for a file called 'prometheus*.j2' in the artifact path
6396 artifact_content = self.fs.dir_ls(artifact_path)
6397 job_file = next(
6398 (
6399 f
6400 for f in artifact_content
6401 if f.startswith("prometheus") and f.endswith(".j2")
6402 ),
6403 None,
6404 )
6405 if not job_file:
6406 return
6407 with self.fs.file_open((artifact_path, job_file), "r") as f:
6408 job_data = f.read()
6409
6410 # TODO get_service
6411 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6412 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6413 host_port = "80"
6414 vnfr_id = vnfr_id.replace("-", "")
6415 variables = {
6416 "JOB_NAME": vnfr_id,
6417 "TARGET_IP": target_ip,
6418 "EXPORTER_POD_IP": host_name,
6419 "EXPORTER_POD_PORT": host_port,
6420 }
6421 job_list = parse_job(job_data, variables)
6422 # ensure job_name uses the vnfr_id, and add nsr_id and vnfr_id as metadata
6423 for job in job_list:
6424 if (
6425 not isinstance(job.get("job_name"), str)
6426 or vnfr_id not in job["job_name"]
6427 ):
6428 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6429 job["nsr_id"] = nsr_id
6430 job["vnfr_id"] = vnfr_id
6431 return job_list
6432
6433 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6434 """
6435 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6436
6437 :param: vim_account_id: VIM Account ID
6438
6439 :return: (cloud_name, cloud_credential)
6440 """
6441 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6442 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6443
6444 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6445 """
6446 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6447
6448 :param: vim_account_id: VIM Account ID
6449
6450 :return: (cloud_name, cloud_credential)
6451 """
6452 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6453 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
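# Editor's usage sketch (hypothetical VIM account id) for the two helpers above,
# which read optional "config" keys of the VIM account and return None when unset:
#   cloud, credential = self.get_vca_cloud_and_credentials(vim_account_id)
#   k8s_cloud, k8s_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)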