feat(prometheus): prometheus config no longer depends on LCM
osm/LCM.git: osm_lcm/ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 from typing import Any, Dict, List
21 import yaml
22 import logging
23 import logging.handlers
24 import traceback
25 import json
26 from jinja2 import (
27 Environment,
28 TemplateError,
29 TemplateNotFound,
30 StrictUndefined,
31 UndefinedError,
32 )
33
34 from osm_lcm import ROclient
35 from osm_lcm.data_utils.nsr import (
36 get_deployed_kdu,
37 get_deployed_vca,
38 get_deployed_vca_list,
39 get_nsd,
40 )
41 from osm_lcm.data_utils.vca import (
42 DeployedComponent,
43 DeployedK8sResource,
44 DeployedVCA,
45 EELevel,
46 Relation,
47 EERelation,
48 safe_get_ee_relation,
49 )
50 from osm_lcm.ng_ro import NgRoClient, NgRoException
51 from osm_lcm.lcm_utils import (
52 LcmException,
53 LcmExceptionNoMgmtIP,
54 LcmBase,
55 deep_get,
56 get_iterable,
57 populate_dict,
58 )
59 from osm_lcm.data_utils.nsd import (
60 get_ns_configuration_relation_list,
61 get_vnf_profile,
62 get_vnf_profiles,
63 )
64 from osm_lcm.data_utils.vnfd import (
65 get_relation_list,
66 get_vdu_list,
67 get_vdu_profile,
68 get_ee_sorted_initial_config_primitive_list,
69 get_ee_sorted_terminate_config_primitive_list,
70 get_kdu_list,
71 get_virtual_link_profiles,
72 get_vdu,
73 get_configuration,
74 get_vdu_index,
75 get_scaling_aspect,
76 get_number_of_instances,
77 get_juju_ee_ref,
78 get_kdu_resource_profile,
79 )
80 from osm_lcm.data_utils.list_utils import find_in_list
81 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
82 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
83 from osm_lcm.data_utils.database.vim_account import VimAccountDB
84 from n2vc.definitions import RelationEndpoint
85 from n2vc.k8s_helm_conn import K8sHelmConnector
86 from n2vc.k8s_helm3_conn import K8sHelm3Connector
87 from n2vc.k8s_juju_conn import K8sJujuConnector
88
89 from osm_common.dbbase import DbException
90 from osm_common.fsbase import FsException
91
92 from osm_lcm.data_utils.database.database import Database
93 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
94
95 from n2vc.n2vc_juju_conn import N2VCJujuConnector
96 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
97
98 from osm_lcm.lcm_helm_conn import LCMHelmConn
99 from osm_lcm.prometheus import parse_job
100
101 from copy import copy, deepcopy
102 from time import time
103 from uuid import uuid4
104
105 from random import randint
106
107 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
108
109
110 class NsLcm(LcmBase):
111 timeout_vca_on_error = (
112 5 * 60
113 ) # Time for a charm to be marked as failed, counted from when it first reaches blocked/error status
114 timeout_ns_deploy = 2 * 3600 # default global timeout for deployment of a ns
115 timeout_ns_terminate = 1800 # default global timeout for undeployment of a ns
116 timeout_charm_delete = 10 * 60
117 timeout_primitive = 30 * 60 # timeout for primitive execution
118 timeout_progress_primitive = (
119 10 * 60
120 ) # timeout for some progress in a primitive execution
121
122 SUBOPERATION_STATUS_NOT_FOUND = -1
123 SUBOPERATION_STATUS_NEW = -2
124 SUBOPERATION_STATUS_SKIP = -3
125 task_name_deploy_vca = "Deploying VCA"
126
127 def __init__(self, msg, lcm_tasks, config, loop):
128 """
129 Init, Connect to database, filesystem storage, and messaging
130 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
131 :return: None
132 """
133 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
134
135 self.db = Database().instance.db
136 self.fs = Filesystem().instance.fs
137 self.loop = loop
138 self.lcm_tasks = lcm_tasks
139 self.timeout = config["timeout"]
140 self.ro_config = config["ro_config"]
141 self.ng_ro = config["ro_config"].get("ng")
142 self.vca_config = config["VCA"].copy()
143
144 # create N2VC connector
145 self.n2vc = N2VCJujuConnector(
146 log=self.logger,
147 loop=self.loop,
148 on_update_db=self._on_update_n2vc_db,
149 fs=self.fs,
150 db=self.db,
151 )
152
153 self.conn_helm_ee = LCMHelmConn(
154 log=self.logger,
155 loop=self.loop,
156 vca_config=self.vca_config,
157 on_update_db=self._on_update_n2vc_db,
158 )
159
160 self.k8sclusterhelm2 = K8sHelmConnector(
161 kubectl_command=self.vca_config.get("kubectlpath"),
162 helm_command=self.vca_config.get("helmpath"),
163 log=self.logger,
164 on_update_db=None,
165 fs=self.fs,
166 db=self.db,
167 )
168
169 self.k8sclusterhelm3 = K8sHelm3Connector(
170 kubectl_command=self.vca_config.get("kubectlpath"),
171 helm_command=self.vca_config.get("helm3path"),
172 fs=self.fs,
173 log=self.logger,
174 db=self.db,
175 on_update_db=None,
176 )
177
178 self.k8sclusterjuju = K8sJujuConnector(
179 kubectl_command=self.vca_config.get("kubectlpath"),
180 juju_command=self.vca_config.get("jujupath"),
181 log=self.logger,
182 loop=self.loop,
183 on_update_db=self._on_update_k8s_db,
184 fs=self.fs,
185 db=self.db,
186 )
187
188 self.k8scluster_map = {
189 "helm-chart": self.k8sclusterhelm2,
190 "helm-chart-v3": self.k8sclusterhelm3,
191 "chart": self.k8sclusterhelm3,
192 "juju-bundle": self.k8sclusterjuju,
193 "juju": self.k8sclusterjuju,
194 }
195
196 self.vca_map = {
197 "lxc_proxy_charm": self.n2vc,
198 "native_charm": self.n2vc,
199 "k8s_proxy_charm": self.n2vc,
200 "helm": self.conn_helm_ee,
201 "helm-v3": self.conn_helm_ee,
202 }
203
204 # create RO client
205 self.RO = NgRoClient(self.loop, **self.ro_config)
206
207 @staticmethod
208 def increment_ip_mac(ip_mac, vm_index=1):
209 if not isinstance(ip_mac, str):
210 return ip_mac
211 try:
212 # try with ipv4 look for last dot
213 i = ip_mac.rfind(".")
214 if i > 0:
215 i += 1
216 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
217 # try with ipv6 or mac look for last colon. Operate in hex
218 i = ip_mac.rfind(":")
219 if i > 0:
220 i += 1
221 # format in hex, len can be 2 for mac or 4 for ipv6
222 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
223 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
224 )
225 except Exception:
226 pass
227 return None
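# Illustrative behaviour of increment_ip_mac (examples added for clarity, not in the original source):
#   NsLcm.increment_ip_mac("10.0.0.5", vm_index=2)   -> "10.0.0.7"          (last IPv4 octet incremented)
#   NsLcm.increment_ip_mac("fa:16:3e:00:00:0a", 1)   -> "fa:16:3e:00:00:0b" (hex increment after last colon)
#   NsLcm.increment_ip_mac(None)                     -> None                (non-string input returned unchanged)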
228
229 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
230
231 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
232
233 try:
234 # TODO filter RO descriptor fields...
235
236 # write to database
237 db_dict = dict()
238 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
239 db_dict["deploymentStatus"] = ro_descriptor
240 self.update_db_2("nsrs", nsrs_id, db_dict)
241
242 except Exception as e:
243 self.logger.warn(
244 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
245 )
246
247 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
248
249 # remove last dot from path (if exists)
250 if path.endswith("."):
251 path = path[:-1]
252
253 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
254 # .format(table, filter, path, updated_data))
255 try:
256
257 nsr_id = filter.get("_id")
258
259 # read ns record from database
260 nsr = self.db.get_one(table="nsrs", q_filter=filter)
261 current_ns_status = nsr.get("nsState")
262
263 # get vca status for NS
264 status_dict = await self.n2vc.get_status(
265 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
266 )
267
268 # vcaStatus
269 db_dict = dict()
270 db_dict["vcaStatus"] = status_dict
271 await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
272
273 # update configurationStatus for this VCA
274 try:
275 vca_index = int(path[path.rfind(".") + 1 :])
276
277 vca_list = deep_get(
278 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
279 )
280 vca_status = vca_list[vca_index].get("status")
281
282 configuration_status_list = nsr.get("configurationStatus")
283 config_status = configuration_status_list[vca_index].get("status")
284
285 if config_status == "BROKEN" and vca_status != "failed":
286 db_dict["configurationStatus"][vca_index] = "READY"
287 elif config_status != "BROKEN" and vca_status == "failed":
288 db_dict["configurationStatus"][vca_index] = "BROKEN"
289 except Exception as e:
290 # not update configurationStatus
291 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
292
293 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
294 # if nsState = 'DEGRADED' check if all is OK
295 is_degraded = False
296 if current_ns_status in ("READY", "DEGRADED"):
297 error_description = ""
298 # check machines
299 if status_dict.get("machines"):
300 for machine_id in status_dict.get("machines"):
301 machine = status_dict.get("machines").get(machine_id)
302 # check machine agent-status
303 if machine.get("agent-status"):
304 s = machine.get("agent-status").get("status")
305 if s != "started":
306 is_degraded = True
307 error_description += (
308 "machine {} agent-status={} ; ".format(
309 machine_id, s
310 )
311 )
312 # check machine instance status
313 if machine.get("instance-status"):
314 s = machine.get("instance-status").get("status")
315 if s != "running":
316 is_degraded = True
317 error_description += (
318 "machine {} instance-status={} ; ".format(
319 machine_id, s
320 )
321 )
322 # check applications
323 if status_dict.get("applications"):
324 for app_id in status_dict.get("applications"):
325 app = status_dict.get("applications").get(app_id)
326 # check application status
327 if app.get("status"):
328 s = app.get("status").get("status")
329 if s != "active":
330 is_degraded = True
331 error_description += (
332 "application {} status={} ; ".format(app_id, s)
333 )
334
335 if error_description:
336 db_dict["errorDescription"] = error_description
337 if current_ns_status == "READY" and is_degraded:
338 db_dict["nsState"] = "DEGRADED"
339 if current_ns_status == "DEGRADED" and not is_degraded:
340 db_dict["nsState"] = "READY"
341
342 # write to database
343 self.update_db_2("nsrs", nsr_id, db_dict)
344
345 except (asyncio.CancelledError, asyncio.TimeoutError):
346 raise
347 except Exception as e:
348 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
349
350 async def _on_update_k8s_db(
351 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
352 ):
353 """
354 Updating vca status in NSR record
355 :param cluster_uuid: UUID of a k8s cluster
356 :param kdu_instance: The unique name of the KDU instance
357 :param filter: To get nsr_id
358 :return: none
359 """
360
361 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
362 # .format(cluster_uuid, kdu_instance, filter))
363
364 try:
365 nsr_id = filter.get("_id")
366
367 # get vca status for NS
368 vca_status = await self.k8sclusterjuju.status_kdu(
369 cluster_uuid,
370 kdu_instance,
371 complete_status=True,
372 yaml_format=False,
373 vca_id=vca_id,
374 )
375 # vcaStatus
376 db_dict = dict()
377 db_dict["vcaStatus"] = {nsr_id: vca_status}
378
379 await self.k8sclusterjuju.update_vca_status(
380 db_dict["vcaStatus"],
381 kdu_instance,
382 vca_id=vca_id,
383 )
384
385 # write to database
386 self.update_db_2("nsrs", nsr_id, db_dict)
387
388 except (asyncio.CancelledError, asyncio.TimeoutError):
389 raise
390 except Exception as e:
391 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
392
393 @staticmethod
394 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
395 try:
396 env = Environment(undefined=StrictUndefined)
397 template = env.from_string(cloud_init_text)
398 return template.render(additional_params or {})
399 except UndefinedError as e:
400 raise LcmException(
401 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
402 "file, must be provided in the instantiation parameters inside the "
403 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
404 )
405 except (TemplateError, TemplateNotFound) as e:
406 raise LcmException(
407 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
408 vnfd_id, vdu_id, e
409 )
410 )
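# Minimal sketch of the Jinja2 rendering performed above (template text and variables are assumptions):
#   env = Environment(undefined=StrictUndefined)
#   template = env.from_string("#cloud-config\nhostname: {{ hostname }}\n")
#   template.render({"hostname": "mgmt-vm"})   # -> "#cloud-config\nhostname: mgmt-vm\n"
#   template.render({})                        # raises UndefinedError, reported as LcmException above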
411
412 def _get_vdu_cloud_init_content(self, vdu, vnfd):
413 cloud_init_content = cloud_init_file = None
414 try:
415 if vdu.get("cloud-init-file"):
416 base_folder = vnfd["_admin"]["storage"]
417 if base_folder["pkg-dir"]:
418 cloud_init_file = "{}/{}/cloud_init/{}".format(
419 base_folder["folder"],
420 base_folder["pkg-dir"],
421 vdu["cloud-init-file"],
422 )
423 else:
424 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
425 base_folder["folder"],
426 vdu["cloud-init-file"],
427 )
428 with self.fs.file_open(cloud_init_file, "r") as ci_file:
429 cloud_init_content = ci_file.read()
430 elif vdu.get("cloud-init"):
431 cloud_init_content = vdu["cloud-init"]
432
433 return cloud_init_content
434 except FsException as e:
435 raise LcmException(
436 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
437 vnfd["id"], vdu["id"], cloud_init_file, e
438 )
439 )
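# Resolved cloud-init paths, derived from the branches above (package layout shown is an assumption):
#   with a pkg-dir:    "<storage.folder>/<storage.pkg-dir>/cloud_init/<cloud-init-file>"
#   without a pkg-dir: "<storage.folder>/Scripts/cloud_init/<cloud-init-file>"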
440
441 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
442 vdur = next(
443 vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]
444 )
445 additional_params = vdur.get("additionalParams")
446 return parse_yaml_strings(additional_params)
447
448 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
449 """
450 Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
451 :param vnfd: input vnfd
452 :param new_id: overrides vnf id if provided
453 :param additionalParams: instantiation parameters provided for the VNF
454 :param nsrId: Id of the NSR
455 :return: copy of vnfd
456 """
457 vnfd_RO = deepcopy(vnfd)
458 # remove unused by RO configuration, monitoring, scaling and internal keys
459 vnfd_RO.pop("_id", None)
460 vnfd_RO.pop("_admin", None)
461 vnfd_RO.pop("monitoring-param", None)
462 vnfd_RO.pop("scaling-group-descriptor", None)
463 vnfd_RO.pop("kdu", None)
464 vnfd_RO.pop("k8s-cluster", None)
465 if new_id:
466 vnfd_RO["id"] = new_id
467
468 # cloud-init and cloud-init-file are parsed by LCM (Jinja2), so remove them from the RO copy
469 for vdu in get_iterable(vnfd_RO, "vdu"):
470 vdu.pop("cloud-init-file", None)
471 vdu.pop("cloud-init", None)
472 return vnfd_RO
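# Illustrative call (the new_id value is a made-up example): vnfd2RO strips "_id", "_admin",
# "monitoring-param", "scaling-group-descriptor", "kdu", "k8s-cluster" and any vdu cloud-init
# data from a deep copy of the vnfd, optionally overriding its id:
#   vnfd_ro = self.vnfd2RO(vnfd, new_id="myvnf-scaled")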
473
474 @staticmethod
475 def ip_profile_2_RO(ip_profile):
476 RO_ip_profile = deepcopy(ip_profile)
477 if "dns-server" in RO_ip_profile:
478 if isinstance(RO_ip_profile["dns-server"], list):
479 RO_ip_profile["dns-address"] = []
480 for ds in RO_ip_profile.pop("dns-server"):
481 RO_ip_profile["dns-address"].append(ds["address"])
482 else:
483 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
484 if RO_ip_profile.get("ip-version") == "ipv4":
485 RO_ip_profile["ip-version"] = "IPv4"
486 if RO_ip_profile.get("ip-version") == "ipv6":
487 RO_ip_profile["ip-version"] = "IPv6"
488 if "dhcp-params" in RO_ip_profile:
489 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
490 return RO_ip_profile
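# Illustrative translation performed above (input values are made-up examples):
#   ip_profile_2_RO({"ip-version": "ipv4",
#                    "dns-server": [{"address": "8.8.8.8"}],
#                    "dhcp-params": {"enabled": True}})
#   -> {"ip-version": "IPv4", "dns-address": ["8.8.8.8"], "dhcp": {"enabled": True}}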
491
492 def _get_ro_vim_id_for_vim_account(self, vim_account):
493 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
494 if db_vim["_admin"]["operationalState"] != "ENABLED":
495 raise LcmException(
496 "VIM={} is not available. operationalState={}".format(
497 vim_account, db_vim["_admin"]["operationalState"]
498 )
499 )
500 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
501 return RO_vim_id
502
503 def get_ro_wim_id_for_wim_account(self, wim_account):
504 if isinstance(wim_account, str):
505 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
506 if db_wim["_admin"]["operationalState"] != "ENABLED":
507 raise LcmException(
508 "WIM={} is not available. operationalState={}".format(
509 wim_account, db_wim["_admin"]["operationalState"]
510 )
511 )
512 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
513 return RO_wim_id
514 else:
515 return wim_account
516
517 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
518
519 db_vdu_push_list = []
520 db_update = {"_admin.modified": time()}
521 if vdu_create:
522 for vdu_id, vdu_count in vdu_create.items():
523 vdur = next(
524 (
525 vdur
526 for vdur in reversed(db_vnfr["vdur"])
527 if vdur["vdu-id-ref"] == vdu_id
528 ),
529 None,
530 )
531 if not vdur:
532 raise LcmException(
533 "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
534 vdu_id
535 )
536 )
537
538 for count in range(vdu_count):
539 vdur_copy = deepcopy(vdur)
540 vdur_copy["status"] = "BUILD"
541 vdur_copy["status-detailed"] = None
542 vdur_copy["ip-address"] = None
543 vdur_copy["_id"] = str(uuid4())
544 vdur_copy["count-index"] += count + 1
545 vdur_copy["id"] = "{}-{}".format(
546 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
547 )
548 vdur_copy.pop("vim_info", None)
549 for iface in vdur_copy["interfaces"]:
550 if iface.get("fixed-ip"):
551 iface["ip-address"] = self.increment_ip_mac(
552 iface["ip-address"], count + 1
553 )
554 else:
555 iface.pop("ip-address", None)
556 if iface.get("fixed-mac"):
557 iface["mac-address"] = self.increment_ip_mac(
558 iface["mac-address"], count + 1
559 )
560 else:
561 iface.pop("mac-address", None)
562 iface.pop(
563 "mgmt_vnf", None
564 ) # only the first vdu can be the management vdu of the vnf
565 db_vdu_push_list.append(vdur_copy)
566 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
567 if vdu_delete:
568 for vdu_id, vdu_count in vdu_delete.items():
569 if mark_delete:
570 indexes_to_delete = [
571 iv[0]
572 for iv in enumerate(db_vnfr["vdur"])
573 if iv[1]["vdu-id-ref"] == vdu_id
574 ]
575 db_update.update(
576 {
577 "vdur.{}.status".format(i): "DELETING"
578 for i in indexes_to_delete[-vdu_count:]
579 }
580 )
581 else:
582 # it must be deleted one by one because common.db does not allow otherwise
583 vdus_to_delete = [
584 v
585 for v in reversed(db_vnfr["vdur"])
586 if v["vdu-id-ref"] == vdu_id
587 ]
588 for vdu in vdus_to_delete[:vdu_count]:
589 self.db.set_one(
590 "vnfrs",
591 {"_id": db_vnfr["_id"]},
592 None,
593 pull={"vdur": {"_id": vdu["_id"]}},
594 )
595 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
596 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
597 # modify passed dictionary db_vnfr
598 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
599 db_vnfr["vdur"] = db_vnfr_["vdur"]
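# Illustrative scaling calls (the vdu id "dataVM" is a made-up example):
#   self.scale_vnfr(db_vnfr, vdu_create={"dataVM": 2})                    # push two new vdur copies in BUILD state
#   self.scale_vnfr(db_vnfr, vdu_delete={"dataVM": 1}, mark_delete=True)  # mark the last replica as DELETING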
600
601 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
602 """
603 Updates database nsr with the RO info for the created vld
604 :param ns_update_nsr: dictionary to be filled with the updated info
605 :param db_nsr: content of db_nsr. This is also modified
606 :param nsr_desc_RO: nsr descriptor from RO
607 :return: Nothing, LcmException is raised on errors
608 """
609
610 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
611 for net_RO in get_iterable(nsr_desc_RO, "nets"):
612 if vld["id"] != net_RO.get("ns_net_osm_id"):
613 continue
614 vld["vim-id"] = net_RO.get("vim_net_id")
615 vld["name"] = net_RO.get("vim_name")
616 vld["status"] = net_RO.get("status")
617 vld["status-detailed"] = net_RO.get("error_msg")
618 ns_update_nsr["vld.{}".format(vld_index)] = vld
619 break
620 else:
621 raise LcmException(
622 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
623 )
624
625 def set_vnfr_at_error(self, db_vnfrs, error_text):
626 try:
627 for db_vnfr in db_vnfrs.values():
628 vnfr_update = {"status": "ERROR"}
629 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
630 if "status" not in vdur:
631 vdur["status"] = "ERROR"
632 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
633 if error_text:
634 vdur["status-detailed"] = str(error_text)
635 vnfr_update[
636 "vdur.{}.status-detailed".format(vdu_index)
637 ] = "ERROR"
638 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
639 except DbException as e:
640 self.logger.error("Cannot update vnf. {}".format(e))
641
642 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
643 """
644 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
645 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
646 :param nsr_desc_RO: nsr descriptor from RO
647 :return: Nothing, LcmException is raised on errors
648 """
649 for vnf_index, db_vnfr in db_vnfrs.items():
650 for vnf_RO in nsr_desc_RO["vnfs"]:
651 if vnf_RO["member_vnf_index"] != vnf_index:
652 continue
653 vnfr_update = {}
654 if vnf_RO.get("ip_address"):
655 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
656 "ip_address"
657 ].split(";")[0]
658 elif not db_vnfr.get("ip-address"):
659 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
660 raise LcmExceptionNoMgmtIP(
661 "ns member_vnf_index '{}' has no IP address".format(
662 vnf_index
663 )
664 )
665
666 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
667 vdur_RO_count_index = 0
668 if vdur.get("pdu-type"):
669 continue
670 for vdur_RO in get_iterable(vnf_RO, "vms"):
671 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
672 continue
673 if vdur["count-index"] != vdur_RO_count_index:
674 vdur_RO_count_index += 1
675 continue
676 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
677 if vdur_RO.get("ip_address"):
678 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
679 else:
680 vdur["ip-address"] = None
681 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
682 vdur["name"] = vdur_RO.get("vim_name")
683 vdur["status"] = vdur_RO.get("status")
684 vdur["status-detailed"] = vdur_RO.get("error_msg")
685 for ifacer in get_iterable(vdur, "interfaces"):
686 for interface_RO in get_iterable(vdur_RO, "interfaces"):
687 if ifacer["name"] == interface_RO.get("internal_name"):
688 ifacer["ip-address"] = interface_RO.get(
689 "ip_address"
690 )
691 ifacer["mac-address"] = interface_RO.get(
692 "mac_address"
693 )
694 break
695 else:
696 raise LcmException(
697 "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
698 "from VIM info".format(
699 vnf_index, vdur["vdu-id-ref"], ifacer["name"]
700 )
701 )
702 vnfr_update["vdur.{}".format(vdu_index)] = vdur
703 break
704 else:
705 raise LcmException(
706 "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
707 "VIM info".format(
708 vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
709 )
710 )
711
712 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
713 for net_RO in get_iterable(nsr_desc_RO, "nets"):
714 if vld["id"] != net_RO.get("vnf_net_osm_id"):
715 continue
716 vld["vim-id"] = net_RO.get("vim_net_id")
717 vld["name"] = net_RO.get("vim_name")
718 vld["status"] = net_RO.get("status")
719 vld["status-detailed"] = net_RO.get("error_msg")
720 vnfr_update["vld.{}".format(vld_index)] = vld
721 break
722 else:
723 raise LcmException(
724 "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
725 vnf_index, vld["id"]
726 )
727 )
728
729 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
730 break
731
732 else:
733 raise LcmException(
734 "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
735 vnf_index
736 )
737 )
738
739 def _get_ns_config_info(self, nsr_id):
740 """
741 Generates a mapping between vnf,vdu elements and the N2VC id
742 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
743 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
744 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
745 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
746 """
747 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
748 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
749 mapping = {}
750 ns_config_info = {"osm-config-mapping": mapping}
751 for vca in vca_deployed_list:
752 if not vca["member-vnf-index"]:
753 continue
754 if not vca["vdu_id"]:
755 mapping[vca["member-vnf-index"]] = vca["application"]
756 else:
757 mapping[
758 "{}.{}.{}".format(
759 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
760 )
761 ] = vca["application"]
762 return ns_config_info
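# Illustrative return value (application names are made-up examples):
#   {"osm-config-mapping": {"1": "app-vnf-1",               # vnf-level configuration
#                           "1.dataVM.0": "app-vnf-1-vdu"}} # vdu-level configuration, replica 0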
763
764 async def _instantiate_ng_ro(
765 self,
766 logging_text,
767 nsr_id,
768 nsd,
769 db_nsr,
770 db_nslcmop,
771 db_vnfrs,
772 db_vnfds,
773 n2vc_key_list,
774 stage,
775 start_deploy,
776 timeout_ns_deploy,
777 ):
778
779 db_vims = {}
780
781 def get_vim_account(vim_account_id):
782 nonlocal db_vims
783 if vim_account_id in db_vims:
784 return db_vims[vim_account_id]
785 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
786 db_vims[vim_account_id] = db_vim
787 return db_vim
788
789 # modify target_vld info with instantiation parameters
790 def parse_vld_instantiation_params(
791 target_vim, target_vld, vld_params, target_sdn
792 ):
793 if vld_params.get("ip-profile"):
794 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
795 "ip-profile"
796 ]
797 if vld_params.get("provider-network"):
798 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
799 "provider-network"
800 ]
801 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
802 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
803 "provider-network"
804 ]["sdn-ports"]
805 if vld_params.get("wimAccountId"):
806 target_wim = "wim:{}".format(vld_params["wimAccountId"])
807 target_vld["vim_info"][target_wim] = {}
808 for param in ("vim-network-name", "vim-network-id"):
809 if vld_params.get(param):
810 if isinstance(vld_params[param], dict):
811 for vim, vim_net in vld_params[param].items():
812 other_target_vim = "vim:" + vim
813 populate_dict(
814 target_vld["vim_info"],
815 (other_target_vim, param.replace("-", "_")),
816 vim_net,
817 )
818 else: # isinstance str
819 target_vld["vim_info"][target_vim][
820 param.replace("-", "_")
821 ] = vld_params[param]
822 if vld_params.get("common_id"):
823 target_vld["common_id"] = vld_params.get("common_id")
824
825 nslcmop_id = db_nslcmop["_id"]
826 target = {
827 "name": db_nsr["name"],
828 "ns": {"vld": []},
829 "vnf": [],
830 "image": deepcopy(db_nsr["image"]),
831 "flavor": deepcopy(db_nsr["flavor"]),
832 "action_id": nslcmop_id,
833 "cloud_init_content": {},
834 }
835 for image in target["image"]:
836 image["vim_info"] = {}
837 for flavor in target["flavor"]:
838 flavor["vim_info"] = {}
839
840 if db_nslcmop.get("lcmOperationType") != "instantiate":
841 # get parameters of instantiation:
842 db_nslcmop_instantiate = self.db.get_list(
843 "nslcmops",
844 {
845 "nsInstanceId": db_nslcmop["nsInstanceId"],
846 "lcmOperationType": "instantiate",
847 },
848 )[-1]
849 ns_params = db_nslcmop_instantiate.get("operationParams")
850 else:
851 ns_params = db_nslcmop.get("operationParams")
852 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
853 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
854
855 cp2target = {}
856 for vld_index, vld in enumerate(db_nsr.get("vld")):
857 target_vim = "vim:{}".format(ns_params["vimAccountId"])
858 target_vld = {
859 "id": vld["id"],
860 "name": vld["name"],
861 "mgmt-network": vld.get("mgmt-network", False),
862 "type": vld.get("type"),
863 "vim_info": {
864 target_vim: {
865 "vim_network_name": vld.get("vim-network-name"),
866 "vim_account_id": ns_params["vimAccountId"],
867 }
868 },
869 }
870 # check if this network needs SDN assist
871 if vld.get("pci-interfaces"):
872 db_vim = get_vim_account(ns_params["vimAccountId"])
873 sdnc_id = db_vim["config"].get("sdn-controller")
874 if sdnc_id:
875 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
876 target_sdn = "sdn:{}".format(sdnc_id)
877 target_vld["vim_info"][target_sdn] = {
878 "sdn": True,
879 "target_vim": target_vim,
880 "vlds": [sdn_vld],
881 "type": vld.get("type"),
882 }
883
884 nsd_vnf_profiles = get_vnf_profiles(nsd)
885 for nsd_vnf_profile in nsd_vnf_profiles:
886 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
887 if cp["virtual-link-profile-id"] == vld["id"]:
888 cp2target[
889 "member_vnf:{}.{}".format(
890 cp["constituent-cpd-id"][0][
891 "constituent-base-element-id"
892 ],
893 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
894 )
895 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
896
897 # check at nsd descriptor, if there is an ip-profile
898 vld_params = {}
899 nsd_vlp = find_in_list(
900 get_virtual_link_profiles(nsd),
901 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
902 == vld["id"],
903 )
904 if (
905 nsd_vlp
906 and nsd_vlp.get("virtual-link-protocol-data")
907 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
908 ):
909 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
910 "l3-protocol-data"
911 ]
912 ip_profile_dest_data = {}
913 if "ip-version" in ip_profile_source_data:
914 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
915 "ip-version"
916 ]
917 if "cidr" in ip_profile_source_data:
918 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
919 "cidr"
920 ]
921 if "gateway-ip" in ip_profile_source_data:
922 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
923 "gateway-ip"
924 ]
925 if "dhcp-enabled" in ip_profile_source_data:
926 ip_profile_dest_data["dhcp-params"] = {
927 "enabled": ip_profile_source_data["dhcp-enabled"]
928 }
929 vld_params["ip-profile"] = ip_profile_dest_data
930
931 # update vld_params with instantiation params
932 vld_instantiation_params = find_in_list(
933 get_iterable(ns_params, "vld"),
934 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
935 )
936 if vld_instantiation_params:
937 vld_params.update(vld_instantiation_params)
938 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
939 target["ns"]["vld"].append(target_vld)
940
941 for vnfr in db_vnfrs.values():
942 vnfd = find_in_list(
943 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
944 )
945 vnf_params = find_in_list(
946 get_iterable(ns_params, "vnf"),
947 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
948 )
949 target_vnf = deepcopy(vnfr)
950 target_vim = "vim:{}".format(vnfr["vim-account-id"])
951 for vld in target_vnf.get("vld", ()):
952 # check if connected to a ns.vld, to fill target
953 vnf_cp = find_in_list(
954 vnfd.get("int-virtual-link-desc", ()),
955 lambda cpd: cpd.get("id") == vld["id"],
956 )
957 if vnf_cp:
958 ns_cp = "member_vnf:{}.{}".format(
959 vnfr["member-vnf-index-ref"], vnf_cp["id"]
960 )
961 if cp2target.get(ns_cp):
962 vld["target"] = cp2target[ns_cp]
963
964 vld["vim_info"] = {
965 target_vim: {"vim_network_name": vld.get("vim-network-name")}
966 }
967 # check if this network needs SDN assist
968 target_sdn = None
969 if vld.get("pci-interfaces"):
970 db_vim = get_vim_account(vnfr["vim-account-id"])
971 sdnc_id = db_vim["config"].get("sdn-controller")
972 if sdnc_id:
973 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
974 target_sdn = "sdn:{}".format(sdnc_id)
975 vld["vim_info"][target_sdn] = {
976 "sdn": True,
977 "target_vim": target_vim,
978 "vlds": [sdn_vld],
979 "type": vld.get("type"),
980 }
981
982 # check at vnfd descriptor, if there is an ip-profile
983 vld_params = {}
984 vnfd_vlp = find_in_list(
985 get_virtual_link_profiles(vnfd),
986 lambda a_link_profile: a_link_profile["id"] == vld["id"],
987 )
988 if (
989 vnfd_vlp
990 and vnfd_vlp.get("virtual-link-protocol-data")
991 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
992 ):
993 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
994 "l3-protocol-data"
995 ]
996 ip_profile_dest_data = {}
997 if "ip-version" in ip_profile_source_data:
998 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
999 "ip-version"
1000 ]
1001 if "cidr" in ip_profile_source_data:
1002 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1003 "cidr"
1004 ]
1005 if "gateway-ip" in ip_profile_source_data:
1006 ip_profile_dest_data[
1007 "gateway-address"
1008 ] = ip_profile_source_data["gateway-ip"]
1009 if "dhcp-enabled" in ip_profile_source_data:
1010 ip_profile_dest_data["dhcp-params"] = {
1011 "enabled": ip_profile_source_data["dhcp-enabled"]
1012 }
1013
1014 vld_params["ip-profile"] = ip_profile_dest_data
1015 # update vld_params with instantiation params
1016 if vnf_params:
1017 vld_instantiation_params = find_in_list(
1018 get_iterable(vnf_params, "internal-vld"),
1019 lambda i_vld: i_vld["name"] == vld["id"],
1020 )
1021 if vld_instantiation_params:
1022 vld_params.update(vld_instantiation_params)
1023 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1024
1025 vdur_list = []
1026 for vdur in target_vnf.get("vdur", ()):
1027 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1028 continue # This vdu must not be created
1029 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1030
1031 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1032
1033 if ssh_keys_all:
1034 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1035 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1036 if (
1037 vdu_configuration
1038 and vdu_configuration.get("config-access")
1039 and vdu_configuration.get("config-access").get("ssh-access")
1040 ):
1041 vdur["ssh-keys"] = ssh_keys_all
1042 vdur["ssh-access-required"] = vdu_configuration[
1043 "config-access"
1044 ]["ssh-access"]["required"]
1045 elif (
1046 vnf_configuration
1047 and vnf_configuration.get("config-access")
1048 and vnf_configuration.get("config-access").get("ssh-access")
1049 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1050 ):
1051 vdur["ssh-keys"] = ssh_keys_all
1052 vdur["ssh-access-required"] = vnf_configuration[
1053 "config-access"
1054 ]["ssh-access"]["required"]
1055 elif ssh_keys_instantiation and find_in_list(
1056 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1057 ):
1058 vdur["ssh-keys"] = ssh_keys_instantiation
1059
1060 self.logger.debug("NS > vdur > {}".format(vdur))
1061
1062 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1063 # cloud-init
1064 if vdud.get("cloud-init-file"):
1065 vdur["cloud-init"] = "{}:file:{}".format(
1066 vnfd["_id"], vdud.get("cloud-init-file")
1067 )
1068 # read file and put content at target.cloud_init_content. Avoid ng_ro having to use the shared package system
1069 if vdur["cloud-init"] not in target["cloud_init_content"]:
1070 base_folder = vnfd["_admin"]["storage"]
1071 if base_folder["pkg-dir"]:
1072 cloud_init_file = "{}/{}/cloud_init/{}".format(
1073 base_folder["folder"],
1074 base_folder["pkg-dir"],
1075 vdud.get("cloud-init-file"),
1076 )
1077 else:
1078 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1079 base_folder["folder"],
1080 vdud.get("cloud-init-file"),
1081 )
1082 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1083 target["cloud_init_content"][
1084 vdur["cloud-init"]
1085 ] = ci_file.read()
1086 elif vdud.get("cloud-init"):
1087 vdur["cloud-init"] = "{}:vdu:{}".format(
1088 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1089 )
1090 # put content at target.cloud_init_content. Avoid ng_ro having to read the vnfd descriptor
1091 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1092 "cloud-init"
1093 ]
1094 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1095 deploy_params_vdu = self._format_additional_params(
1096 vdur.get("additionalParams") or {}
1097 )
1098 deploy_params_vdu["OSM"] = get_osm_params(
1099 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1100 )
1101 vdur["additionalParams"] = deploy_params_vdu
1102
1103 # flavor
1104 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1105 if target_vim not in ns_flavor["vim_info"]:
1106 ns_flavor["vim_info"][target_vim] = {}
1107
1108 # deal with images
1109 # in case alternative images are provided we must check if they should be applied
1110 # for the vim_type, modify the vim_type taking into account
1111 ns_image_id = int(vdur["ns-image-id"])
1112 if vdur.get("alt-image-ids"):
1113 db_vim = get_vim_account(vnfr["vim-account-id"])
1114 vim_type = db_vim["vim_type"]
1115 for alt_image_id in vdur.get("alt-image-ids"):
1116 ns_alt_image = target["image"][int(alt_image_id)]
1117 if vim_type == ns_alt_image.get("vim-type"):
1118 # must use alternative image
1119 self.logger.debug(
1120 "use alternative image id: {}".format(alt_image_id)
1121 )
1122 ns_image_id = alt_image_id
1123 vdur["ns-image-id"] = ns_image_id
1124 break
1125 ns_image = target["image"][int(ns_image_id)]
1126 if target_vim not in ns_image["vim_info"]:
1127 ns_image["vim_info"][target_vim] = {}
1128
1129 vdur["vim_info"] = {target_vim: {}}
1130 # instantiation parameters
1131 # if vnf_params:
1132 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1133 # vdud["id"]), None)
1134 vdur_list.append(vdur)
1135 target_vnf["vdur"] = vdur_list
1136 target["vnf"].append(target_vnf)
1137
1138 desc = await self.RO.deploy(nsr_id, target)
1139 self.logger.debug("RO return > {}".format(desc))
1140 action_id = desc["action_id"]
1141 await self._wait_ng_ro(
1142 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1143 )
1144
1145 # Updating NSR
1146 db_nsr_update = {
1147 "_admin.deployed.RO.operational-status": "running",
1148 "detailed-status": " ".join(stage),
1149 }
1150 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1151 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1152 self._write_op_status(nslcmop_id, stage)
1153 self.logger.debug(
1154 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1155 )
1156 return
1157
1158 async def _wait_ng_ro(
1159 self,
1160 nsr_id,
1161 action_id,
1162 nslcmop_id=None,
1163 start_time=None,
1164 timeout=600,
1165 stage=None,
1166 ):
1167 detailed_status_old = None
1168 db_nsr_update = {}
1169 start_time = start_time or time()
1170 while time() <= start_time + timeout:
1171 desc_status = await self.RO.status(nsr_id, action_id)
1172 self.logger.debug("Wait NG RO > {}".format(desc_status))
1173 if desc_status["status"] == "FAILED":
1174 raise NgRoException(desc_status["details"])
1175 elif desc_status["status"] == "BUILD":
1176 if stage:
1177 stage[2] = "VIM: ({})".format(desc_status["details"])
1178 elif desc_status["status"] == "DONE":
1179 if stage:
1180 stage[2] = "Deployed at VIM"
1181 break
1182 else:
1183 assert False, "ROclient.check_ns_status returns unknown {}".format(
1184 desc_status["status"]
1185 )
1186 if stage and nslcmop_id and stage[2] != detailed_status_old:
1187 detailed_status_old = stage[2]
1188 db_nsr_update["detailed-status"] = " ".join(stage)
1189 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1190 self._write_op_status(nslcmop_id, stage)
1191 await asyncio.sleep(15, loop=self.loop)
1192 else: # timeout_ns_deploy
1193 raise NgRoException("Timeout waiting ns to deploy")
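# Illustrative RO status payloads handled by the loop above (details strings are assumptions):
#   {"status": "BUILD",  "details": "creating VMs"}   -> stage[2] = "VIM: (creating VMs)" and keep polling
#   {"status": "DONE",   "details": "..."}            -> stage[2] = "Deployed at VIM" and exit
#   {"status": "FAILED", "details": "quota exceeded"} -> NgRoException raised with the details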
1194
1195 async def _terminate_ng_ro(
1196 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1197 ):
1198 db_nsr_update = {}
1199 failed_detail = []
1200 action_id = None
1201 start_deploy = time()
1202 try:
1203 target = {
1204 "ns": {"vld": []},
1205 "vnf": [],
1206 "image": [],
1207 "flavor": [],
1208 "action_id": nslcmop_id,
1209 }
1210 desc = await self.RO.deploy(nsr_id, target)
1211 action_id = desc["action_id"]
1212 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1213 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1214 self.logger.debug(
1215 logging_text
1216 + "ns terminate action at RO. action_id={}".format(action_id)
1217 )
1218
1219 # wait until done
1220 delete_timeout = 20 * 60 # 20 minutes
1221 await self._wait_ng_ro(
1222 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
1223 )
1224
1225 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1226 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1227 # delete all nsr
1228 await self.RO.delete(nsr_id)
1229 except Exception as e:
1230 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1231 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1232 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1233 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1234 self.logger.debug(
1235 logging_text + "RO_action_id={} already deleted".format(action_id)
1236 )
1237 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1238 failed_detail.append("delete conflict: {}".format(e))
1239 self.logger.debug(
1240 logging_text
1241 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1242 )
1243 else:
1244 failed_detail.append("delete error: {}".format(e))
1245 self.logger.error(
1246 logging_text
1247 + "RO_action_id={} delete error: {}".format(action_id, e)
1248 )
1249
1250 if failed_detail:
1251 stage[2] = "Error deleting from VIM"
1252 else:
1253 stage[2] = "Deleted from VIM"
1254 db_nsr_update["detailed-status"] = " ".join(stage)
1255 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1256 self._write_op_status(nslcmop_id, stage)
1257
1258 if failed_detail:
1259 raise LcmException("; ".join(failed_detail))
1260 return
1261
1262 async def instantiate_RO(
1263 self,
1264 logging_text,
1265 nsr_id,
1266 nsd,
1267 db_nsr,
1268 db_nslcmop,
1269 db_vnfrs,
1270 db_vnfds,
1271 n2vc_key_list,
1272 stage,
1273 ):
1274 """
1275 Instantiate at RO
1276 :param logging_text: prefix text to use at logging
1277 :param nsr_id: nsr identity
1278 :param nsd: database content of ns descriptor
1279 :param db_nsr: database content of ns record
1280 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1281 :param db_vnfrs:
1282 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1283 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1284 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1285 :return: None or exception
1286 """
1287 try:
1288 start_deploy = time()
1289 ns_params = db_nslcmop.get("operationParams")
1290 if ns_params and ns_params.get("timeout_ns_deploy"):
1291 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1292 else:
1293 timeout_ns_deploy = self.timeout.get(
1294 "ns_deploy", self.timeout_ns_deploy
1295 )
1296
1297 # Check for and optionally request placement optimization. Database will be updated if placement activated
1298 stage[2] = "Waiting for Placement."
1299 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1300 # in case of placement change, set ns_params["vimAccountId"] if it is not present at any vnfr
1301 for vnfr in db_vnfrs.values():
1302 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1303 break
1304 else:
1305 ns_params["vimAccountId"] = vnfr["vim-account-id"]
1306
1307 return await self._instantiate_ng_ro(
1308 logging_text,
1309 nsr_id,
1310 nsd,
1311 db_nsr,
1312 db_nslcmop,
1313 db_vnfrs,
1314 db_vnfds,
1315 n2vc_key_list,
1316 stage,
1317 start_deploy,
1318 timeout_ns_deploy,
1319 )
1320 except Exception as e:
1321 stage[2] = "ERROR deploying at VIM"
1322 self.set_vnfr_at_error(db_vnfrs, str(e))
1323 self.logger.error(
1324 "Error deploying at VIM {}".format(e),
1325 exc_info=not isinstance(
1326 e,
1327 (
1328 ROclient.ROClientException,
1329 LcmException,
1330 DbException,
1331 NgRoException,
1332 ),
1333 ),
1334 )
1335 raise
1336
1337 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1338 """
1339 Wait for kdu to be up, get ip address
1340 :param logging_text: prefix use for logging
1341 :param nsr_id:
1342 :param vnfr_id:
1343 :param kdu_name:
1344 :return: IP address
1345 """
1346
1347 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1348 nb_tries = 0
1349
1350 while nb_tries < 360:
1351 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1352 kdur = next(
1353 (
1354 x
1355 for x in get_iterable(db_vnfr, "kdur")
1356 if x.get("kdu-name") == kdu_name
1357 ),
1358 None,
1359 )
1360 if not kdur:
1361 raise LcmException(
1362 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1363 )
1364 if kdur.get("status"):
1365 if kdur["status"] in ("READY", "ENABLED"):
1366 return kdur.get("ip-address")
1367 else:
1368 raise LcmException(
1369 "target KDU={} is in error state".format(kdu_name)
1370 )
1371
1372 await asyncio.sleep(10, loop=self.loop)
1373 nb_tries += 1
1374 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1375
1376 async def wait_vm_up_insert_key_ro(
1377 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1378 ):
1379 """
1380 Wait for IP address at RO and, optionally, insert a public key into the virtual machine
1381 :param logging_text: prefix use for logging
1382 :param nsr_id:
1383 :param vnfr_id:
1384 :param vdu_id:
1385 :param vdu_index:
1386 :param pub_key: public ssh key to inject, None to skip
1387 :param user: user to apply the public ssh key
1388 :return: IP address
1389 """
1390
1391 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1392 ro_nsr_id = None
1393 ip_address = None
1394 nb_tries = 0
1395 target_vdu_id = None
1396 ro_retries = 0
1397
1398 while True:
1399
1400 ro_retries += 1
1401 if ro_retries >= 360: # 1 hour
1402 raise LcmException(
1403 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1404 )
1405
1406 await asyncio.sleep(10, loop=self.loop)
1407
1408 # get ip address
1409 if not target_vdu_id:
1410 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1411
1412 if not vdu_id: # for the VNF case
1413 if db_vnfr.get("status") == "ERROR":
1414 raise LcmException(
1415 "Cannot inject ssh-key because target VNF is in error state"
1416 )
1417 ip_address = db_vnfr.get("ip-address")
1418 if not ip_address:
1419 continue
1420 vdur = next(
1421 (
1422 x
1423 for x in get_iterable(db_vnfr, "vdur")
1424 if x.get("ip-address") == ip_address
1425 ),
1426 None,
1427 )
1428 else: # VDU case
1429 vdur = next(
1430 (
1431 x
1432 for x in get_iterable(db_vnfr, "vdur")
1433 if x.get("vdu-id-ref") == vdu_id
1434 and x.get("count-index") == vdu_index
1435 ),
1436 None,
1437 )
1438
1439 if (
1440 not vdur and len(db_vnfr.get("vdur", ())) == 1
1441 ): # If only one, this should be the target vdu
1442 vdur = db_vnfr["vdur"][0]
1443 if not vdur:
1444 raise LcmException(
1445 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1446 vnfr_id, vdu_id, vdu_index
1447 )
1448 )
1449 # New generation RO stores information at "vim_info"
1450 ng_ro_status = None
1451 target_vim = None
1452 if vdur.get("vim_info"):
1453 target_vim = next(
1454 t for t in vdur["vim_info"]
1455 ) # there should be only one key
1456 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1457 if (
1458 vdur.get("pdu-type")
1459 or vdur.get("status") == "ACTIVE"
1460 or ng_ro_status == "ACTIVE"
1461 ):
1462 ip_address = vdur.get("ip-address")
1463 if not ip_address:
1464 continue
1465 target_vdu_id = vdur["vdu-id-ref"]
1466 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1467 raise LcmException(
1468 "Cannot inject ssh-key because target VM is in error state"
1469 )
1470
1471 if not target_vdu_id:
1472 continue
1473
1474 # inject public key into machine
1475 if pub_key and user:
1476 self.logger.debug(logging_text + "Inserting RO key")
1477 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1478 if vdur.get("pdu-type"):
1479 self.logger.error(logging_text + "Cannot inject ssh-key into a PDU")
1480 return ip_address
1481 try:
1482 ro_vm_id = "{}-{}".format(
1483 db_vnfr["member-vnf-index-ref"], target_vdu_id
1484 ) # TODO add vdu_index
1485 if self.ng_ro:
1486 target = {
1487 "action": {
1488 "action": "inject_ssh_key",
1489 "key": pub_key,
1490 "user": user,
1491 },
1492 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1493 }
1494 desc = await self.RO.deploy(nsr_id, target)
1495 action_id = desc["action_id"]
1496 await self._wait_ng_ro(nsr_id, action_id, timeout=600)
1497 break
1498 else:
1499 # wait until NS is deployed at RO
1500 if not ro_nsr_id:
1501 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1502 ro_nsr_id = deep_get(
1503 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1504 )
1505 if not ro_nsr_id:
1506 continue
1507 result_dict = await self.RO.create_action(
1508 item="ns",
1509 item_id_name=ro_nsr_id,
1510 descriptor={
1511 "add_public_key": pub_key,
1512 "vms": [ro_vm_id],
1513 "user": user,
1514 },
1515 )
1516 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1517 if not result_dict or not isinstance(result_dict, dict):
1518 raise LcmException(
1519 "Unknown response from RO when injecting key"
1520 )
1521 for result in result_dict.values():
1522 if result.get("vim_result") == 200:
1523 break
1524 else:
1525 raise ROclient.ROClientException(
1526 "error injecting key: {}".format(
1527 result.get("description")
1528 )
1529 )
1530 break
1531 except NgRoException as e:
1532 raise LcmException(
1533 "Reaching max tries injecting key. Error: {}".format(e)
1534 )
1535 except ROclient.ROClientException as e:
1536 if not nb_tries:
1537 self.logger.debug(
1538 logging_text
1539 + "error injecting key: {}. Retrying until {} seconds".format(
1540 e, 20 * 10
1541 )
1542 )
1543 nb_tries += 1
1544 if nb_tries >= 20:
1545 raise LcmException(
1546 "Reaching max tries injecting key. Error: {}".format(e)
1547 )
1548 else:
1549 break
1550
1551 return ip_address
1552
1553 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1554 """
1555 Wait until dependent VCA deployments have finished. The NS waits for VNFs and VDUs; VNFs wait for VDUs
1556 """
1557 my_vca = vca_deployed_list[vca_index]
1558 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1559 # vdu or kdu: no dependencies
1560 return
1561 timeout = 300
1562 while timeout >= 0:
1563 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1564 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1565 configuration_status_list = db_nsr["configurationStatus"]
1566 for index, vca_deployed in enumerate(configuration_status_list):
1567 if index == vca_index:
1568 # myself
1569 continue
1570 if not my_vca.get("member-vnf-index") or (
1571 vca_deployed.get("member-vnf-index")
1572 == my_vca.get("member-vnf-index")
1573 ):
1574 internal_status = configuration_status_list[index].get("status")
1575 if internal_status == "READY":
1576 continue
1577 elif internal_status == "BROKEN":
1578 raise LcmException(
1579 "Configuration aborted because dependent charm/s has failed"
1580 )
1581 else:
1582 break
1583 else:
1584 # no dependencies, return
1585 return
1586 await asyncio.sleep(10)
1587 timeout -= 1
1588
1589 raise LcmException("Configuration aborted because dependent charm/s timeout")
1590
1591 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1592 vca_id = None
1593 if db_vnfr:
1594 vca_id = deep_get(db_vnfr, ("vca-id",))
1595 elif db_nsr:
1596 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1597 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1598 return vca_id
1599
1600 async def instantiate_N2VC(
1601 self,
1602 logging_text,
1603 vca_index,
1604 nsi_id,
1605 db_nsr,
1606 db_vnfr,
1607 vdu_id,
1608 kdu_name,
1609 vdu_index,
1610 config_descriptor,
1611 deploy_params,
1612 base_folder,
1613 nslcmop_id,
1614 stage,
1615 vca_type,
1616 vca_name,
1617 ee_config_descriptor,
1618 ):
1619 nsr_id = db_nsr["_id"]
1620 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1621 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1622 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1623 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1624 db_dict = {
1625 "collection": "nsrs",
1626 "filter": {"_id": nsr_id},
1627 "path": db_update_entry,
1628 }
1629 step = ""
1630 try:
1631
1632 element_type = "NS"
1633 element_under_configuration = nsr_id
1634
1635 vnfr_id = None
1636 if db_vnfr:
1637 vnfr_id = db_vnfr["_id"]
1638 osm_config["osm"]["vnf_id"] = vnfr_id
1639
1640 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1641
1642 if vca_type == "native_charm":
1643 index_number = 0
1644 else:
1645 index_number = vdu_index or 0
1646
1647 if vnfr_id:
1648 element_type = "VNF"
1649 element_under_configuration = vnfr_id
1650 namespace += ".{}-{}".format(vnfr_id, index_number)
1651 if vdu_id:
1652 namespace += ".{}-{}".format(vdu_id, index_number)
1653 element_type = "VDU"
1654 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1655 osm_config["osm"]["vdu_id"] = vdu_id
1656 elif kdu_name:
1657 namespace += ".{}".format(kdu_name)
1658 element_type = "KDU"
1659 element_under_configuration = kdu_name
1660 osm_config["osm"]["kdu_name"] = kdu_name
1661
1662 # Get artifact path
1663 if base_folder["pkg-dir"]:
1664 artifact_path = "{}/{}/{}/{}".format(
1665 base_folder["folder"],
1666 base_folder["pkg-dir"],
1667 "charms"
1668 if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1669 else "helm-charts",
1670 vca_name,
1671 )
1672 else:
1673 artifact_path = "{}/Scripts/{}/{}/".format(
1674 base_folder["folder"],
1675 "charms"
1676 if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1677 else "helm-charts",
1678 vca_name,
1679 )
1680
1681 self.logger.debug("Artifact path > {}".format(artifact_path))
1682
1683 # get initial_config_primitive_list that applies to this element
1684 initial_config_primitive_list = config_descriptor.get(
1685 "initial-config-primitive"
1686 )
1687
1688 self.logger.debug(
1689 "Initial config primitive list > {}".format(
1690 initial_config_primitive_list
1691 )
1692 )
1693
1694 # add config if not present for NS charm
1695 ee_descriptor_id = ee_config_descriptor.get("id")
1696 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1697 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1698 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1699 )
1700
1701 self.logger.debug(
1702 "Initial config primitive list #2 > {}".format(
1703 initial_config_primitive_list
1704 )
1705 )
1706 # n2vc_redesign STEP 3.1
1707 # find old ee_id if exists
1708 ee_id = vca_deployed.get("ee_id")
1709
1710 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1711 # create or register execution environment in VCA
1712 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1713
1714 self._write_configuration_status(
1715 nsr_id=nsr_id,
1716 vca_index=vca_index,
1717 status="CREATING",
1718 element_under_configuration=element_under_configuration,
1719 element_type=element_type,
1720 )
1721
1722 step = "create execution environment"
1723 self.logger.debug(logging_text + step)
1724
1725 ee_id = None
1726 credentials = None
1727 if vca_type == "k8s_proxy_charm":
1728 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1729 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1730 namespace=namespace,
1731 artifact_path=artifact_path,
1732 db_dict=db_dict,
1733 vca_id=vca_id,
1734 )
1735 elif vca_type == "helm" or vca_type == "helm-v3":
1736 ee_id, credentials = await self.vca_map[
1737 vca_type
1738 ].create_execution_environment(
1739 namespace=namespace,
1740 reuse_ee_id=ee_id,
1741 db_dict=db_dict,
1742 config=osm_config,
1743 artifact_path=artifact_path,
1744 vca_type=vca_type,
1745 )
1746 else:
1747 ee_id, credentials = await self.vca_map[
1748 vca_type
1749 ].create_execution_environment(
1750 namespace=namespace,
1751 reuse_ee_id=ee_id,
1752 db_dict=db_dict,
1753 vca_id=vca_id,
1754 )
1755
1756 elif vca_type == "native_charm":
1757 step = "Waiting to VM being up and getting IP address"
1758 self.logger.debug(logging_text + step)
1759 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1760 logging_text,
1761 nsr_id,
1762 vnfr_id,
1763 vdu_id,
1764 vdu_index,
1765 user=None,
1766 pub_key=None,
1767 )
1768 credentials = {"hostname": rw_mgmt_ip}
1769 # get username
1770 username = deep_get(
1771 config_descriptor, ("config-access", "ssh-access", "default-user")
1772 )
1773 # TODO remove this when the changes on IM regarding config-access:ssh-access:default-user are
1774 # merged. Meanwhile, get the username from initial-config-primitive
1775 if not username and initial_config_primitive_list:
1776 for config_primitive in initial_config_primitive_list:
1777 for param in config_primitive.get("parameter", ()):
1778 if param["name"] == "ssh-username":
1779 username = param["value"]
1780 break
1781 if not username:
1782 raise LcmException(
1783 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1784 "'config-access.ssh-access.default-user'"
1785 )
1786 credentials["username"] = username
1787 # n2vc_redesign STEP 3.2
1788
1789 self._write_configuration_status(
1790 nsr_id=nsr_id,
1791 vca_index=vca_index,
1792 status="REGISTERING",
1793 element_under_configuration=element_under_configuration,
1794 element_type=element_type,
1795 )
1796
1797 step = "register execution environment {}".format(credentials)
1798 self.logger.debug(logging_text + step)
1799 ee_id = await self.vca_map[vca_type].register_execution_environment(
1800 credentials=credentials,
1801 namespace=namespace,
1802 db_dict=db_dict,
1803 vca_id=vca_id,
1804 )
1805
1806 # for compatibility with MON/POL modules, they need the model and application name at database
1807 # TODO ask MON/POL whether they still need to assume the format "model_name.application_name"
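# ee_id is typically of the form "<model_name>.<application_name>" (any further dot-separated parts are ignored here)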
1808 ee_id_parts = ee_id.split(".")
1809 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1810 if len(ee_id_parts) >= 2:
1811 model_name = ee_id_parts[0]
1812 application_name = ee_id_parts[1]
1813 db_nsr_update[db_update_entry + "model"] = model_name
1814 db_nsr_update[db_update_entry + "application"] = application_name
1815
1816 # n2vc_redesign STEP 3.3
1817 step = "Install configuration Software"
1818
1819 self._write_configuration_status(
1820 nsr_id=nsr_id,
1821 vca_index=vca_index,
1822 status="INSTALLING SW",
1823 element_under_configuration=element_under_configuration,
1824 element_type=element_type,
1825 other_update=db_nsr_update,
1826 )
1827
1828 # TODO check if already done
1829 self.logger.debug(logging_text + step)
1830 config = None
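# for native charms, the parameters of the 'config' initial primitive (if any) are passed as charm configuration when installing the software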
1831 if vca_type == "native_charm":
1832 config_primitive = next(
1833 (p for p in initial_config_primitive_list if p["name"] == "config"),
1834 None,
1835 )
1836 if config_primitive:
1837 config = self._map_primitive_params(
1838 config_primitive, {}, deploy_params
1839 )
1840 num_units = 1
1841 if vca_type == "lxc_proxy_charm":
1842 if element_type == "NS":
1843 num_units = db_nsr.get("config-units") or 1
1844 elif element_type == "VNF":
1845 num_units = db_vnfr.get("config-units") or 1
1846 elif element_type == "VDU":
1847 for v in db_vnfr["vdur"]:
1848 if vdu_id == v["vdu-id-ref"]:
1849 num_units = v.get("config-units") or 1
1850 break
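# k8s proxy charms were already installed above by install_k8s_proxy_charm, so install_configuration_sw is skipped for them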
1851 if vca_type != "k8s_proxy_charm":
1852 await self.vca_map[vca_type].install_configuration_sw(
1853 ee_id=ee_id,
1854 artifact_path=artifact_path,
1855 db_dict=db_dict,
1856 config=config,
1857 num_units=num_units,
1858 vca_id=vca_id,
1859 vca_type=vca_type,
1860 )
1861
1862 # write in db flag of configuration_sw already installed
1863 self.update_db_2(
1864 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1865 )
1866
1867 # add relations for this VCA (wait for other peers related with this VCA)
1868 await self._add_vca_relations(
1869 logging_text=logging_text,
1870 nsr_id=nsr_id,
1871 vca_type=vca_type,
1872 vca_index=vca_index,
1873 )
1874
1875 # if SSH access is required, then get the execution environment SSH public key
1876 # for native charms we have already waited for the VM to be up
1877 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1878 pub_key = None
1879 user = None
1880 # self.logger.debug("get ssh key block")
1881 if deep_get(
1882 config_descriptor, ("config-access", "ssh-access", "required")
1883 ):
1884 # self.logger.debug("ssh key needed")
1885 # Needed to inject a ssh key
1886 user = deep_get(
1887 config_descriptor,
1888 ("config-access", "ssh-access", "default-user"),
1889 )
1890 step = "Install configuration Software, getting public ssh key"
1891 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1892 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1893 )
1894
1895 step = "Insert public key into VM user={} ssh_key={}".format(
1896 user, pub_key
1897 )
1898 else:
1899 # self.logger.debug("no need to get ssh key")
1900 step = "Waiting for VM to be up and getting IP address"
1901 self.logger.debug(logging_text + step)
1902
1903 # n2vc_redesign STEP 5.1
1904 # wait for RO (ip-address) and insert pub_key into the VM
1905 if vnfr_id:
1906 if kdu_name:
1907 rw_mgmt_ip = await self.wait_kdu_up(
1908 logging_text, nsr_id, vnfr_id, kdu_name
1909 )
1910 else:
1911 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1912 logging_text,
1913 nsr_id,
1914 vnfr_id,
1915 vdu_id,
1916 vdu_index,
1917 user=user,
1918 pub_key=pub_key,
1919 )
1920 else:
1921 rw_mgmt_ip = None # This is for a NS configuration
1922
1923 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
1924
1925 # store rw_mgmt_ip in deploy params for later replacement
1926 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1927
1928 # n2vc_redesign STEP 6 Execute initial config primitive
1929 step = "execute initial config primitive"
1930
1931 # wait for dependent primitives execution (NS -> VNF -> VDU)
1932 if initial_config_primitive_list:
1933 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1934
1935 # stage, depending on the element type: vdu, kdu, vnf or ns
1936 my_vca = vca_deployed_list[vca_index]
1937 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1938 # VDU or KDU
1939 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
1940 elif my_vca.get("member-vnf-index"):
1941 # VNF
1942 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
1943 else:
1944 # NS
1945 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
1946
1947 self._write_configuration_status(
1948 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
1949 )
1950
1951 self._write_op_status(op_id=nslcmop_id, stage=stage)
1952
1953 check_if_terminated_needed = True
1954 for initial_config_primitive in initial_config_primitive_list:
1955 # adding information to vca_deployed if it is an NS execution environment
1956 if not vca_deployed["member-vnf-index"]:
1957 deploy_params["ns_config_info"] = json.dumps(
1958 self._get_ns_config_info(nsr_id)
1959 )
1960 # TODO check if already done
1961 primitive_params_ = self._map_primitive_params(
1962 initial_config_primitive, {}, deploy_params
1963 )
1964
1965 step = "execute primitive '{}' params '{}'".format(
1966 initial_config_primitive["name"], primitive_params_
1967 )
1968 self.logger.debug(logging_text + step)
1969 await self.vca_map[vca_type].exec_primitive(
1970 ee_id=ee_id,
1971 primitive_name=initial_config_primitive["name"],
1972 params_dict=primitive_params_,
1973 db_dict=db_dict,
1974 vca_id=vca_id,
1975 vca_type=vca_type,
1976 )
1977 # Once some primitive has been executed, check and record in the db whether terminate primitives will need to be executed
1978 if check_if_terminated_needed:
1979 if config_descriptor.get("terminate-config-primitive"):
1980 self.update_db_2(
1981 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
1982 )
1983 check_if_terminated_needed = False
1984
1985 # TODO register in database that primitive is done
1986
1987 # STEP 7 Configure metrics
1988 if vca_type == "helm" or vca_type == "helm-v3":
1989 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
1990 ee_id=ee_id,
1991 artifact_path=artifact_path,
1992 ee_config_descriptor=ee_config_descriptor,
1993 vnfr_id=vnfr_id,
1994 nsr_id=nsr_id,
1995 target_ip=rw_mgmt_ip,
1996 )
1997 if prometheus_jobs:
1998 self.update_db_2(
1999 "nsrs",
2000 nsr_id,
2001 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2002 )
2003
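# store each scrape job in the 'prometheus_jobs' collection (upsert by job_name) so the Prometheus configuration can be built from the database instead of being pushed by LCM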
2004 for job in prometheus_jobs:
2005 self.db.set_one(
2006 "prometheus_jobs",
2007 {
2008 "job_name": job["job_name"]
2009 },
2010 job,
2011 upsert=True,
2012 fail_on_empty=False
2013 )
2014
2015 step = "instantiated at VCA"
2016 self.logger.debug(logging_text + step)
2017
2018 self._write_configuration_status(
2019 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2020 )
2021
2022 except Exception as e: # TODO not use Exception but N2VC exception
2023 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2024 if not isinstance(
2025 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2026 ):
2027 self.logger.error(
2028 "Exception while {} : {}".format(step, e), exc_info=True
2029 )
2030 self._write_configuration_status(
2031 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2032 )
2033 raise LcmException("{} {}".format(step, e)) from e
2034
2035 def _write_ns_status(
2036 self,
2037 nsr_id: str,
2038 ns_state: str,
2039 current_operation: str,
2040 current_operation_id: str,
2041 error_description: str = None,
2042 error_detail: str = None,
2043 other_update: dict = None,
2044 ):
2045 """
2046 Update db_nsr fields.
2047 :param nsr_id:
2048 :param ns_state:
2049 :param current_operation:
2050 :param current_operation_id:
2051 :param error_description:
2052 :param error_detail:
2053 :param other_update: Other required changes at database; if provided, they will be written as well
2054 :return:
2055 """
2056 try:
2057 db_dict = other_update or {}
2058 db_dict[
2059 "_admin.nslcmop"
2060 ] = current_operation_id # for backward compatibility
2061 db_dict["_admin.current-operation"] = current_operation_id
2062 db_dict["_admin.operation-type"] = (
2063 current_operation if current_operation != "IDLE" else None
2064 )
2065 db_dict["currentOperation"] = current_operation
2066 db_dict["currentOperationID"] = current_operation_id
2067 db_dict["errorDescription"] = error_description
2068 db_dict["errorDetail"] = error_detail
2069
2070 if ns_state:
2071 db_dict["nsState"] = ns_state
2072 self.update_db_2("nsrs", nsr_id, db_dict)
2073 except DbException as e:
2074 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2075
2076 def _write_op_status(
2077 self,
2078 op_id: str,
2079 stage: list = None,
2080 error_message: str = None,
2081 queuePosition: int = 0,
2082 operation_state: str = None,
2083 other_update: dict = None,
2084 ):
2085 try:
2086 db_dict = other_update or {}
2087 db_dict["queuePosition"] = queuePosition
2088 if isinstance(stage, list):
2089 db_dict["stage"] = stage[0]
2090 db_dict["detailed-status"] = " ".join(stage)
2091 elif stage is not None:
2092 db_dict["stage"] = str(stage)
2093
2094 if error_message is not None:
2095 db_dict["errorMessage"] = error_message
2096 if operation_state is not None:
2097 db_dict["operationState"] = operation_state
2098 db_dict["statusEnteredTime"] = time()
2099 self.update_db_2("nslcmops", op_id, db_dict)
2100 except DbException as e:
2101 self.logger.warn(
2102 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2103 )
2104
2105 def _write_all_config_status(self, db_nsr: dict, status: str):
2106 try:
2107 nsr_id = db_nsr["_id"]
2108 # configurationStatus
2109 config_status = db_nsr.get("configurationStatus")
2110 if config_status:
2111 db_nsr_update = {
2112 "configurationStatus.{}.status".format(index): status
2113 for index, v in enumerate(config_status)
2114 if v
2115 }
2116 # update status
2117 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2118
2119 except DbException as e:
2120 self.logger.warn(
2121 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2122 )
2123
2124 def _write_configuration_status(
2125 self,
2126 nsr_id: str,
2127 vca_index: int,
2128 status: str = None,
2129 element_under_configuration: str = None,
2130 element_type: str = None,
2131 other_update: dict = None,
2132 ):
2133
2134 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2135 # .format(vca_index, status))
2136
2137 try:
2138 db_path = "configurationStatus.{}.".format(vca_index)
2139 db_dict = other_update or {}
2140 if status:
2141 db_dict[db_path + "status"] = status
2142 if element_under_configuration:
2143 db_dict[
2144 db_path + "elementUnderConfiguration"
2145 ] = element_under_configuration
2146 if element_type:
2147 db_dict[db_path + "elementType"] = element_type
2148 self.update_db_2("nsrs", nsr_id, db_dict)
2149 except DbException as e:
2150 self.logger.warn(
2151 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2152 status, nsr_id, vca_index, e
2153 )
2154 )
2155
2156 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2157 """
2158 Checks and computes the placement (VIM account where to deploy). If it is decided by an external tool, it
2159 sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
2160 Database is used because the result can be obtained from a different LCM worker in case of HA.
2161 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2162 :param db_nslcmop: database content of nslcmop
2163 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2164 :return: True if some modification is done. Modifies database vnfrs and the db_vnfrs parameter with the
2165 computed 'vim-account-id'
2166 """
2167 modified = False
2168 nslcmop_id = db_nslcmop["_id"]
2169 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2170 if placement_engine == "PLA":
2171 self.logger.debug(
2172 logging_text + "Invoke and wait for placement optimization"
2173 )
2174 await self.msg.aiowrite(
2175 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2176 )
2177 db_poll_interval = 5
2178 wait = db_poll_interval * 10
2179 pla_result = None
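# poll the database for the PLA result (it may be written by a different LCM worker), up to 10 polls of 5 seconds (~50 seconds)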
2180 while not pla_result and wait >= 0:
2181 await asyncio.sleep(db_poll_interval)
2182 wait -= db_poll_interval
2183 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2184 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2185
2186 if not pla_result:
2187 raise LcmException(
2188 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2189 )
2190
2191 for pla_vnf in pla_result["vnf"]:
2192 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2193 if not pla_vnf.get("vimAccountId") or not vnfr:
2194 continue
2195 modified = True
2196 self.db.set_one(
2197 "vnfrs",
2198 {"_id": vnfr["_id"]},
2199 {"vim-account-id": pla_vnf["vimAccountId"]},
2200 )
2201 # Modifies db_vnfrs
2202 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2203 return modified
2204
2205 def update_nsrs_with_pla_result(self, params):
2206 try:
2207 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2208 self.update_db_2(
2209 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2210 )
2211 except Exception as e:
2212 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2213
2214 async def instantiate(self, nsr_id, nslcmop_id):
2215 """
2216
2217 :param nsr_id: ns instance to deploy
2218 :param nslcmop_id: operation to run
2219 :return:
2220 """
2221
2222 # Try to lock HA task here
2223 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2224 if not task_is_locked_by_me:
2225 self.logger.debug(
2226 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2227 )
2228 return
2229
2230 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2231 self.logger.debug(logging_text + "Enter")
2232
2233 # get all needed from database
2234
2235 # database nsrs record
2236 db_nsr = None
2237
2238 # database nslcmops record
2239 db_nslcmop = None
2240
2241 # update operation on nsrs
2242 db_nsr_update = {}
2243 # update operation on nslcmops
2244 db_nslcmop_update = {}
2245
2246 nslcmop_operation_state = None
2247 db_vnfrs = {} # vnf's info indexed by member-index
2248 # n2vc_info = {}
2249 tasks_dict_info = {} # from task to info text
2250 exc = None
2251 error_list = []
2252 stage = [
2253 "Stage 1/5: preparation of the environment.",
2254 "Waiting for previous operations to terminate.",
2255 "",
2256 ]
2257 # ^ stage, step, VIM progress
2258 try:
2259 # wait for any previous tasks in process
2260 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2261
2262 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2263 stage[1] = "Reading from database."
2264 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2265 db_nsr_update["detailed-status"] = "creating"
2266 db_nsr_update["operational-status"] = "init"
2267 self._write_ns_status(
2268 nsr_id=nsr_id,
2269 ns_state="BUILDING",
2270 current_operation="INSTANTIATING",
2271 current_operation_id=nslcmop_id,
2272 other_update=db_nsr_update,
2273 )
2274 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2275
2276 # read from db: operation
2277 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2278 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2279 ns_params = db_nslcmop.get("operationParams")
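# an explicit timeout_ns_deploy in the operation parameters overrides the configured/default NS deploy timeout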
2280 if ns_params and ns_params.get("timeout_ns_deploy"):
2281 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2282 else:
2283 timeout_ns_deploy = self.timeout.get(
2284 "ns_deploy", self.timeout_ns_deploy
2285 )
2286
2287 # read from db: ns
2288 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2289 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2290 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2291 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2292 self.fs.sync(db_nsr["nsd-id"])
2293 db_nsr["nsd"] = nsd
2294 # nsr_name = db_nsr["name"] # TODO short-name??
2295
2296 # read from db: vnf's of this ns
2297 stage[1] = "Getting vnfrs from db."
2298 self.logger.debug(logging_text + stage[1])
2299 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2300
2301 # read from db: vnfd's for every vnf
2302 db_vnfds = [] # every vnfd data
2303
2304 # for each vnf in ns, read vnfd
2305 for vnfr in db_vnfrs_list:
2306 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2307 vnfd_id = vnfr["vnfd-id"]
2308 vnfd_ref = vnfr["vnfd-ref"]
2309 self.fs.sync(vnfd_id)
2310
2311 # if we do not have this vnfd yet, read it from db
2312 if vnfd_id not in [vnfd["_id"] for vnfd in db_vnfds]:
2313 # read from db
2314 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2315 vnfd_id, vnfd_ref
2316 )
2317 self.logger.debug(logging_text + stage[1])
2318 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2319
2320 # store vnfd
2321 db_vnfds.append(vnfd)
2322
2323 # Get or generate the _admin.deployed.VCA list
2324 vca_deployed_list = None
2325 if db_nsr["_admin"].get("deployed"):
2326 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2327 if vca_deployed_list is None:
2328 vca_deployed_list = []
2329 configuration_status_list = []
2330 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2331 db_nsr_update["configurationStatus"] = configuration_status_list
2332 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2333 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2334 elif isinstance(vca_deployed_list, dict):
2335 # maintain backward compatibility. Change a dict to list at database
2336 vca_deployed_list = list(vca_deployed_list.values())
2337 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2338 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2339
2340 if not isinstance(
2341 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2342 ):
2343 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2344 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2345
2346 # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2347 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2348 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2349 self.db.set_list(
2350 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2351 )
2352
2353 # n2vc_redesign STEP 2 Deploy Network Scenario
2354 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2355 self._write_op_status(op_id=nslcmop_id, stage=stage)
2356
2357 stage[1] = "Deploying KDUs."
2358 # self.logger.debug(logging_text + "Before deploy_kdus")
2359 # Call deploy_kdus in case the "vdu:kdu" param exists
2360 await self.deploy_kdus(
2361 logging_text=logging_text,
2362 nsr_id=nsr_id,
2363 nslcmop_id=nslcmop_id,
2364 db_vnfrs=db_vnfrs,
2365 db_vnfds=db_vnfds,
2366 task_instantiation_info=tasks_dict_info,
2367 )
2368
2369 stage[1] = "Getting VCA public key."
2370 # n2vc_redesign STEP 1 Get VCA public ssh-key
2371 # feature 1429. Add n2vc public key to needed VMs
2372 n2vc_key = self.n2vc.get_public_key()
2373 n2vc_key_list = [n2vc_key]
2374 if self.vca_config.get("public_key"):
2375 n2vc_key_list.append(self.vca_config["public_key"])
2376
2377 stage[1] = "Deploying NS at VIM."
2378 task_ro = asyncio.ensure_future(
2379 self.instantiate_RO(
2380 logging_text=logging_text,
2381 nsr_id=nsr_id,
2382 nsd=nsd,
2383 db_nsr=db_nsr,
2384 db_nslcmop=db_nslcmop,
2385 db_vnfrs=db_vnfrs,
2386 db_vnfds=db_vnfds,
2387 n2vc_key_list=n2vc_key_list,
2388 stage=stage,
2389 )
2390 )
2391 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2392 tasks_dict_info[task_ro] = "Deploying at VIM"
2393
2394 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2395 stage[1] = "Deploying Execution Environments."
2396 self.logger.debug(logging_text + stage[1])
2397
2398 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2399 for vnf_profile in get_vnf_profiles(nsd):
2400 vnfd_id = vnf_profile["vnfd-id"]
2401 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2402 member_vnf_index = str(vnf_profile["id"])
2403 db_vnfr = db_vnfrs[member_vnf_index]
2404 base_folder = vnfd["_admin"]["storage"]
2405 vdu_id = None
2406 vdu_index = 0
2407 vdu_name = None
2408 kdu_name = None
2409
2410 # Get additional parameters
2411 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2412 if db_vnfr.get("additionalParamsForVnf"):
2413 deploy_params.update(
2414 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2415 )
2416
2417 descriptor_config = get_configuration(vnfd, vnfd["id"])
2418 if descriptor_config:
2419 self._deploy_n2vc(
2420 logging_text=logging_text
2421 + "member_vnf_index={} ".format(member_vnf_index),
2422 db_nsr=db_nsr,
2423 db_vnfr=db_vnfr,
2424 nslcmop_id=nslcmop_id,
2425 nsr_id=nsr_id,
2426 nsi_id=nsi_id,
2427 vnfd_id=vnfd_id,
2428 vdu_id=vdu_id,
2429 kdu_name=kdu_name,
2430 member_vnf_index=member_vnf_index,
2431 vdu_index=vdu_index,
2432 vdu_name=vdu_name,
2433 deploy_params=deploy_params,
2434 descriptor_config=descriptor_config,
2435 base_folder=base_folder,
2436 task_instantiation_info=tasks_dict_info,
2437 stage=stage,
2438 )
2439
2440 # Deploy charms for each VDU that supports one.
2441 for vdud in get_vdu_list(vnfd):
2442 vdu_id = vdud["id"]
2443 descriptor_config = get_configuration(vnfd, vdu_id)
2444 vdur = find_in_list(
2445 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2446 )
2447
2448 if vdur.get("additionalParams"):
2449 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2450 else:
2451 deploy_params_vdu = deploy_params
2452 deploy_params_vdu["OSM"] = get_osm_params(
2453 db_vnfr, vdu_id, vdu_count_index=0
2454 )
2455 vdud_count = get_number_of_instances(vnfd, vdu_id)
2456
2457 self.logger.debug("VDUD > {}".format(vdud))
2458 self.logger.debug(
2459 "Descriptor config > {}".format(descriptor_config)
2460 )
2461 if descriptor_config:
2462 vdu_name = None
2463 kdu_name = None
2464 for vdu_index in range(vdud_count):
2465 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2466 self._deploy_n2vc(
2467 logging_text=logging_text
2468 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2469 member_vnf_index, vdu_id, vdu_index
2470 ),
2471 db_nsr=db_nsr,
2472 db_vnfr=db_vnfr,
2473 nslcmop_id=nslcmop_id,
2474 nsr_id=nsr_id,
2475 nsi_id=nsi_id,
2476 vnfd_id=vnfd_id,
2477 vdu_id=vdu_id,
2478 kdu_name=kdu_name,
2479 member_vnf_index=member_vnf_index,
2480 vdu_index=vdu_index,
2481 vdu_name=vdu_name,
2482 deploy_params=deploy_params_vdu,
2483 descriptor_config=descriptor_config,
2484 base_folder=base_folder,
2485 task_instantiation_info=tasks_dict_info,
2486 stage=stage,
2487 )
2488 for kdud in get_kdu_list(vnfd):
2489 kdu_name = kdud["name"]
2490 descriptor_config = get_configuration(vnfd, kdu_name)
2491 if descriptor_config:
2492 vdu_id = None
2493 vdu_index = 0
2494 vdu_name = None
2495 kdur = next(
2496 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2497 )
2498 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2499 if kdur.get("additionalParams"):
2500 deploy_params_kdu = parse_yaml_strings(
2501 kdur["additionalParams"]
2502 )
2503
2504 self._deploy_n2vc(
2505 logging_text=logging_text,
2506 db_nsr=db_nsr,
2507 db_vnfr=db_vnfr,
2508 nslcmop_id=nslcmop_id,
2509 nsr_id=nsr_id,
2510 nsi_id=nsi_id,
2511 vnfd_id=vnfd_id,
2512 vdu_id=vdu_id,
2513 kdu_name=kdu_name,
2514 member_vnf_index=member_vnf_index,
2515 vdu_index=vdu_index,
2516 vdu_name=vdu_name,
2517 deploy_params=deploy_params_kdu,
2518 descriptor_config=descriptor_config,
2519 base_folder=base_folder,
2520 task_instantiation_info=tasks_dict_info,
2521 stage=stage,
2522 )
2523
2524 # Check if this NS has a charm configuration
2525 descriptor_config = nsd.get("ns-configuration")
2526 if descriptor_config and descriptor_config.get("juju"):
2527 vnfd_id = None
2528 db_vnfr = None
2529 member_vnf_index = None
2530 vdu_id = None
2531 kdu_name = None
2532 vdu_index = 0
2533 vdu_name = None
2534
2535 # Get additional parameters
2536 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2537 if db_nsr.get("additionalParamsForNs"):
2538 deploy_params.update(
2539 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2540 )
2541 base_folder = nsd["_admin"]["storage"]
2542 self._deploy_n2vc(
2543 logging_text=logging_text,
2544 db_nsr=db_nsr,
2545 db_vnfr=db_vnfr,
2546 nslcmop_id=nslcmop_id,
2547 nsr_id=nsr_id,
2548 nsi_id=nsi_id,
2549 vnfd_id=vnfd_id,
2550 vdu_id=vdu_id,
2551 kdu_name=kdu_name,
2552 member_vnf_index=member_vnf_index,
2553 vdu_index=vdu_index,
2554 vdu_name=vdu_name,
2555 deploy_params=deploy_params,
2556 descriptor_config=descriptor_config,
2557 base_folder=base_folder,
2558 task_instantiation_info=tasks_dict_info,
2559 stage=stage,
2560 )
2561
2562 # the rest of the work will be done in the finally block
2563
2564 except (
2565 ROclient.ROClientException,
2566 DbException,
2567 LcmException,
2568 N2VCException,
2569 ) as e:
2570 self.logger.error(
2571 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2572 )
2573 exc = e
2574 except asyncio.CancelledError:
2575 self.logger.error(
2576 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2577 )
2578 exc = "Operation was cancelled"
2579 except Exception as e:
2580 exc = traceback.format_exc()
2581 self.logger.critical(
2582 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2583 exc_info=True,
2584 )
2585 finally:
2586 if exc:
2587 error_list.append(str(exc))
2588 try:
2589 # wait for pending tasks
2590 if tasks_dict_info:
2591 stage[1] = "Waiting for instantiate pending tasks."
2592 self.logger.debug(logging_text + stage[1])
2593 error_list += await self._wait_for_tasks(
2594 logging_text,
2595 tasks_dict_info,
2596 timeout_ns_deploy,
2597 stage,
2598 nslcmop_id,
2599 nsr_id=nsr_id,
2600 )
2601 stage[1] = stage[2] = ""
2602 except asyncio.CancelledError:
2603 error_list.append("Cancelled")
2604 # TODO cancel all tasks
2605 except Exception as exc:
2606 error_list.append(str(exc))
2607
2608 # update operation-status
2609 db_nsr_update["operational-status"] = "running"
2610 # let's begin with VCA 'configured' status (later we can change it)
2611 db_nsr_update["config-status"] = "configured"
2612 for task, task_name in tasks_dict_info.items():
2613 if not task.done() or task.cancelled() or task.exception():
2614 if task_name.startswith(self.task_name_deploy_vca):
2615 # A N2VC task is pending
2616 db_nsr_update["config-status"] = "failed"
2617 else:
2618 # RO or KDU task is pending
2619 db_nsr_update["operational-status"] = "failed"
2620
2621 # update status at database
2622 if error_list:
2623 error_detail = ". ".join(error_list)
2624 self.logger.error(logging_text + error_detail)
2625 error_description_nslcmop = "{} Detail: {}".format(
2626 stage[0], error_detail
2627 )
2628 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2629 nslcmop_id, stage[0]
2630 )
2631
2632 db_nsr_update["detailed-status"] = (
2633 error_description_nsr + " Detail: " + error_detail
2634 )
2635 db_nslcmop_update["detailed-status"] = error_detail
2636 nslcmop_operation_state = "FAILED"
2637 ns_state = "BROKEN"
2638 else:
2639 error_detail = None
2640 error_description_nsr = error_description_nslcmop = None
2641 ns_state = "READY"
2642 db_nsr_update["detailed-status"] = "Done"
2643 db_nslcmop_update["detailed-status"] = "Done"
2644 nslcmop_operation_state = "COMPLETED"
2645
2646 if db_nsr:
2647 self._write_ns_status(
2648 nsr_id=nsr_id,
2649 ns_state=ns_state,
2650 current_operation="IDLE",
2651 current_operation_id=None,
2652 error_description=error_description_nsr,
2653 error_detail=error_detail,
2654 other_update=db_nsr_update,
2655 )
2656 self._write_op_status(
2657 op_id=nslcmop_id,
2658 stage="",
2659 error_message=error_description_nslcmop,
2660 operation_state=nslcmop_operation_state,
2661 other_update=db_nslcmop_update,
2662 )
2663
2664 if nslcmop_operation_state:
2665 try:
2666 await self.msg.aiowrite(
2667 "ns",
2668 "instantiated",
2669 {
2670 "nsr_id": nsr_id,
2671 "nslcmop_id": nslcmop_id,
2672 "operationState": nslcmop_operation_state,
2673 },
2674 loop=self.loop,
2675 )
2676 except Exception as e:
2677 self.logger.error(
2678 logging_text + "kafka_write notification Exception {}".format(e)
2679 )
2680
2681 self.logger.debug(logging_text + "Exit")
2682 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2683
2684 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2685 if vnfd_id not in cached_vnfds:
2686 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2687 return cached_vnfds[vnfd_id]
2688
2689 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2690 if vnf_profile_id not in cached_vnfrs:
2691 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2692 "vnfrs",
2693 {
2694 "member-vnf-index-ref": vnf_profile_id,
2695 "nsr-id-ref": nsr_id,
2696 },
2697 )
2698 return cached_vnfrs[vnf_profile_id]
2699
2700 def _is_deployed_vca_in_relation(
2701 self, vca: DeployedVCA, relation: Relation
2702 ) -> bool:
2703 found = False
2704 for endpoint in (relation.provider, relation.requirer):
2705 if endpoint["kdu-resource-profile-id"]:
2706 continue
2707 found = (
2708 vca.vnf_profile_id == endpoint.vnf_profile_id
2709 and vca.vdu_profile_id == endpoint.vdu_profile_id
2710 and vca.execution_environment_ref == endpoint.execution_environment_ref
2711 )
2712 if found:
2713 break
2714 return found
2715
2716 def _update_ee_relation_data_with_implicit_data(
2717 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2718 ):
2719 ee_relation_data = safe_get_ee_relation(
2720 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2721 )
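# if a VNF/VDU level endpoint does not specify execution-environment-ref, derive it from the juju execution environment declared in the VNFD for that entity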
2722 ee_relation_level = EELevel.get_level(ee_relation_data)
2723 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2724 "execution-environment-ref"
2725 ]:
2726 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2727 vnfd_id = vnf_profile["vnfd-id"]
2728 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2729 entity_id = (
2730 vnfd_id
2731 if ee_relation_level == EELevel.VNF
2732 else ee_relation_data["vdu-profile-id"]
2733 )
2734 ee = get_juju_ee_ref(db_vnfd, entity_id)
2735 if not ee:
2736 raise Exception(
2737 f"no execution environments found for ee_relation {ee_relation_data}"
2738 )
2739 ee_relation_data["execution-environment-ref"] = ee["id"]
2740 return ee_relation_data
2741
2742 def _get_ns_relations(
2743 self,
2744 nsr_id: str,
2745 nsd: Dict[str, Any],
2746 vca: DeployedVCA,
2747 cached_vnfds: Dict[str, Any],
2748 ) -> List[Relation]:
2749 relations = []
2750 db_ns_relations = get_ns_configuration_relation_list(nsd)
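# a relation may be expressed either with explicit 'provider'/'requirer' endpoints or with a two-element 'entities' list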
2751 for r in db_ns_relations:
2752 provider_dict = None
2753 requirer_dict = None
2754 if all(key in r for key in ("provider", "requirer")):
2755 provider_dict = r["provider"]
2756 requirer_dict = r["requirer"]
2757 elif "entities" in r:
2758 provider_id = r["entities"][0]["id"]
2759 provider_dict = {
2760 "nsr-id": nsr_id,
2761 "endpoint": r["entities"][0]["endpoint"],
2762 }
2763 if provider_id != nsd["id"]:
2764 provider_dict["vnf-profile-id"] = provider_id
2765 requirer_id = r["entities"][1]["id"]
2766 requirer_dict = {
2767 "nsr-id": nsr_id,
2768 "endpoint": r["entities"][1]["endpoint"],
2769 }
2770 if requirer_id != nsd["id"]:
2771 requirer_dict["vnf-profile-id"] = requirer_id
2772 else:
2773 raise Exception("provider/requirer or entities must be included in the relation.")
2774 relation_provider = self._update_ee_relation_data_with_implicit_data(
2775 nsr_id, nsd, provider_dict, cached_vnfds
2776 )
2777 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2778 nsr_id, nsd, requirer_dict, cached_vnfds
2779 )
2780 provider = EERelation(relation_provider)
2781 requirer = EERelation(relation_requirer)
2782 relation = Relation(r["name"], provider, requirer)
2783 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2784 if vca_in_relation:
2785 relations.append(relation)
2786 return relations
2787
2788 def _get_vnf_relations(
2789 self,
2790 nsr_id: str,
2791 nsd: Dict[str, Any],
2792 vca: DeployedVCA,
2793 cached_vnfds: Dict[str, Any],
2794 ) -> List[Relation]:
2795 relations = []
2796 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2797 vnf_profile_id = vnf_profile["id"]
2798 vnfd_id = vnf_profile["vnfd-id"]
2799 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2800 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2801 for r in db_vnf_relations:
2802 provider_dict = None
2803 requirer_dict = None
2804 if all(key in r for key in ("provider", "requirer")):
2805 provider_dict = r["provider"]
2806 requirer_dict = r["requirer"]
2807 elif "entities" in r:
2808 provider_id = r["entities"][0]["id"]
2809 provider_dict = {
2810 "nsr-id": nsr_id,
2811 "vnf-profile-id": vnf_profile_id,
2812 "endpoint": r["entities"][0]["endpoint"],
2813 }
2814 if provider_id != vnfd_id:
2815 provider_dict["vdu-profile-id"] = provider_id
2816 requirer_id = r["entities"][1]["id"]
2817 requirer_dict = {
2818 "nsr-id": nsr_id,
2819 "vnf-profile-id": vnf_profile_id,
2820 "endpoint": r["entities"][1]["endpoint"],
2821 }
2822 if requirer_id != vnfd_id:
2823 requirer_dict["vdu-profile-id"] = requirer_id
2824 else:
2825 raise Exception("provider/requirer or entities must be included in the relation.")
2826 relation_provider = self._update_ee_relation_data_with_implicit_data(
2827 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2828 )
2829 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2830 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2831 )
2832 provider = EERelation(relation_provider)
2833 requirer = EERelation(relation_requirer)
2834 relation = Relation(r["name"], provider, requirer)
2835 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2836 if vca_in_relation:
2837 relations.append(relation)
2838 return relations
2839
2840 def _get_kdu_resource_data(
2841 self,
2842 ee_relation: EERelation,
2843 db_nsr: Dict[str, Any],
2844 cached_vnfds: Dict[str, Any],
2845 ) -> DeployedK8sResource:
2846 nsd = get_nsd(db_nsr)
2847 vnf_profiles = get_vnf_profiles(nsd)
2848 vnfd_id = find_in_list(
2849 vnf_profiles,
2850 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
2851 )["vnfd-id"]
2852 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2853 kdu_resource_profile = get_kdu_resource_profile(
2854 db_vnfd, ee_relation.kdu_resource_profile_id
2855 )
2856 kdu_name = kdu_resource_profile["kdu-name"]
2857 deployed_kdu, _ = get_deployed_kdu(
2858 db_nsr.get("_admin", ()).get("deployed", ()),
2859 kdu_name,
2860 ee_relation.vnf_profile_id,
2861 )
2862 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
2863 return deployed_kdu
2864
2865 def _get_deployed_component(
2866 self,
2867 ee_relation: EERelation,
2868 db_nsr: Dict[str, Any],
2869 cached_vnfds: Dict[str, Any],
2870 ) -> DeployedComponent:
2871 nsr_id = db_nsr["_id"]
2872 deployed_component = None
2873 ee_level = EELevel.get_level(ee_relation)
2874 if ee_level == EELevel.NS:
2875 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
2876 if vca:
2877 deployed_component = DeployedVCA(nsr_id, vca)
2878 elif ee_level == EELevel.VNF:
2879 vca = get_deployed_vca(
2880 db_nsr,
2881 {
2882 "vdu_id": None,
2883 "member-vnf-index": ee_relation.vnf_profile_id,
2884 "ee_descriptor_id": ee_relation.execution_environment_ref,
2885 },
2886 )
2887 if vca:
2888 deployed_component = DeployedVCA(nsr_id, vca)
2889 elif ee_level == EELevel.VDU:
2890 vca = get_deployed_vca(
2891 db_nsr,
2892 {
2893 "vdu_id": ee_relation.vdu_profile_id,
2894 "member-vnf-index": ee_relation.vnf_profile_id,
2895 "ee_descriptor_id": ee_relation.execution_environment_ref,
2896 },
2897 )
2898 if vca:
2899 deployed_component = DeployedVCA(nsr_id, vca)
2900 elif ee_level == EELevel.KDU:
2901 kdu_resource_data = self._get_kdu_resource_data(
2902 ee_relation, db_nsr, cached_vnfds
2903 )
2904 if kdu_resource_data:
2905 deployed_component = DeployedK8sResource(kdu_resource_data)
2906 return deployed_component
2907
2908 async def _add_relation(
2909 self,
2910 relation: Relation,
2911 vca_type: str,
2912 db_nsr: Dict[str, Any],
2913 cached_vnfds: Dict[str, Any],
2914 cached_vnfrs: Dict[str, Any],
2915 ) -> bool:
2916 deployed_provider = self._get_deployed_component(
2917 relation.provider, db_nsr, cached_vnfds
2918 )
2919 deployed_requirer = self._get_deployed_component(
2920 relation.requirer, db_nsr, cached_vnfds
2921 )
2922 if (
2923 deployed_provider
2924 and deployed_requirer
2925 and deployed_provider.config_sw_installed
2926 and deployed_requirer.config_sw_installed
2927 ):
2928 provider_db_vnfr = (
2929 self._get_vnfr(
2930 relation.provider.nsr_id,
2931 relation.provider.vnf_profile_id,
2932 cached_vnfrs,
2933 )
2934 if relation.provider.vnf_profile_id
2935 else None
2936 )
2937 requirer_db_vnfr = (
2938 self._get_vnfr(
2939 relation.requirer.nsr_id,
2940 relation.requirer.vnf_profile_id,
2941 cached_vnfrs,
2942 )
2943 if relation.requirer.vnf_profile_id
2944 else None
2945 )
2946 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
2947 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
2948 provider_relation_endpoint = RelationEndpoint(
2949 deployed_provider.ee_id,
2950 provider_vca_id,
2951 relation.provider.endpoint,
2952 )
2953 requirer_relation_endpoint = RelationEndpoint(
2954 deployed_requirer.ee_id,
2955 requirer_vca_id,
2956 relation.requirer.endpoint,
2957 )
2958 await self.vca_map[vca_type].add_relation(
2959 provider=provider_relation_endpoint,
2960 requirer=requirer_relation_endpoint,
2961 )
2962 # remove entry from relations list
2963 return True
2964 return False
2965
2966 async def _add_vca_relations(
2967 self,
2968 logging_text,
2969 nsr_id,
2970 vca_type: str,
2971 vca_index: int,
2972 timeout: int = 3600,
2973 ) -> bool:
2974
2975 # steps:
2976 # 1. find all relations for this VCA
2977 # 2. wait for other peers related
2978 # 3. add relations
2979
2980 try:
2981 # STEP 1: find all relations for this VCA
2982
2983 # read nsr record
2984 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2985 nsd = get_nsd(db_nsr)
2986
2987 # this VCA data
2988 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
2989 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
2990
2991 cached_vnfds = {}
2992 cached_vnfrs = {}
2993 relations = []
2994 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
2995 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
2996
2997 # if no relations, terminate
2998 if not relations:
2999 self.logger.debug(logging_text + " No relations")
3000 return True
3001
3002 self.logger.debug(logging_text + " adding relations {}".format(relations))
3003
3004 # add all relations
3005 start = time()
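# retry loop: re-read the NSR and try to add every pending relation until all are added or the timeout expires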
3006 while True:
3007 # check timeout
3008 now = time()
3009 if now - start >= timeout:
3010 self.logger.error(logging_text + " : timeout adding relations")
3011 return False
3012
3013 # reload nsr from database (we need the updated record: _admin.deployed.VCA)
3014 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3015
3016 # for each relation, find the VCA's related
3017 for relation in relations.copy():
3018 added = await self._add_relation(
3019 relation,
3020 vca_type,
3021 db_nsr,
3022 cached_vnfds,
3023 cached_vnfrs,
3024 )
3025 if added:
3026 relations.remove(relation)
3027
3028 if not relations:
3029 self.logger.debug("Relations added")
3030 break
3031 await asyncio.sleep(5.0)
3032
3033 return True
3034
3035 except Exception as e:
3036 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3037 return False
3038
3039 async def _install_kdu(
3040 self,
3041 nsr_id: str,
3042 nsr_db_path: str,
3043 vnfr_data: dict,
3044 kdu_index: int,
3045 kdud: dict,
3046 vnfd: dict,
3047 k8s_instance_info: dict,
3048 k8params: dict = None,
3049 timeout: int = 600,
3050 vca_id: str = None,
3051 ):
3052
3053 try:
3054 k8sclustertype = k8s_instance_info["k8scluster-type"]
3055 # Instantiate kdu
3056 db_dict_install = {
3057 "collection": "nsrs",
3058 "filter": {"_id": nsr_id},
3059 "path": nsr_db_path,
3060 }
3061
3062 if k8s_instance_info.get("kdu-deployment-name"):
3063 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3064 else:
3065 kdu_instance = self.k8scluster_map[
3066 k8sclustertype
3067 ].generate_kdu_instance_name(
3068 db_dict=db_dict_install,
3069 kdu_model=k8s_instance_info["kdu-model"],
3070 kdu_name=k8s_instance_info["kdu-name"],
3071 )
3072 self.update_db_2(
3073 "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
3074 )
3075 await self.k8scluster_map[k8sclustertype].install(
3076 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3077 kdu_model=k8s_instance_info["kdu-model"],
3078 atomic=True,
3079 params=k8params,
3080 db_dict=db_dict_install,
3081 timeout=timeout,
3082 kdu_name=k8s_instance_info["kdu-name"],
3083 namespace=k8s_instance_info["namespace"],
3084 kdu_instance=kdu_instance,
3085 vca_id=vca_id,
3086 )
3087 self.update_db_2(
3088 "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
3089 )
3090
3091 # Obtain services in order to get the management service ip
3092 services = await self.k8scluster_map[k8sclustertype].get_services(
3093 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3094 kdu_instance=kdu_instance,
3095 namespace=k8s_instance_info["namespace"],
3096 )
3097
3098 # Obtain management service info (if exists)
3099 vnfr_update_dict = {}
3100 kdu_config = get_configuration(vnfd, kdud["name"])
3101 if kdu_config:
3102 target_ee_list = kdu_config.get("execution-environment-list", [])
3103 else:
3104 target_ee_list = []
3105
3106 if services:
3107 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3108 mgmt_services = [
3109 service
3110 for service in kdud.get("service", [])
3111 if service.get("mgmt-service")
3112 ]
3113 for mgmt_service in mgmt_services:
3114 for service in services:
3115 if service["name"].startswith(mgmt_service["name"]):
3116 # Mgmt service found, Obtain service ip
3117 ip = service.get("external_ip", service.get("cluster_ip"))
3118 if isinstance(ip, list) and len(ip) == 1:
3119 ip = ip[0]
3120
3121 vnfr_update_dict[
3122 "kdur.{}.ip-address".format(kdu_index)
3123 ] = ip
3124
3125 # Check whether the mgmt ip at the vnf must also be updated
3126 service_external_cp = mgmt_service.get(
3127 "external-connection-point-ref"
3128 )
3129 if service_external_cp:
3130 if (
3131 deep_get(vnfd, ("mgmt-interface", "cp"))
3132 == service_external_cp
3133 ):
3134 vnfr_update_dict["ip-address"] = ip
3135
3136 if find_in_list(
3137 target_ee_list,
3138 lambda ee: ee.get(
3139 "external-connection-point-ref", ""
3140 )
3141 == service_external_cp,
3142 ):
3143 vnfr_update_dict[
3144 "kdur.{}.ip-address".format(kdu_index)
3145 ] = ip
3146 break
3147 else:
3148 self.logger.warn(
3149 "Mgmt service name: {} not found".format(
3150 mgmt_service["name"]
3151 )
3152 )
3153
3154 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3155 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3156
3157 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3158 if (
3159 kdu_config
3160 and kdu_config.get("initial-config-primitive")
3161 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3162 ):
3163 initial_config_primitive_list = kdu_config.get(
3164 "initial-config-primitive"
3165 )
3166 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3167
3168 for initial_config_primitive in initial_config_primitive_list:
3169 primitive_params_ = self._map_primitive_params(
3170 initial_config_primitive, {}, {}
3171 )
3172
3173 await asyncio.wait_for(
3174 self.k8scluster_map[k8sclustertype].exec_primitive(
3175 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3176 kdu_instance=kdu_instance,
3177 primitive_name=initial_config_primitive["name"],
3178 params=primitive_params_,
3179 db_dict=db_dict_install,
3180 vca_id=vca_id,
3181 ),
3182 timeout=timeout,
3183 )
3184
3185 except Exception as e:
3186 # Prepare update db with error and raise exception
3187 try:
3188 self.update_db_2(
3189 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3190 )
3191 self.update_db_2(
3192 "vnfrs",
3193 vnfr_data.get("_id"),
3194 {"kdur.{}.status".format(kdu_index): "ERROR"},
3195 )
3196 except Exception:
3197 # ignore to keep original exception
3198 pass
3199 # reraise original error
3200 raise
3201
3202 return kdu_instance
3203
3204 async def deploy_kdus(
3205 self,
3206 logging_text,
3207 nsr_id,
3208 nslcmop_id,
3209 db_vnfrs,
3210 db_vnfds,
3211 task_instantiation_info,
3212 ):
3213 # Launch kdus if present in the descriptor
3214
3215 k8scluster_id_2_uuic = {
3216 "helm-chart-v3": {},
3217 "helm-chart": {},
3218 "juju-bundle": {},
3219 }
3220
3221 async def _get_cluster_id(cluster_id, cluster_type):
3222 nonlocal k8scluster_id_2_uuic
3223 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
3224 return k8scluster_id_2_uuic[cluster_type][cluster_id]
3225
3226 # check if the K8s cluster is being created and wait for any related previous tasks to finish
3227 task_name, task_dependency = self.lcm_tasks.lookfor_related(
3228 "k8scluster", cluster_id
3229 )
3230 if task_dependency:
3231 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3232 task_name, cluster_id
3233 )
3234 self.logger.debug(logging_text + text)
3235 await asyncio.wait(task_dependency, timeout=3600)
3236
3237 db_k8scluster = self.db.get_one(
3238 "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
3239 )
3240 if not db_k8scluster:
3241 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
3242
3243 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
3244 if not k8s_id:
3245 if cluster_type == "helm-chart-v3":
3246 try:
3247 # backward compatibility for existing clusters that have not been initialized for helm v3
3248 k8s_credentials = yaml.safe_dump(
3249 db_k8scluster.get("credentials")
3250 )
3251 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
3252 k8s_credentials, reuse_cluster_uuid=cluster_id
3253 )
3254 db_k8scluster_update = {}
3255 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
3256 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
3257 db_k8scluster_update[
3258 "_admin.helm-chart-v3.created"
3259 ] = uninstall_sw
3260 db_k8scluster_update[
3261 "_admin.helm-chart-v3.operationalState"
3262 ] = "ENABLED"
3263 self.update_db_2(
3264 "k8sclusters", cluster_id, db_k8scluster_update
3265 )
3266 except Exception as e:
3267 self.logger.error(
3268 logging_text
3269 + "error initializing helm-v3 cluster: {}".format(str(e))
3270 )
3271 raise LcmException(
3272 "K8s cluster '{}' has not been initialized for '{}'".format(
3273 cluster_id, cluster_type
3274 )
3275 )
3276 else:
3277 raise LcmException(
3278 "K8s cluster '{}' has not been initialized for '{}'".format(
3279 cluster_id, cluster_type
3280 )
3281 )
3282 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
3283 return k8s_id
3284
3285 logging_text += "Deploy kdus: "
3286 step = ""
3287 try:
3288 db_nsr_update = {"_admin.deployed.K8s": []}
3289 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3290
3291 index = 0
3292 updated_cluster_list = []
3293 updated_v3_cluster_list = []
3294
3295 for vnfr_data in db_vnfrs.values():
3296 vca_id = self.get_vca_id(vnfr_data, {})
3297 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
3298 # Step 0: Prepare and set parameters
3299 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3300 vnfd_id = vnfr_data.get("vnfd-id")
3301 vnfd_with_id = find_in_list(
3302 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3303 )
3304 kdud = next(
3305 kdud
3306 for kdud in vnfd_with_id["kdu"]
3307 if kdud["name"] == kdur["kdu-name"]
3308 )
3309 namespace = kdur.get("k8s-namespace")
3310 kdu_deployment_name = kdur.get("kdu-deployment-name")
3311 if kdur.get("helm-chart"):
3312 kdumodel = kdur["helm-chart"]
3313 # Default version is helm3; if helm-version is v2, use v2
3314 k8sclustertype = "helm-chart-v3"
3315 self.logger.debug("kdur: {}".format(kdur))
3316 if (
3317 kdur.get("helm-version")
3318 and kdur.get("helm-version") == "v2"
3319 ):
3320 k8sclustertype = "helm-chart"
3321 elif kdur.get("juju-bundle"):
3322 kdumodel = kdur["juju-bundle"]
3323 k8sclustertype = "juju-bundle"
3324 else:
3325 raise LcmException(
3326 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3327 "juju-bundle. Maybe an old NBI version is running".format(
3328 vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
3329 )
3330 )
3331 # check if kdumodel is a file that exists
3332 try:
3333 vnfd_with_id = find_in_list(
3334 db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
3335 )
3336 storage = deep_get(vnfd_with_id, ("_admin", "storage"))
3337 if storage: # may not be present if the vnfd has no artifacts
3338 # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
3339 if storage["pkg-dir"]:
3340 filename = "{}/{}/{}s/{}".format(
3341 storage["folder"],
3342 storage["pkg-dir"],
3343 k8sclustertype,
3344 kdumodel,
3345 )
3346 else:
3347 filename = "{}/Scripts/{}s/{}".format(
3348 storage["folder"],
3349 k8sclustertype,
3350 kdumodel,
3351 )
3352 if self.fs.file_exists(
3353 filename, mode="file"
3354 ) or self.fs.file_exists(filename, mode="dir"):
3355 kdumodel = self.fs.path + filename
3356 except (asyncio.TimeoutError, asyncio.CancelledError):
3357 raise
3358 except Exception: # it is not a file
3359 pass
3360
3361 k8s_cluster_id = kdur["k8s-cluster"]["id"]
3362 step = "Synchronize repos for k8s cluster '{}'".format(
3363 k8s_cluster_id
3364 )
3365 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
3366
3367 # Synchronize repos
3368 if (
3369 k8sclustertype == "helm-chart"
3370 and cluster_uuid not in updated_cluster_list
3371 ) or (
3372 k8sclustertype == "helm-chart-v3"
3373 and cluster_uuid not in updated_v3_cluster_list
3374 ):
3375 del_repo_list, added_repo_dict = await asyncio.ensure_future(
3376 self.k8scluster_map[k8sclustertype].synchronize_repos(
3377 cluster_uuid=cluster_uuid
3378 )
3379 )
3380 if del_repo_list or added_repo_dict:
3381 if k8sclustertype == "helm-chart":
3382 unset = {
3383 "_admin.helm_charts_added." + item: None
3384 for item in del_repo_list
3385 }
3386 updated = {
3387 "_admin.helm_charts_added." + item: name
3388 for item, name in added_repo_dict.items()
3389 }
3390 updated_cluster_list.append(cluster_uuid)
3391 elif k8sclustertype == "helm-chart-v3":
3392 unset = {
3393 "_admin.helm_charts_v3_added." + item: None
3394 for item in del_repo_list
3395 }
3396 updated = {
3397 "_admin.helm_charts_v3_added." + item: name
3398 for item, name in added_repo_dict.items()
3399 }
3400 updated_v3_cluster_list.append(cluster_uuid)
3401 self.logger.debug(
3402 logging_text + "repos synchronized on k8s cluster "
3403 "'{}' to_delete: {}, to_add: {}".format(
3404 k8s_cluster_id, del_repo_list, added_repo_dict
3405 )
3406 )
3407 self.db.set_one(
3408 "k8sclusters",
3409 {"_id": k8s_cluster_id},
3410 updated,
3411 unset=unset,
3412 )
3413
3414 # Instantiate kdu
3415 step = "Instantiating KDU {}.{} in k8s cluster {}".format(
3416 vnfr_data["member-vnf-index-ref"],
3417 kdur["kdu-name"],
3418 k8s_cluster_id,
3419 )
3420 k8s_instance_info = {
3421 "kdu-instance": None,
3422 "k8scluster-uuid": cluster_uuid,
3423 "k8scluster-type": k8sclustertype,
3424 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
3425 "kdu-name": kdur["kdu-name"],
3426 "kdu-model": kdumodel,
3427 "namespace": namespace,
3428 "kdu-deployment-name": kdu_deployment_name,
3429 }
3430 db_path = "_admin.deployed.K8s.{}".format(index)
3431 db_nsr_update[db_path] = k8s_instance_info
3432 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3433 vnfd_with_id = find_in_list(
3434 db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
3435 )
3436 task = asyncio.ensure_future(
3437 self._install_kdu(
3438 nsr_id,
3439 db_path,
3440 vnfr_data,
3441 kdu_index,
3442 kdud,
3443 vnfd_with_id,
3444 k8s_instance_info,
3445 k8params=desc_params,
3446 timeout=600,
3447 vca_id=vca_id,
3448 )
3449 )
3450 self.lcm_tasks.register(
3451 "ns",
3452 nsr_id,
3453 nslcmop_id,
3454 "instantiate_KDU-{}".format(index),
3455 task,
3456 )
3457 task_instantiation_info[task] = "Deploying KDU {}".format(
3458 kdur["kdu-name"]
3459 )
3460
3461 index += 1
3462
3463 except (LcmException, asyncio.CancelledError):
3464 raise
3465 except Exception as e:
3466 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
3467 if isinstance(e, (N2VCException, DbException)):
3468 self.logger.error(logging_text + msg)
3469 else:
3470 self.logger.critical(logging_text + msg, exc_info=True)
3471 raise LcmException(msg)
3472 finally:
3473 if db_nsr_update:
3474 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3475
3476 def _deploy_n2vc(
3477 self,
3478 logging_text,
3479 db_nsr,
3480 db_vnfr,
3481 nslcmop_id,
3482 nsr_id,
3483 nsi_id,
3484 vnfd_id,
3485 vdu_id,
3486 kdu_name,
3487 member_vnf_index,
3488 vdu_index,
3489 vdu_name,
3490 deploy_params,
3491 descriptor_config,
3492 base_folder,
3493 task_instantiation_info,
3494 stage,
3495 ):
3496 # launch instantiate_N2VC in an asyncio task and register the task object
3497 # Look up where the information of this charm is stored at database <nsrs>._admin.deployed.VCA
3498 # if not found, create one entry and update database
3499 # fill db_nsr._admin.deployed.VCA.<index>
3500
3501 self.logger.debug(
3502 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
3503 )
3504 if "execution-environment-list" in descriptor_config:
3505 ee_list = descriptor_config.get("execution-environment-list", [])
3506 elif "juju" in descriptor_config:
3507 ee_list = [descriptor_config] # ns charms
3508 else: # other types such as script are not supported
3509 ee_list = []
3510
3511 for ee_item in ee_list:
3512 self.logger.debug(
3513 logging_text
3514 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3515 ee_item.get("juju"), ee_item.get("helm-chart")
3516 )
3517 )
3518 ee_descriptor_id = ee_item.get("id")
3519 if ee_item.get("juju"):
3520 vca_name = ee_item["juju"].get("charm")
3521 vca_type = (
3522 "lxc_proxy_charm"
3523 if ee_item["juju"].get("charm") is not None
3524 else "native_charm"
3525 )
3526 if ee_item["juju"].get("cloud") == "k8s":
3527 vca_type = "k8s_proxy_charm"
3528 elif ee_item["juju"].get("proxy") is False:
3529 vca_type = "native_charm"
3530 elif ee_item.get("helm-chart"):
3531 vca_name = ee_item["helm-chart"]
3532 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
3533 vca_type = "helm"
3534 else:
3535 vca_type = "helm-v3"
3536 else:
3537 self.logger.debug(
3538 logging_text + "skipping configuration that is neither juju nor helm-chart"
3539 )
3540 continue
3541
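# look for an existing VCA record matching this element and reuse its index; the for-else below creates a new record if none is found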
3542 vca_index = -1
3543 for vca_index, vca_deployed in enumerate(
3544 db_nsr["_admin"]["deployed"]["VCA"]
3545 ):
3546 if not vca_deployed:
3547 continue
3548 if (
3549 vca_deployed.get("member-vnf-index") == member_vnf_index
3550 and vca_deployed.get("vdu_id") == vdu_id
3551 and vca_deployed.get("kdu_name") == kdu_name
3552 and vca_deployed.get("vdu_count_index", 0) == vdu_index
3553 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
3554 ):
3555 break
3556 else:
3557 # not found, create one.
3558 target = (
3559 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
3560 )
3561 if vdu_id:
3562 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
3563 elif kdu_name:
3564 target += "/kdu/{}".format(kdu_name)
3565 vca_deployed = {
3566 "target_element": target,
3567 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
3568 "member-vnf-index": member_vnf_index,
3569 "vdu_id": vdu_id,
3570 "kdu_name": kdu_name,
3571 "vdu_count_index": vdu_index,
3572 "operational-status": "init", # TODO revise
3573 "detailed-status": "", # TODO revise
3574 "step": "initial-deploy", # TODO revise
3575 "vnfd_id": vnfd_id,
3576 "vdu_name": vdu_name,
3577 "type": vca_type,
3578 "ee_descriptor_id": ee_descriptor_id,
3579 }
3580 vca_index += 1
3581
3582 # create VCA and configurationStatus in db
3583 db_dict = {
3584 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
3585 "configurationStatus.{}".format(vca_index): dict(),
3586 }
3587 self.update_db_2("nsrs", nsr_id, db_dict)
3588
3589 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
3590
3591 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
3592 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
3593 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
3594
3595 # Launch task
3596 task_n2vc = asyncio.ensure_future(
3597 self.instantiate_N2VC(
3598 logging_text=logging_text,
3599 vca_index=vca_index,
3600 nsi_id=nsi_id,
3601 db_nsr=db_nsr,
3602 db_vnfr=db_vnfr,
3603 vdu_id=vdu_id,
3604 kdu_name=kdu_name,
3605 vdu_index=vdu_index,
3606 deploy_params=deploy_params,
3607 config_descriptor=descriptor_config,
3608 base_folder=base_folder,
3609 nslcmop_id=nslcmop_id,
3610 stage=stage,
3611 vca_type=vca_type,
3612 vca_name=vca_name,
3613 ee_config_descriptor=ee_item,
3614 )
3615 )
3616 self.lcm_tasks.register(
3617 "ns",
3618 nsr_id,
3619 nslcmop_id,
3620 "instantiate_N2VC-{}".format(vca_index),
3621 task_n2vc,
3622 )
3623 task_instantiation_info[
3624 task_n2vc
3625 ] = self.task_name_deploy_vca + " {}.{}".format(
3626 member_vnf_index or "", vdu_id or ""
3627 )
3628
3629 @staticmethod
3630 def _create_nslcmop(nsr_id, operation, params):
3631 """
3632 Creates an ns-lcm-op content to be stored in the database.
3633 :param nsr_id: internal id of the instance
3634 :param operation: instantiate, terminate, scale, action, ...
3635 :param params: user parameters for the operation
3636 :return: dictionary following SOL005 format
3637 """
3638 # Raise exception if invalid arguments
3639 if not (nsr_id and operation and params):
3640 raise LcmException(
3641 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3642 )
3643 now = time()
3644 _id = str(uuid4())
3645 nslcmop = {
3646 "id": _id,
3647 "_id": _id,
3648 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3649 "operationState": "PROCESSING",
3650 "statusEnteredTime": now,
3651 "nsInstanceId": nsr_id,
3652 "lcmOperationType": operation,
3653 "startTime": now,
3654 "isAutomaticInvocation": False,
3655 "operationParams": params,
3656 "isCancelPending": False,
3657 "links": {
3658 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3659 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3660 },
3661 }
3662 return nslcmop
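# Illustrative usage sketch (hypothetical operation and params): the helper above could be
# used to build the record for a derived operation, e.g.
#   nslcmop = NsLcm._create_nslcmop(nsr_id, "scale", {"scaleType": "SCALE_VNF"})
# The returned dictionary can then be inserted into the "nslcmops" collection by the caller.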
3663
3664 def _format_additional_params(self, params):
3665 params = params or {}
3666 for key, value in params.items():
3667 if str(value).startswith("!!yaml "):
3668 params[key] = yaml.safe_load(value[7:])
3669 return params
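# Illustrative example (hypothetical values): a value prefixed with "!!yaml " is parsed as
# YAML, so {"vdu-config": "!!yaml {cpus: 2, disks: [10, 20]}"} becomes
# {"vdu-config": {"cpus": 2, "disks": [10, 20]}}, while any other value is left untouched.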
3670
3671 def _get_terminate_primitive_params(self, seq, vnf_index):
3672 primitive = seq.get("name")
3673 primitive_params = {}
3674 params = {
3675 "member_vnf_index": vnf_index,
3676 "primitive": primitive,
3677 "primitive_params": primitive_params,
3678 }
3679 desc_params = {}
3680 return self._map_primitive_params(seq, params, desc_params)
3681
3682 # sub-operations
3683
3684 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3685 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3686 if op.get("operationState") == "COMPLETED":
3687 # b. Skip sub-operation
3688 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3689 return self.SUBOPERATION_STATUS_SKIP
3690 else:
3691 # c. retry executing sub-operation
3692 # The sub-operation exists, and operationState != 'COMPLETED'
3693 # Update operationState = 'PROCESSING' to indicate a retry.
3694 operationState = "PROCESSING"
3695 detailed_status = "In progress"
3696 self._update_suboperation_status(
3697 db_nslcmop, op_index, operationState, detailed_status
3698 )
3699 # Return the sub-operation index
3700 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3701 # with arguments extracted from the sub-operation
3702 return op_index
3703
3704 # Find a sub-operation whose fields match every key/value pair of the 'match' dictionary
3705 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if there is no match
3706 def _find_suboperation(self, db_nslcmop, match):
3707 if db_nslcmop and match:
3708 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3709 for i, op in enumerate(op_list):
3710 if all(op.get(k) == match[k] for k in match):
3711 return i
3712 return self.SUBOPERATION_STATUS_NOT_FOUND
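# Illustrative example (hypothetical values): to look up a previously recorded pre-scale
# sub-operation, a caller could pass
#   match = {"member_vnf_index": "1", "primitive": "touch", "lcmOperationType": "PRE-SCALE"}
# Every key/value pair must be present and equal in the stored operation for a hit;
# otherwise SUBOPERATION_STATUS_NOT_FOUND is returned.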
3713
3714 # Update status for a sub-operation given its index
3715 def _update_suboperation_status(
3716 self, db_nslcmop, op_index, operationState, detailed_status
3717 ):
3718 # Update DB for HA tasks
3719 q_filter = {"_id": db_nslcmop["_id"]}
3720 update_dict = {
3721 "_admin.operations.{}.operationState".format(op_index): operationState,
3722 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3723 }
3724 self.db.set_one(
3725 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3726 )
3727
3728 # Add sub-operation, return the index of the added sub-operation
3729 # Optionally, set operationState, detailed-status, and operationType
3730 # Status and type are currently set for 'scale' sub-operations:
3731 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3732 # 'detailed-status' : status message
3733 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3734 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3735 def _add_suboperation(
3736 self,
3737 db_nslcmop,
3738 vnf_index,
3739 vdu_id,
3740 vdu_count_index,
3741 vdu_name,
3742 primitive,
3743 mapped_primitive_params,
3744 operationState=None,
3745 detailed_status=None,
3746 operationType=None,
3747 RO_nsr_id=None,
3748 RO_scaling_info=None,
3749 ):
3750 if not db_nslcmop:
3751 return self.SUBOPERATION_STATUS_NOT_FOUND
3752 # Get the "_admin.operations" list, if it exists
3753 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3754 op_list = db_nslcmop_admin.get("operations")
3755 # Create or append to the "_admin.operations" list
3756 new_op = {
3757 "member_vnf_index": vnf_index,
3758 "vdu_id": vdu_id,
3759 "vdu_count_index": vdu_count_index,
3760 "primitive": primitive,
3761 "primitive_params": mapped_primitive_params,
3762 }
3763 if operationState:
3764 new_op["operationState"] = operationState
3765 if detailed_status:
3766 new_op["detailed-status"] = detailed_status
3767 if operationType:
3768 new_op["lcmOperationType"] = operationType
3769 if RO_nsr_id:
3770 new_op["RO_nsr_id"] = RO_nsr_id
3771 if RO_scaling_info:
3772 new_op["RO_scaling_info"] = RO_scaling_info
3773 if not op_list:
3774 # No existing operations, create key 'operations' with current operation as first list element
3775 db_nslcmop_admin.update({"operations": [new_op]})
3776 op_list = db_nslcmop_admin.get("operations")
3777 else:
3778 # Existing operations, append operation to list
3779 op_list.append(new_op)
3780
3781 db_nslcmop_update = {"_admin.operations": op_list}
3782 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3783 op_index = len(op_list) - 1
3784 return op_index
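# Illustrative usage sketch (hypothetical values): recording a terminate primitive as a
# sub-operation, as destroy_N2VC does further below, could look like
#   op_index = self._add_suboperation(
#       db_nslcmop, vnf_index="1", vdu_id=None, vdu_count_index=None, vdu_name=None,
#       primitive="stop", mapped_primitive_params={"force": "true"},
#   )
# The entry is appended to db_nslcmop["_admin"]["operations"] and persisted, and the
# returned op_index can later be passed to _update_suboperation_status().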
3785
3786 # Helper methods for scale() sub-operations
3787
3788 # pre-scale/post-scale:
3789 # Check for 3 different cases:
3790 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3791 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3792 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3793 def _check_or_add_scale_suboperation(
3794 self,
3795 db_nslcmop,
3796 vnf_index,
3797 vnf_config_primitive,
3798 primitive_params,
3799 operationType,
3800 RO_nsr_id=None,
3801 RO_scaling_info=None,
3802 ):
3803 # Find this sub-operation
3804 if RO_nsr_id and RO_scaling_info:
3805 operationType = "SCALE-RO"
3806 match = {
3807 "member_vnf_index": vnf_index,
3808 "RO_nsr_id": RO_nsr_id,
3809 "RO_scaling_info": RO_scaling_info,
3810 }
3811 else:
3812 match = {
3813 "member_vnf_index": vnf_index,
3814 "primitive": vnf_config_primitive,
3815 "primitive_params": primitive_params,
3816 "lcmOperationType": operationType,
3817 }
3818 op_index = self._find_suboperation(db_nslcmop, match)
3819 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3820 # a. New sub-operation
3821 # The sub-operation does not exist, add it.
3822 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3823 # The following parameters are set to None for all kind of scaling:
3824 vdu_id = None
3825 vdu_count_index = None
3826 vdu_name = None
3827 if RO_nsr_id and RO_scaling_info:
3828 vnf_config_primitive = None
3829 primitive_params = None
3830 else:
3831 RO_nsr_id = None
3832 RO_scaling_info = None
3833 # Initial status for sub-operation
3834 operationState = "PROCESSING"
3835 detailed_status = "In progress"
3836 # Add sub-operation for pre/post-scaling (zero or more operations)
3837 self._add_suboperation(
3838 db_nslcmop,
3839 vnf_index,
3840 vdu_id,
3841 vdu_count_index,
3842 vdu_name,
3843 vnf_config_primitive,
3844 primitive_params,
3845 operationState,
3846 detailed_status,
3847 operationType,
3848 RO_nsr_id,
3849 RO_scaling_info,
3850 )
3851 return self.SUBOPERATION_STATUS_NEW
3852 else:
3853 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3854 # or op_index (operationState != 'COMPLETED')
3855 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3856
3857 # Function to return execution_environment id
3858
3859 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3860 # TODO vdu_index_count
3861 for vca in vca_deployed_list:
3862 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3863 return vca["ee_id"]
3864
3865 async def destroy_N2VC(
3866 self,
3867 logging_text,
3868 db_nslcmop,
3869 vca_deployed,
3870 config_descriptor,
3871 vca_index,
3872 destroy_ee=True,
3873 exec_primitives=True,
3874 scaling_in=False,
3875 vca_id: str = None,
3876 ):
3877 """
3878 Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
3879 :param logging_text:
3880 :param db_nslcmop:
3881 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
3882 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
3883 :param vca_index: index in the database _admin.deployed.VCA
3884 :param destroy_ee: False to skip destroying the execution environment here, because all of them will be destroyed at once later
3885 :param exec_primitives: False to skip executing terminate primitives, because the configuration was not completed or did
3886 not execute properly
3887 :param scaling_in: True destroys the application, False destroys the model
3888 :return: None or exception
3889 """
3890
3891 self.logger.debug(
3892 logging_text
3893 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
3894 vca_index, vca_deployed, config_descriptor, destroy_ee
3895 )
3896 )
3897
3898 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
3899
3900 # execute terminate_primitives
3901 if exec_primitives:
3902 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
3903 config_descriptor.get("terminate-config-primitive"),
3904 vca_deployed.get("ee_descriptor_id"),
3905 )
3906 vdu_id = vca_deployed.get("vdu_id")
3907 vdu_count_index = vca_deployed.get("vdu_count_index")
3908 vdu_name = vca_deployed.get("vdu_name")
3909 vnf_index = vca_deployed.get("member-vnf-index")
3910 if terminate_primitives and vca_deployed.get("needed_terminate"):
3911 for seq in terminate_primitives:
3912 # For each sequence in list, get primitive and call _ns_execute_primitive()
3913 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
3914 vnf_index, seq.get("name")
3915 )
3916 self.logger.debug(logging_text + step)
3917 # Create the primitive for each sequence, e.g. "primitive": "touch"
3918 primitive = seq.get("name")
3919 mapped_primitive_params = self._get_terminate_primitive_params(
3920 seq, vnf_index
3921 )
3922
3923 # Add sub-operation
3924 self._add_suboperation(
3925 db_nslcmop,
3926 vnf_index,
3927 vdu_id,
3928 vdu_count_index,
3929 vdu_name,
3930 primitive,
3931 mapped_primitive_params,
3932 )
3933 # Sub-operations: Call _ns_execute_primitive() instead of action()
3934 try:
3935 result, result_detail = await self._ns_execute_primitive(
3936 vca_deployed["ee_id"],
3937 primitive,
3938 mapped_primitive_params,
3939 vca_type=vca_type,
3940 vca_id=vca_id,
3941 )
3942 except LcmException:
3943 # this happens when the VCA is not deployed. In this case there is nothing to terminate
3944 continue
3945 result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
3946 if result not in result_ok:
3947 raise LcmException(
3948 "terminate_primitive {} for vnf_member_index={} fails with "
3949 "error {}".format(seq.get("name"), vnf_index, result_detail)
3950 )
3951 # mark that this VCA no longer needs to be terminated
3952 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
3953 vca_index
3954 )
3955 self.update_db_2(
3956 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
3957 )
3958
3959 # Delete Prometheus Jobs if any
3960 # This uses NSR_ID, so it will destroy any jobs under this index
3961 self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
3962
3963 if destroy_ee:
3964 await self.vca_map[vca_type].delete_execution_environment(
3965 vca_deployed["ee_id"],
3966 scaling_in=scaling_in,
3967 vca_type=vca_type,
3968 vca_id=vca_id,
3969 )
3970
3971 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
3972 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
3973 namespace = "." + db_nsr["_id"]
3974 try:
3975 await self.n2vc.delete_namespace(
3976 namespace=namespace,
3977 total_timeout=self.timeout_charm_delete,
3978 vca_id=vca_id,
3979 )
3980 except N2VCNotFound: # already deleted. Skip
3981 pass
3982 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
3983
3984 async def _terminate_RO(
3985 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
3986 ):
3987 """
3988 Terminates a deployment from RO
3989 :param logging_text:
3990 :param nsr_deployed: db_nsr._admin.deployed
3991 :param nsr_id:
3992 :param nslcmop_id:
3993 :param stage: list of strings with the content to write to db_nslcmop.detailed-status.
3994 this method only updates index 2, but it writes the concatenated content of the whole list to the database
3995 :return:
3996 """
3997 db_nsr_update = {}
3998 failed_detail = []
3999 ro_nsr_id = ro_delete_action = None
4000 if nsr_deployed and nsr_deployed.get("RO"):
4001 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
4002 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
4003 try:
4004 if ro_nsr_id:
4005 stage[2] = "Deleting ns from VIM."
4006 db_nsr_update["detailed-status"] = " ".join(stage)
4007 self._write_op_status(nslcmop_id, stage)
4008 self.logger.debug(logging_text + stage[2])
4009 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4010 self._write_op_status(nslcmop_id, stage)
4011 desc = await self.RO.delete("ns", ro_nsr_id)
4012 ro_delete_action = desc["action_id"]
4013 db_nsr_update[
4014 "_admin.deployed.RO.nsr_delete_action_id"
4015 ] = ro_delete_action
4016 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4017 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4018 if ro_delete_action:
4019 # wait until NS is deleted from VIM
4020 stage[2] = "Waiting ns deleted from VIM."
4021 detailed_status_old = None
4022 self.logger.debug(
4023 logging_text
4024 + stage[2]
4025 + " RO_id={} ro_delete_action={}".format(
4026 ro_nsr_id, ro_delete_action
4027 )
4028 )
4029 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4030 self._write_op_status(nslcmop_id, stage)
4031
4032 delete_timeout = 20 * 60 # 20 minutes
4033 while delete_timeout > 0:
4034 desc = await self.RO.show(
4035 "ns",
4036 item_id_name=ro_nsr_id,
4037 extra_item="action",
4038 extra_item_id=ro_delete_action,
4039 )
4040
4041 # deploymentStatus
4042 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4043
4044 ns_status, ns_status_info = self.RO.check_action_status(desc)
4045 if ns_status == "ERROR":
4046 raise ROclient.ROClientException(ns_status_info)
4047 elif ns_status == "BUILD":
4048 stage[2] = "Deleting from VIM {}".format(ns_status_info)
4049 elif ns_status == "ACTIVE":
4050 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4051 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4052 break
4053 else:
4054 assert (
4055 False
4056 ), "ROclient.check_action_status returns unknown {}".format(
4057 ns_status
4058 )
4059 if stage[2] != detailed_status_old:
4060 detailed_status_old = stage[2]
4061 db_nsr_update["detailed-status"] = " ".join(stage)
4062 self._write_op_status(nslcmop_id, stage)
4063 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4064 await asyncio.sleep(5, loop=self.loop)
4065 delete_timeout -= 5
4066 else: # delete_timeout <= 0:
4067 raise ROclient.ROClientException(
4068 "Timeout waiting ns deleted from VIM"
4069 )
4070
4071 except Exception as e:
4072 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4073 if (
4074 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4075 ): # not found
4076 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
4077 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
4078 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
4079 self.logger.debug(
4080 logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
4081 )
4082 elif (
4083 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4084 ): # conflict
4085 failed_detail.append("delete conflict: {}".format(e))
4086 self.logger.debug(
4087 logging_text
4088 + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
4089 )
4090 else:
4091 failed_detail.append("delete error: {}".format(e))
4092 self.logger.error(
4093 logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
4094 )
4095
4096 # Delete nsd
4097 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
4098 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
4099 try:
4100 stage[2] = "Deleting nsd from RO."
4101 db_nsr_update["detailed-status"] = " ".join(stage)
4102 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4103 self._write_op_status(nslcmop_id, stage)
4104 await self.RO.delete("nsd", ro_nsd_id)
4105 self.logger.debug(
4106 logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
4107 )
4108 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4109 except Exception as e:
4110 if (
4111 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4112 ): # not found
4113 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
4114 self.logger.debug(
4115 logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
4116 )
4117 elif (
4118 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4119 ): # conflict
4120 failed_detail.append(
4121 "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
4122 )
4123 self.logger.debug(logging_text + failed_detail[-1])
4124 else:
4125 failed_detail.append(
4126 "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
4127 )
4128 self.logger.error(logging_text + failed_detail[-1])
4129
4130 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
4131 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
4132 if not vnf_deployed or not vnf_deployed["id"]:
4133 continue
4134 try:
4135 ro_vnfd_id = vnf_deployed["id"]
4136 stage[
4137 2
4138 ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
4139 vnf_deployed["member-vnf-index"], ro_vnfd_id
4140 )
4141 db_nsr_update["detailed-status"] = " ".join(stage)
4142 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4143 self._write_op_status(nslcmop_id, stage)
4144 await self.RO.delete("vnfd", ro_vnfd_id)
4145 self.logger.debug(
4146 logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
4147 )
4148 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
4149 except Exception as e:
4150 if (
4151 isinstance(e, ROclient.ROClientException) and e.http_code == 404
4152 ): # not found
4153 db_nsr_update[
4154 "_admin.deployed.RO.vnfd.{}.id".format(index)
4155 ] = None
4156 self.logger.debug(
4157 logging_text
4158 + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
4159 )
4160 elif (
4161 isinstance(e, ROclient.ROClientException) and e.http_code == 409
4162 ): # conflict
4163 failed_detail.append(
4164 "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
4165 )
4166 self.logger.debug(logging_text + failed_detail[-1])
4167 else:
4168 failed_detail.append(
4169 "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
4170 )
4171 self.logger.error(logging_text + failed_detail[-1])
4172
4173 if failed_detail:
4174 stage[2] = "Error deleting from VIM"
4175 else:
4176 stage[2] = "Deleted from VIM"
4177 db_nsr_update["detailed-status"] = " ".join(stage)
4178 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4179 self._write_op_status(nslcmop_id, stage)
4180
4181 if failed_detail:
4182 raise LcmException("; ".join(failed_detail))
4183
4184 async def terminate(self, nsr_id, nslcmop_id):
4185 # Try to lock HA task here
4186 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4187 if not task_is_locked_by_me:
4188 return
4189
4190 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
4191 self.logger.debug(logging_text + "Enter")
4192 timeout_ns_terminate = self.timeout_ns_terminate
4193 db_nsr = None
4194 db_nslcmop = None
4195 operation_params = None
4196 exc = None
4197 error_list = [] # collects all error messages
4198 db_nslcmop_update = {}
4199 autoremove = False # autoremove after terminated
4200 tasks_dict_info = {}
4201 db_nsr_update = {}
4202 stage = [
4203 "Stage 1/3: Preparing task.",
4204 "Waiting for previous operations to terminate.",
4205 "",
4206 ]
4207 # ^ contains [stage, step, VIM-status]
4208 try:
4209 # wait for any previous tasks in process
4210 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4211
4212 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
4213 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4214 operation_params = db_nslcmop.get("operationParams") or {}
4215 if operation_params.get("timeout_ns_terminate"):
4216 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
4217 stage[1] = "Getting nsr={} from db.".format(nsr_id)
4218 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4219
4220 db_nsr_update["operational-status"] = "terminating"
4221 db_nsr_update["config-status"] = "terminating"
4222 self._write_ns_status(
4223 nsr_id=nsr_id,
4224 ns_state="TERMINATING",
4225 current_operation="TERMINATING",
4226 current_operation_id=nslcmop_id,
4227 other_update=db_nsr_update,
4228 )
4229 self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
4230 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
4231 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
4232 return
4233
4234 stage[1] = "Getting vnf descriptors from db."
4235 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
4236 db_vnfrs_dict = {
4237 db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
4238 }
4239 db_vnfds_from_id = {}
4240 db_vnfds_from_member_index = {}
4241 # Loop over VNFRs
4242 for vnfr in db_vnfrs_list:
4243 vnfd_id = vnfr["vnfd-id"]
4244 if vnfd_id not in db_vnfds_from_id:
4245 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4246 db_vnfds_from_id[vnfd_id] = vnfd
4247 db_vnfds_from_member_index[
4248 vnfr["member-vnf-index-ref"]
4249 ] = db_vnfds_from_id[vnfd_id]
4250
4251 # Destroy individual execution environments when there are terminating primitives.
4252 # The rest of the EEs will be deleted at once
4253 # TODO - check before calling _destroy_N2VC
4254 # if not operation_params.get("skip_terminate_primitives"):#
4255 # or not vca.get("needed_terminate"):
4256 stage[0] = "Stage 2/3 execute terminating primitives."
4257 self.logger.debug(logging_text + stage[0])
4258 stage[1] = "Looking execution environment that needs terminate."
4259 self.logger.debug(logging_text + stage[1])
4260
4261 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
4262 config_descriptor = None
4263 vca_member_vnf_index = vca.get("member-vnf-index")
4264 vca_id = self.get_vca_id(
4265 db_vnfrs_dict.get(vca_member_vnf_index)
4266 if vca_member_vnf_index
4267 else None,
4268 db_nsr,
4269 )
4270 if not vca or not vca.get("ee_id"):
4271 continue
4272 if not vca.get("member-vnf-index"):
4273 # ns
4274 config_descriptor = db_nsr.get("ns-configuration")
4275 elif vca.get("vdu_id"):
4276 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4277 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4278 elif vca.get("kdu_name"):
4279 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4280 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4281 else:
4282 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
4283 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4284 vca_type = vca.get("type")
4285 exec_terminate_primitives = not operation_params.get(
4286 "skip_terminate_primitives"
4287 ) and vca.get("needed_terminate")
4288 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4289 # pending native charms
4290 destroy_ee = (
4291 True if vca_type in ("helm", "helm-v3", "native_charm") else False
4292 )
4293 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4294 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4295 task = asyncio.ensure_future(
4296 self.destroy_N2VC(
4297 logging_text,
4298 db_nslcmop,
4299 vca,
4300 config_descriptor,
4301 vca_index,
4302 destroy_ee,
4303 exec_terminate_primitives,
4304 vca_id=vca_id,
4305 )
4306 )
4307 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4308
4309 # wait for pending tasks of terminate primitives
4310 if tasks_dict_info:
4311 self.logger.debug(
4312 logging_text
4313 + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
4314 )
4315 error_list = await self._wait_for_tasks(
4316 logging_text,
4317 tasks_dict_info,
4318 min(self.timeout_charm_delete, timeout_ns_terminate),
4319 stage,
4320 nslcmop_id,
4321 )
4322 tasks_dict_info.clear()
4323 if error_list:
4324 return # raise LcmException("; ".join(error_list))
4325
4326 # remove All execution environments at once
4327 stage[0] = "Stage 3/3 delete all."
4328
4329 if nsr_deployed.get("VCA"):
4330 stage[1] = "Deleting all execution environments."
4331 self.logger.debug(logging_text + stage[1])
4332 vca_id = self.get_vca_id({}, db_nsr)
4333 task_delete_ee = asyncio.ensure_future(
4334 asyncio.wait_for(
4335 self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
4336 timeout=self.timeout_charm_delete,
4337 )
4338 )
4339 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4340 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
4341
4342 # Delete from k8scluster
4343 stage[1] = "Deleting KDUs."
4344 self.logger.debug(logging_text + stage[1])
4345 # print(nsr_deployed)
4346 for kdu in get_iterable(nsr_deployed, "K8s"):
4347 if not kdu or not kdu.get("kdu-instance"):
4348 continue
4349 kdu_instance = kdu.get("kdu-instance")
4350 if kdu.get("k8scluster-type") in self.k8scluster_map:
4351 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4352 vca_id = self.get_vca_id({}, db_nsr)
4353 task_delete_kdu_instance = asyncio.ensure_future(
4354 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
4355 cluster_uuid=kdu.get("k8scluster-uuid"),
4356 kdu_instance=kdu_instance,
4357 vca_id=vca_id,
4358 )
4359 )
4360 else:
4361 self.logger.error(
4362 logging_text
4363 + "Unknown k8s deployment type {}".format(
4364 kdu.get("k8scluster-type")
4365 )
4366 )
4367 continue
4368 tasks_dict_info[
4369 task_delete_kdu_instance
4370 ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
4371
4372 # remove from RO
4373 stage[1] = "Deleting ns from VIM."
4374 if self.ng_ro:
4375 task_delete_ro = asyncio.ensure_future(
4376 self._terminate_ng_ro(
4377 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4378 )
4379 )
4380 else:
4381 task_delete_ro = asyncio.ensure_future(
4382 self._terminate_RO(
4383 logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
4384 )
4385 )
4386 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
4387
4388 # the rest of the work will be done in the finally block
4389
4390 except (
4391 ROclient.ROClientException,
4392 DbException,
4393 LcmException,
4394 N2VCException,
4395 ) as e:
4396 self.logger.error(logging_text + "Exit Exception {}".format(e))
4397 exc = e
4398 except asyncio.CancelledError:
4399 self.logger.error(
4400 logging_text + "Cancelled Exception while '{}'".format(stage[1])
4401 )
4402 exc = "Operation was cancelled"
4403 except Exception as e:
4404 exc = traceback.format_exc()
4405 self.logger.critical(
4406 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
4407 exc_info=True,
4408 )
4409 finally:
4410 if exc:
4411 error_list.append(str(exc))
4412 try:
4413 # wait for pending tasks
4414 if tasks_dict_info:
4415 stage[1] = "Waiting for terminate pending tasks."
4416 self.logger.debug(logging_text + stage[1])
4417 error_list += await self._wait_for_tasks(
4418 logging_text,
4419 tasks_dict_info,
4420 timeout_ns_terminate,
4421 stage,
4422 nslcmop_id,
4423 )
4424 stage[1] = stage[2] = ""
4425 except asyncio.CancelledError:
4426 error_list.append("Cancelled")
4427 # TODO cancel all tasks
4428 except Exception as exc:
4429 error_list.append(str(exc))
4430 # update status at database
4431 if error_list:
4432 error_detail = "; ".join(error_list)
4433 # self.logger.error(logging_text + error_detail)
4434 error_description_nslcmop = "{} Detail: {}".format(
4435 stage[0], error_detail
4436 )
4437 error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
4438 nslcmop_id, stage[0]
4439 )
4440
4441 db_nsr_update["operational-status"] = "failed"
4442 db_nsr_update["detailed-status"] = (
4443 error_description_nsr + " Detail: " + error_detail
4444 )
4445 db_nslcmop_update["detailed-status"] = error_detail
4446 nslcmop_operation_state = "FAILED"
4447 ns_state = "BROKEN"
4448 else:
4449 error_detail = None
4450 error_description_nsr = error_description_nslcmop = None
4451 ns_state = "NOT_INSTANTIATED"
4452 db_nsr_update["operational-status"] = "terminated"
4453 db_nsr_update["detailed-status"] = "Done"
4454 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
4455 db_nslcmop_update["detailed-status"] = "Done"
4456 nslcmop_operation_state = "COMPLETED"
4457
4458 if db_nsr:
4459 self._write_ns_status(
4460 nsr_id=nsr_id,
4461 ns_state=ns_state,
4462 current_operation="IDLE",
4463 current_operation_id=None,
4464 error_description=error_description_nsr,
4465 error_detail=error_detail,
4466 other_update=db_nsr_update,
4467 )
4468 self._write_op_status(
4469 op_id=nslcmop_id,
4470 stage="",
4471 error_message=error_description_nslcmop,
4472 operation_state=nslcmop_operation_state,
4473 other_update=db_nslcmop_update,
4474 )
4475 if ns_state == "NOT_INSTANTIATED":
4476 try:
4477 self.db.set_list(
4478 "vnfrs",
4479 {"nsr-id-ref": nsr_id},
4480 {"_admin.nsState": "NOT_INSTANTIATED"},
4481 )
4482 except DbException as e:
4483 self.logger.warn(
4484 logging_text
4485 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4486 nsr_id, e
4487 )
4488 )
4489 if operation_params:
4490 autoremove = operation_params.get("autoremove", False)
4491 if nslcmop_operation_state:
4492 try:
4493 await self.msg.aiowrite(
4494 "ns",
4495 "terminated",
4496 {
4497 "nsr_id": nsr_id,
4498 "nslcmop_id": nslcmop_id,
4499 "operationState": nslcmop_operation_state,
4500 "autoremove": autoremove,
4501 },
4502 loop=self.loop,
4503 )
4504 except Exception as e:
4505 self.logger.error(
4506 logging_text + "kafka_write notification Exception {}".format(e)
4507 )
4508
4509 self.logger.debug(logging_text + "Exit")
4510 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4511
4512 async def _wait_for_tasks(
4513 self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
4514 ):
4515 time_start = time()
4516 error_detail_list = []
4517 error_list = []
4518 pending_tasks = list(created_tasks_info.keys())
4519 num_tasks = len(pending_tasks)
4520 num_done = 0
4521 stage[1] = "{}/{}.".format(num_done, num_tasks)
4522 self._write_op_status(nslcmop_id, stage)
4523 while pending_tasks:
4524 new_error = None
4525 _timeout = timeout + time_start - time()
4526 done, pending_tasks = await asyncio.wait(
4527 pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
4528 )
4529 num_done += len(done)
4530 if not done: # Timeout
4531 for task in pending_tasks:
4532 new_error = created_tasks_info[task] + ": Timeout"
4533 error_detail_list.append(new_error)
4534 error_list.append(new_error)
4535 break
4536 for task in done:
4537 if task.cancelled():
4538 exc = "Cancelled"
4539 else:
4540 exc = task.exception()
4541 if exc:
4542 if isinstance(exc, asyncio.TimeoutError):
4543 exc = "Timeout"
4544 new_error = created_tasks_info[task] + ": {}".format(exc)
4545 error_list.append(created_tasks_info[task])
4546 error_detail_list.append(new_error)
4547 if isinstance(
4548 exc,
4549 (
4550 str,
4551 DbException,
4552 N2VCException,
4553 ROclient.ROClientException,
4554 LcmException,
4555 K8sException,
4556 NgRoException,
4557 ),
4558 ):
4559 self.logger.error(logging_text + new_error)
4560 else:
4561 exc_traceback = "".join(
4562 traceback.format_exception(None, exc, exc.__traceback__)
4563 )
4564 self.logger.error(
4565 logging_text
4566 + created_tasks_info[task]
4567 + " "
4568 + exc_traceback
4569 )
4570 else:
4571 self.logger.debug(
4572 logging_text + created_tasks_info[task] + ": Done"
4573 )
4574 stage[1] = "{}/{}.".format(num_done, num_tasks)
4575 if new_error:
4576 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
4577 if nsr_id: # update also nsr
4578 self.update_db_2(
4579 "nsrs",
4580 nsr_id,
4581 {
4582 "errorDescription": "Error at: " + ", ".join(error_list),
4583 "errorDetail": ". ".join(error_detail_list),
4584 },
4585 )
4586 self._write_op_status(nslcmop_id, stage)
4587 return error_detail_list
4588
4589 @staticmethod
4590 def _map_primitive_params(primitive_desc, params, instantiation_params):
4591 """
4592 Generates the params to be provided to the charm before executing a primitive. If the user does not provide a parameter,
4593 the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
4594 :param primitive_desc: portion of VNFD/NSD that describes primitive
4595 :param params: Params provided by user
4596 :param instantiation_params: Instantiation params provided by user
4597 :return: a dictionary with the calculated params
4598 """
4599 calculated_params = {}
4600 for parameter in primitive_desc.get("parameter", ()):
4601 param_name = parameter["name"]
4602 if param_name in params:
4603 calculated_params[param_name] = params[param_name]
4604 elif "default-value" in parameter or "value" in parameter:
4605 if "value" in parameter:
4606 calculated_params[param_name] = parameter["value"]
4607 else:
4608 calculated_params[param_name] = parameter["default-value"]
4609 if (
4610 isinstance(calculated_params[param_name], str)
4611 and calculated_params[param_name].startswith("<")
4612 and calculated_params[param_name].endswith(">")
4613 ):
4614 if calculated_params[param_name][1:-1] in instantiation_params:
4615 calculated_params[param_name] = instantiation_params[
4616 calculated_params[param_name][1:-1]
4617 ]
4618 else:
4619 raise LcmException(
4620 "Parameter {} needed to execute primitive {} not provided".format(
4621 calculated_params[param_name], primitive_desc["name"]
4622 )
4623 )
4624 else:
4625 raise LcmException(
4626 "Parameter {} needed to execute primitive {} not provided".format(
4627 param_name, primitive_desc["name"]
4628 )
4629 )
4630
4631 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4632 calculated_params[param_name] = yaml.safe_dump(
4633 calculated_params[param_name], default_flow_style=True, width=256
4634 )
4635 elif isinstance(calculated_params[param_name], str) and calculated_params[
4636 param_name
4637 ].startswith("!!yaml "):
4638 calculated_params[param_name] = calculated_params[param_name][7:]
4639 if parameter.get("data-type") == "INTEGER":
4640 try:
4641 calculated_params[param_name] = int(calculated_params[param_name])
4642 except ValueError: # error converting string to int
4643 raise LcmException(
4644 "Parameter {} of primitive {} must be integer".format(
4645 param_name, primitive_desc["name"]
4646 )
4647 )
4648 elif parameter.get("data-type") == "BOOLEAN":
4649 calculated_params[param_name] = not (
4650 (str(calculated_params[param_name])).lower() == "false"
4651 )
4652
4653 # always add ns_config_info if the primitive name is config
4654 if primitive_desc["name"] == "config":
4655 if "ns_config_info" in instantiation_params:
4656 calculated_params["ns_config_info"] = instantiation_params[
4657 "ns_config_info"
4658 ]
4659 return calculated_params
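# Illustrative example (hypothetical values): for a primitive description containing
#   parameter: [{"name": "ssh-username", "default-value": "<username>"}]
# and no user-supplied value, the default "<username>" is resolved against
# instantiation_params, so instantiation_params = {"username": "ubuntu"} yields
# calculated_params = {"ssh-username": "ubuntu"}; a missing key raises LcmException.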
4660
4661 def _look_for_deployed_vca(
4662 self,
4663 deployed_vca,
4664 member_vnf_index,
4665 vdu_id,
4666 vdu_count_index,
4667 kdu_name=None,
4668 ee_descriptor_id=None,
4669 ):
4670 # find the vca_deployed record for this action. Raise LcmException if it is not found or it has no ee_id.
4671 for vca in deployed_vca:
4672 if not vca:
4673 continue
4674 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4675 continue
4676 if (
4677 vdu_count_index is not None
4678 and vdu_count_index != vca["vdu_count_index"]
4679 ):
4680 continue
4681 if kdu_name and kdu_name != vca["kdu_name"]:
4682 continue
4683 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4684 continue
4685 break
4686 else:
4687 # vca_deployed not found
4688 raise LcmException(
4689 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4690 " is not deployed".format(
4691 member_vnf_index,
4692 vdu_id,
4693 vdu_count_index,
4694 kdu_name,
4695 ee_descriptor_id,
4696 )
4697 )
4698 # get ee_id
4699 ee_id = vca.get("ee_id")
4700 vca_type = vca.get(
4701 "type", "lxc_proxy_charm"
4702 ) # default value for backward compatibility - proxy charm
4703 if not ee_id:
4704 raise LcmException(
4705 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4706 "execution environment".format(
4707 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4708 )
4709 )
4710 return ee_id, vca_type
4711
4712 async def _ns_execute_primitive(
4713 self,
4714 ee_id,
4715 primitive,
4716 primitive_params,
4717 retries=0,
4718 retries_interval=30,
4719 timeout=None,
4720 vca_type=None,
4721 db_dict=None,
4722 vca_id: str = None,
4723 ) -> (str, str):
4724 try:
4725 if primitive == "config":
4726 primitive_params = {"params": primitive_params}
4727
4728 vca_type = vca_type or "lxc_proxy_charm"
4729
4730 while retries >= 0:
4731 try:
4732 output = await asyncio.wait_for(
4733 self.vca_map[vca_type].exec_primitive(
4734 ee_id=ee_id,
4735 primitive_name=primitive,
4736 params_dict=primitive_params,
4737 progress_timeout=self.timeout_progress_primitive,
4738 total_timeout=self.timeout_primitive,
4739 db_dict=db_dict,
4740 vca_id=vca_id,
4741 vca_type=vca_type,
4742 ),
4743 timeout=timeout or self.timeout_primitive,
4744 )
4745 # execution was OK
4746 break
4747 except asyncio.CancelledError:
4748 raise
4749 except Exception as e: # asyncio.TimeoutError
4750 if isinstance(e, asyncio.TimeoutError):
4751 e = "Timeout"
4752 retries -= 1
4753 if retries >= 0:
4754 self.logger.debug(
4755 "Error executing action {} on {} -> {}".format(
4756 primitive, ee_id, e
4757 )
4758 )
4759 # wait and retry
4760 await asyncio.sleep(retries_interval, loop=self.loop)
4761 else:
4762 return "FAILED", str(e)
4763
4764 return "COMPLETED", output
4765
4766 except (LcmException, asyncio.CancelledError):
4767 raise
4768 except Exception as e:
4769 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4770
4771 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4772 """
4773 Updating the vca_status with latest juju information in nsrs record
4774 :param nsr_id: Id of the nsr
4775 :param nslcmop_id: Id of the nslcmop
4776 :return: None
4777 """
4778
4779 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4780 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4781 vca_id = self.get_vca_id({}, db_nsr)
4782 if db_nsr["_admin"]["deployed"]["K8s"]:
4783 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4784 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4785 await self._on_update_k8s_db(
4786 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4787 )
4788 else:
4789 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4790 table, filter = "nsrs", {"_id": nsr_id}
4791 path = "_admin.deployed.VCA.{}.".format(vca_index)
4792 await self._on_update_n2vc_db(table, filter, path, {})
4793
4794 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4795 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4796
4797 async def action(self, nsr_id, nslcmop_id):
4798 # Try to lock HA task here
4799 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4800 if not task_is_locked_by_me:
4801 return
4802
4803 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4804 self.logger.debug(logging_text + "Enter")
4805 # get all needed from database
4806 db_nsr = None
4807 db_nslcmop = None
4808 db_nsr_update = {}
4809 db_nslcmop_update = {}
4810 nslcmop_operation_state = None
4811 error_description_nslcmop = None
4812 exc = None
4813 try:
4814 # wait for any previous tasks in process
4815 step = "Waiting for previous operations to terminate"
4816 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4817
4818 self._write_ns_status(
4819 nsr_id=nsr_id,
4820 ns_state=None,
4821 current_operation="RUNNING ACTION",
4822 current_operation_id=nslcmop_id,
4823 )
4824
4825 step = "Getting information from database"
4826 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4827 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4828
4829 nsr_deployed = db_nsr["_admin"].get("deployed")
4830 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4831 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4832 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4833 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4834 primitive = db_nslcmop["operationParams"]["primitive"]
4835 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4836 timeout_ns_action = db_nslcmop["operationParams"].get(
4837 "timeout_ns_action", self.timeout_primitive
4838 )
4839
4840 if vnf_index:
4841 step = "Getting vnfr from database"
4842 db_vnfr = self.db.get_one(
4843 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4844 )
4845 step = "Getting vnfd from database"
4846 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4847 else:
4848 step = "Getting nsd from database"
4849 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4850
4851 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4852 # for backward compatibility
4853 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4854 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4855 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4856 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4857
4858 # look for primitive
4859 config_primitive_desc = descriptor_configuration = None
4860 if vdu_id:
4861 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4862 elif kdu_name:
4863 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4864 elif vnf_index:
4865 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4866 else:
4867 descriptor_configuration = db_nsd.get("ns-configuration")
4868
4869 if descriptor_configuration and descriptor_configuration.get(
4870 "config-primitive"
4871 ):
4872 for config_primitive in descriptor_configuration["config-primitive"]:
4873 if config_primitive["name"] == primitive:
4874 config_primitive_desc = config_primitive
4875 break
4876
4877 if not config_primitive_desc:
4878 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4879 raise LcmException(
4880 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4881 primitive
4882 )
4883 )
4884 primitive_name = primitive
4885 ee_descriptor_id = None
4886 else:
4887 primitive_name = config_primitive_desc.get(
4888 "execution-environment-primitive", primitive
4889 )
4890 ee_descriptor_id = config_primitive_desc.get(
4891 "execution-environment-ref"
4892 )
4893
4894 if vnf_index:
4895 if vdu_id:
4896 vdur = next(
4897 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4898 )
4899 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4900 elif kdu_name:
4901 kdur = next(
4902 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4903 )
4904 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4905 else:
4906 desc_params = parse_yaml_strings(
4907 db_vnfr.get("additionalParamsForVnf")
4908 )
4909 else:
4910 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4911 if kdu_name and get_configuration(db_vnfd, kdu_name):
4912 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4913 actions = set()
4914 for primitive in kdu_configuration.get("initial-config-primitive", []):
4915 actions.add(primitive["name"])
4916 for primitive in kdu_configuration.get("config-primitive", []):
4917 actions.add(primitive["name"])
4918 kdu_action = primitive_name in actions
4919
4920 # TODO check if ns is in a proper status
4921 if kdu_name and (
4922 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4923 ):
4924 # kdur and desc_params already set from before
4925 if primitive_params:
4926 desc_params.update(primitive_params)
4927 # TODO Check if we will need something at vnf level
4928 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
4929 if (
4930 kdu_name == kdu["kdu-name"]
4931 and kdu["member-vnf-index"] == vnf_index
4932 ):
4933 break
4934 else:
4935 raise LcmException(
4936 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
4937 )
4938
4939 if kdu.get("k8scluster-type") not in self.k8scluster_map:
4940 msg = "unknown k8scluster-type '{}'".format(
4941 kdu.get("k8scluster-type")
4942 )
4943 raise LcmException(msg)
4944
4945 db_dict = {
4946 "collection": "nsrs",
4947 "filter": {"_id": nsr_id},
4948 "path": "_admin.deployed.K8s.{}".format(index),
4949 }
4950 self.logger.debug(
4951 logging_text
4952 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
4953 )
4954 step = "Executing kdu {}".format(primitive_name)
4955 if primitive_name == "upgrade":
4956 if desc_params.get("kdu_model"):
4957 kdu_model = desc_params.get("kdu_model")
4958 del desc_params["kdu_model"]
4959 else:
4960 kdu_model = kdu.get("kdu-model")
4961 parts = kdu_model.split(sep=":")
4962 if len(parts) == 2:
4963 kdu_model = parts[0]
4964
4965 detailed_status = await asyncio.wait_for(
4966 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
4967 cluster_uuid=kdu.get("k8scluster-uuid"),
4968 kdu_instance=kdu.get("kdu-instance"),
4969 atomic=True,
4970 kdu_model=kdu_model,
4971 params=desc_params,
4972 db_dict=db_dict,
4973 timeout=timeout_ns_action,
4974 ),
4975 timeout=timeout_ns_action + 10,
4976 )
4977 self.logger.debug(
4978 logging_text + " Upgrade of kdu {} done".format(detailed_status)
4979 )
4980 elif primitive_name == "rollback":
4981 detailed_status = await asyncio.wait_for(
4982 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
4983 cluster_uuid=kdu.get("k8scluster-uuid"),
4984 kdu_instance=kdu.get("kdu-instance"),
4985 db_dict=db_dict,
4986 ),
4987 timeout=timeout_ns_action,
4988 )
4989 elif primitive_name == "status":
4990 detailed_status = await asyncio.wait_for(
4991 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
4992 cluster_uuid=kdu.get("k8scluster-uuid"),
4993 kdu_instance=kdu.get("kdu-instance"),
4994 vca_id=vca_id,
4995 ),
4996 timeout=timeout_ns_action,
4997 )
4998 else:
4999 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5000 kdu["kdu-name"], nsr_id
5001 )
5002 params = self._map_primitive_params(
5003 config_primitive_desc, primitive_params, desc_params
5004 )
5005
5006 detailed_status = await asyncio.wait_for(
5007 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5008 cluster_uuid=kdu.get("k8scluster-uuid"),
5009 kdu_instance=kdu_instance,
5010 primitive_name=primitive_name,
5011 params=params,
5012 db_dict=db_dict,
5013 timeout=timeout_ns_action,
5014 vca_id=vca_id,
5015 ),
5016 timeout=timeout_ns_action,
5017 )
5018
5019 if detailed_status:
5020 nslcmop_operation_state = "COMPLETED"
5021 else:
5022 detailed_status = ""
5023 nslcmop_operation_state = "FAILED"
5024 else:
5025 ee_id, vca_type = self._look_for_deployed_vca(
5026 nsr_deployed["VCA"],
5027 member_vnf_index=vnf_index,
5028 vdu_id=vdu_id,
5029 vdu_count_index=vdu_count_index,
5030 ee_descriptor_id=ee_descriptor_id,
5031 )
5032 for vca_index, vca_deployed in enumerate(
5033 db_nsr["_admin"]["deployed"]["VCA"]
5034 ):
5035 if vca_deployed.get("member-vnf-index") == vnf_index:
5036 db_dict = {
5037 "collection": "nsrs",
5038 "filter": {"_id": nsr_id},
5039 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5040 }
5041 break
5042 (
5043 nslcmop_operation_state,
5044 detailed_status,
5045 ) = await self._ns_execute_primitive(
5046 ee_id,
5047 primitive=primitive_name,
5048 primitive_params=self._map_primitive_params(
5049 config_primitive_desc, primitive_params, desc_params
5050 ),
5051 timeout=timeout_ns_action,
5052 vca_type=vca_type,
5053 db_dict=db_dict,
5054 vca_id=vca_id,
5055 )
5056
5057 db_nslcmop_update["detailed-status"] = detailed_status
5058 error_description_nslcmop = (
5059 detailed_status if nslcmop_operation_state == "FAILED" else ""
5060 )
5061 self.logger.debug(
5062 logging_text
5063 + " task Done with result {} {}".format(
5064 nslcmop_operation_state, detailed_status
5065 )
5066 )
5067 return # database update is called inside finally
5068
5069 except (DbException, LcmException, N2VCException, K8sException) as e:
5070 self.logger.error(logging_text + "Exit Exception {}".format(e))
5071 exc = e
5072 except asyncio.CancelledError:
5073 self.logger.error(
5074 logging_text + "Cancelled Exception while '{}'".format(step)
5075 )
5076 exc = "Operation was cancelled"
5077 except asyncio.TimeoutError:
5078 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5079 exc = "Timeout"
5080 except Exception as e:
5081 exc = traceback.format_exc()
5082 self.logger.critical(
5083 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5084 exc_info=True,
5085 )
5086 finally:
5087 if exc:
5088 db_nslcmop_update[
5089 "detailed-status"
5090 ] = (
5091 detailed_status
5092 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5093 nslcmop_operation_state = "FAILED"
5094 if db_nsr:
5095 self._write_ns_status(
5096 nsr_id=nsr_id,
5097 ns_state=db_nsr[
5098 "nsState"
5099 ], # TODO check if degraded. For the moment use previous status
5100 current_operation="IDLE",
5101 current_operation_id=None,
5102 # error_description=error_description_nsr,
5103 # error_detail=error_detail,
5104 other_update=db_nsr_update,
5105 )
5106
5107 self._write_op_status(
5108 op_id=nslcmop_id,
5109 stage="",
5110 error_message=error_description_nslcmop,
5111 operation_state=nslcmop_operation_state,
5112 other_update=db_nslcmop_update,
5113 )
5114
5115 if nslcmop_operation_state:
5116 try:
5117 await self.msg.aiowrite(
5118 "ns",
5119 "actioned",
5120 {
5121 "nsr_id": nsr_id,
5122 "nslcmop_id": nslcmop_id,
5123 "operationState": nslcmop_operation_state,
5124 },
5125 loop=self.loop,
5126 )
5127 except Exception as e:
5128 self.logger.error(
5129 logging_text + "kafka_write notification Exception {}".format(e)
5130 )
5131 self.logger.debug(logging_text + "Exit")
5132 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5133 return nslcmop_operation_state, detailed_status
5134
5135 async def scale(self, nsr_id, nslcmop_id):
5136 # Try to lock HA task here
5137 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5138 if not task_is_locked_by_me:
5139 return
5140
5141 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
5142 stage = ["", "", ""]
5143 tasks_dict_info = {}
5144 # ^ stage, step, VIM progress
5145 self.logger.debug(logging_text + "Enter")
5146 # get all needed from database
5147 db_nsr = None
5148 db_nslcmop_update = {}
5149 db_nsr_update = {}
5150 exc = None
5151 # in case of error, indicates which part of the scale operation failed, in order to set the nsr error status
5152 scale_process = None
5153 old_operational_status = ""
5154 old_config_status = ""
5155 nsi_id = None
5156 try:
5157 # wait for any previous tasks in process
5158 step = "Waiting for previous operations to terminate"
5159 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5160 self._write_ns_status(
5161 nsr_id=nsr_id,
5162 ns_state=None,
5163 current_operation="SCALING",
5164 current_operation_id=nslcmop_id,
5165 )
5166
5167 step = "Getting nslcmop from database"
5168 self.logger.debug(
5169 step + " after having waited for previous tasks to be completed"
5170 )
5171 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5172
5173 step = "Getting nsr from database"
5174 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5175 old_operational_status = db_nsr["operational-status"]
5176 old_config_status = db_nsr["config-status"]
5177
5178 step = "Parsing scaling parameters"
5179 db_nsr_update["operational-status"] = "scaling"
5180 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5181 nsr_deployed = db_nsr["_admin"].get("deployed")
5182
5183 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
5184 "scaleByStepData"
5185 ]["member-vnf-index"]
5186 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
5187 "scaleByStepData"
5188 ]["scaling-group-descriptor"]
5189 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
5190 # for backward compatibility
5191 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5192 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5193 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5194 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5195
5196 step = "Getting vnfr from database"
5197 db_vnfr = self.db.get_one(
5198 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5199 )
5200
5201 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5202
5203 step = "Getting vnfd from database"
5204 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5205
5206 base_folder = db_vnfd["_admin"]["storage"]
5207
5208 step = "Getting scaling-group-descriptor"
5209 scaling_descriptor = find_in_list(
5210 get_scaling_aspect(db_vnfd),
5211 lambda scale_desc: scale_desc["name"] == scaling_group,
5212 )
5213 if not scaling_descriptor:
5214 raise LcmException(
5215 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
5216 "at vnfd:scaling-group-descriptor".format(scaling_group)
5217 )
5218
5219 step = "Sending scale order to VIM"
5220 # TODO check if ns is in a proper status
5221 nb_scale_op = 0
5222 if not db_nsr["_admin"].get("scaling-group"):
5223 self.update_db_2(
5224 "nsrs",
5225 nsr_id,
5226 {
5227 "_admin.scaling-group": [
5228 {"name": scaling_group, "nb-scale-op": 0}
5229 ]
5230 },
5231 )
5232 admin_scale_index = 0
5233 else:
5234 for admin_scale_index, admin_scale_info in enumerate(
5235 db_nsr["_admin"]["scaling-group"]
5236 ):
5237 if admin_scale_info["name"] == scaling_group:
5238 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
5239 break
5240 else: # not found, set index one plus last element and add new entry with the name
5241 admin_scale_index += 1
5242 db_nsr_update[
5243 "_admin.scaling-group.{}.name".format(admin_scale_index)
5244 ] = scaling_group
5245
5246 vca_scaling_info = []
5247 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
5248 if scaling_type == "SCALE_OUT":
5249 if "aspect-delta-details" not in scaling_descriptor:
5250 raise LcmException(
5251 "Aspect delta details not fount in scaling descriptor {}".format(
5252 scaling_descriptor["name"]
5253 )
5254 )
5255 # check whether max-instance-count would be reached
5256 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5257
5258 scaling_info["scaling_direction"] = "OUT"
5259 scaling_info["vdu-create"] = {}
5260 scaling_info["kdu-create"] = {}
5261 for delta in deltas:
5262 for vdu_delta in delta.get("vdu-delta", {}):
5263 vdud = get_vdu(db_vnfd, vdu_delta["id"])
5264 # vdu_index also provides the number of instances of the targeted vdu
5265 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5266 cloud_init_text = self._get_vdu_cloud_init_content(
5267 vdud, db_vnfd
5268 )
5269 if cloud_init_text:
5270 additional_params = (
5271 self._get_vdu_additional_params(db_vnfr, vdud["id"])
5272 or {}
5273 )
5274 cloud_init_list = []
5275
5276 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5277 max_instance_count = 10
5278 if vdu_profile and "max-number-of-instances" in vdu_profile:
5279 max_instance_count = vdu_profile.get(
5280 "max-number-of-instances", 10
5281 )
5282
5283 default_instance_num = get_number_of_instances(
5284 db_vnfd, vdud["id"]
5285 )
5286 instances_number = vdu_delta.get("number-of-instances", 1)
5287 nb_scale_op += instances_number
5288
5289 new_instance_count = nb_scale_op + default_instance_num
5290 # If the new count is over the max while the current vdu count is below it,
5291 # set instances_number to the excess over the max; otherwise keep the requested number
5292 if new_instance_count > max_instance_count > vdu_count:
5293 instances_number = new_instance_count - max_instance_count
5294 else:
5295 instances_number = instances_number
5296
5297 if new_instance_count > max_instance_count:
5298 raise LcmException(
5299 "reached the limit of {} (max-instance-count) "
5300 "scaling-out operations for the "
5301 "scaling-group-descriptor '{}'".format(
5302 nb_scale_op, scaling_group
5303 )
5304 )
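# one 'create' order per new VDU instance; cloud-init is rendered per instance
# when the VDU descriptor provides it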
5305 for x in range(vdu_delta.get("number-of-instances", 1)):
5306 if cloud_init_text:
5307 # TODO the instance's own IP is not available here because db_vnfr has not been updated yet
5308 additional_params["OSM"] = get_osm_params(
5309 db_vnfr, vdu_delta["id"], vdu_index + x
5310 )
5311 cloud_init_list.append(
5312 self._parse_cloud_init(
5313 cloud_init_text,
5314 additional_params,
5315 db_vnfd["id"],
5316 vdud["id"],
5317 )
5318 )
5319 vca_scaling_info.append(
5320 {
5321 "osm_vdu_id": vdu_delta["id"],
5322 "member-vnf-index": vnf_index,
5323 "type": "create",
5324 "vdu_index": vdu_index + x,
5325 }
5326 )
5327 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
5328 for kdu_delta in delta.get("kdu-resource-delta", {}):
5329 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
5330 kdu_name = kdu_profile["kdu-name"]
5331 resource_name = kdu_profile["resource-name"]
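# resource-name identifies the k8s resource whose replica count is queried and
# later adjusted for this kdu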
5332
5333 # The same delta may contain different kdus,
5334 # so keep a separate list for each kdu
5335 if not scaling_info["kdu-create"].get(kdu_name, None):
5336 scaling_info["kdu-create"][kdu_name] = []
5337
5338 kdur = get_kdur(db_vnfr, kdu_name)
5339 if kdur.get("helm-chart"):
5340 k8s_cluster_type = "helm-chart-v3"
5341 self.logger.debug("kdur: {}".format(kdur))
5342 if (
5343 kdur.get("helm-version")
5344 and kdur.get("helm-version") == "v2"
5345 ):
5346 k8s_cluster_type = "helm-chart"
5347 raise NotImplementedError
5348 elif kdur.get("juju-bundle"):
5349 k8s_cluster_type = "juju-bundle"
5350 else:
5351 raise LcmException(
5352 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5353 "juju-bundle. Maybe an old NBI version is running".format(
5354 db_vnfr["member-vnf-index-ref"], kdu_name
5355 )
5356 )
5357
5358 max_instance_count = 10
5359 if kdu_profile and "max-number-of-instances" in kdu_profile:
5360 max_instance_count = kdu_profile.get(
5361 "max-number-of-instances", 10
5362 )
5363
5364 nb_scale_op += kdu_delta.get("number-of-instances", 1)
5365 deployed_kdu, _ = get_deployed_kdu(
5366 nsr_deployed, kdu_name, vnf_index
5367 )
5368 if deployed_kdu is None:
5369 raise LcmException(
5370 "KDU '{}' for vnf '{}' not deployed".format(
5371 kdu_name, vnf_index
5372 )
5373 )
5374 kdu_instance = deployed_kdu.get("kdu-instance")
5375 instance_num = await self.k8scluster_map[
5376 k8s_cluster_type
5377 ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
5378 kdu_replica_count = instance_num + kdu_delta.get(
5379 "number-of-instances", 1
5380 )
5381
5382 # If the new replica count is over the max while the current count is below it,
5383 # cap the kdu replica count at the maximum
5384 if kdu_replica_count > max_instance_count > instance_num:
5385 kdu_replica_count = max_instance_count
5386 if kdu_replica_count > max_instance_count:
5387 raise LcmException(
5388 "reached the limit of {} (max-instance-count) "
5389 "scaling-out operations for the "
5390 "scaling-group-descriptor '{}'".format(
5391 instance_num, scaling_group
5392 )
5393 )
5394
5395 for x in range(kdu_delta.get("number-of-instances", 1)):
5396 vca_scaling_info.append(
5397 {
5398 "osm_kdu_id": kdu_name,
5399 "member-vnf-index": vnf_index,
5400 "type": "create",
5401 "kdu_index": instance_num + x - 1,
5402 }
5403 )
5404 scaling_info["kdu-create"][kdu_name].append(
5405 {
5406 "member-vnf-index": vnf_index,
5407 "type": "create",
5408 "k8s-cluster-type": k8s_cluster_type,
5409 "resource-name": resource_name,
5410 "scale": kdu_replica_count,
5411 }
5412 )
5413 elif scaling_type == "SCALE_IN":
5414 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5415
5416 scaling_info["scaling_direction"] = "IN"
5417 scaling_info["vdu-delete"] = {}
5418 scaling_info["kdu-delete"] = {}
5419
5420 for delta in deltas:
5421 for vdu_delta in delta.get("vdu-delta", {}):
5422 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5423 min_instance_count = 0
5424 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5425 if vdu_profile and "min-number-of-instances" in vdu_profile:
5426 min_instance_count = vdu_profile["min-number-of-instances"]
5427
5428 default_instance_num = get_number_of_instances(
5429 db_vnfd, vdu_delta["id"]
5430 )
5431 instance_num = vdu_delta.get("number-of-instances", 1)
5432 nb_scale_op -= instance_num
5433
5434 new_instance_count = nb_scale_op + default_instance_num
5435
5436 if new_instance_count < min_instance_count < vdu_count:
5437 instances_number = min_instance_count - new_instance_count
5438 else:
5439 instances_number = instance_num
5440
5441 if new_instance_count < min_instance_count:
5442 raise LcmException(
5443 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5444 "scaling-group-descriptor '{}'".format(
5445 nb_scale_op, scaling_group
5446 )
5447 )
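# one 'delete' order per removed VDU instance, starting from the highest vdu_index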
5448 for x in range(vdu_delta.get("number-of-instances", 1)):
5449 vca_scaling_info.append(
5450 {
5451 "osm_vdu_id": vdu_delta["id"],
5452 "member-vnf-index": vnf_index,
5453 "type": "delete",
5454 "vdu_index": vdu_index - 1 - x,
5455 }
5456 )
5457 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
5458 for kdu_delta in delta.get("kdu-resource-delta", {}):
5459 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
5460 kdu_name = kdu_profile["kdu-name"]
5461 resource_name = kdu_profile["resource-name"]
5462
5463 if not scaling_info["kdu-delete"].get(kdu_name, None):
5464 scaling_info["kdu-delete"][kdu_name] = []
5465
5466 kdur = get_kdur(db_vnfr, kdu_name)
5467 if kdur.get("helm-chart"):
5468 k8s_cluster_type = "helm-chart-v3"
5469 self.logger.debug("kdur: {}".format(kdur))
5470 if (
5471 kdur.get("helm-version")
5472 and kdur.get("helm-version") == "v2"
5473 ):
5474 k8s_cluster_type = "helm-chart"
5475 raise NotImplementedError
5476 elif kdur.get("juju-bundle"):
5477 k8s_cluster_type = "juju-bundle"
5478 else:
5479 raise LcmException(
5480 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5481 "juju-bundle. Maybe an old NBI version is running".format(
5482 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
5483 )
5484 )
5485
5486 min_instance_count = 0
5487 if kdu_profile and "min-number-of-instances" in kdu_profile:
5488 min_instance_count = kdu_profile["min-number-of-instances"]
5489
5490 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
5491 deployed_kdu, _ = get_deployed_kdu(
5492 nsr_deployed, kdu_name, vnf_index
5493 )
5494 if deployed_kdu is None:
5495 raise LcmException(
5496 "KDU '{}' for vnf '{}' not deployed".format(
5497 kdu_name, vnf_index
5498 )
5499 )
5500 kdu_instance = deployed_kdu.get("kdu-instance")
5501 instance_num = await self.k8scluster_map[
5502 k8s_cluster_type
5503 ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
5504 kdu_replica_count = instance_num - kdu_delta.get(
5505 "number-of-instances", 1
5506 )
5507
5508 if kdu_replica_count < min_instance_count < instance_num:
5509 kdu_replica_count = min_instance_count
5510 if kdu_replica_count < min_instance_count:
5511 raise LcmException(
5512 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5513 "scaling-group-descriptor '{}'".format(
5514 instance_num, scaling_group
5515 )
5516 )
5517
5518 for x in range(kdu_delta.get("number-of-instances", 1)):
5519 vca_scaling_info.append(
5520 {
5521 "osm_kdu_id": kdu_name,
5522 "member-vnf-index": vnf_index,
5523 "type": "delete",
5524 "kdu_index": instance_num - x - 1,
5525 }
5526 )
5527 scaling_info["kdu-delete"][kdu_name].append(
5528 {
5529 "member-vnf-index": vnf_index,
5530 "type": "delete",
5531 "k8s-cluster-type": k8s_cluster_type,
5532 "resource-name": resource_name,
5533 "scale": kdu_replica_count,
5534 }
5535 )
5536
5537 # update scaling_info with the VDUs to delete, including their interface IP and MAC addresses
5538 vdu_delete = copy(scaling_info.get("vdu-delete"))
5539 if scaling_info["scaling_direction"] == "IN":
5540 for vdur in reversed(db_vnfr["vdur"]):
5541 if vdu_delete.get(vdur["vdu-id-ref"]):
5542 vdu_delete[vdur["vdu-id-ref"]] -= 1
5543 scaling_info["vdu"].append(
5544 {
5545 "name": vdur.get("name") or vdur.get("vdu-name"),
5546 "vdu_id": vdur["vdu-id-ref"],
5547 "interface": [],
5548 }
5549 )
5550 for interface in vdur["interfaces"]:
5551 scaling_info["vdu"][-1]["interface"].append(
5552 {
5553 "name": interface["name"],
5554 "ip_address": interface["ip-address"],
5555 "mac_address": interface.get("mac-address"),
5556 }
5557 )
5558 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
5559
5560 # PRE-SCALE BEGIN
5561 step = "Executing pre-scale vnf-config-primitive"
5562 if scaling_descriptor.get("scaling-config-action"):
5563 for scaling_config_action in scaling_descriptor[
5564 "scaling-config-action"
5565 ]:
5566 if (
5567 scaling_config_action.get("trigger") == "pre-scale-in"
5568 and scaling_type == "SCALE_IN"
5569 ) or (
5570 scaling_config_action.get("trigger") == "pre-scale-out"
5571 and scaling_type == "SCALE_OUT"
5572 ):
5573 vnf_config_primitive = scaling_config_action[
5574 "vnf-config-primitive-name-ref"
5575 ]
5576 step = db_nslcmop_update[
5577 "detailed-status"
5578 ] = "executing pre-scale scaling-config-action '{}'".format(
5579 vnf_config_primitive
5580 )
5581
5582 # look for primitive
5583 for config_primitive in (
5584 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5585 ).get("config-primitive", ()):
5586 if config_primitive["name"] == vnf_config_primitive:
5587 break
5588 else:
5589 raise LcmException(
5590 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
5591 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
5592 "primitive".format(scaling_group, vnf_config_primitive)
5593 )
5594
5595 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5596 if db_vnfr.get("additionalParamsForVnf"):
5597 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5598
5599 scale_process = "VCA"
5600 db_nsr_update["config-status"] = "configuring pre-scaling"
5601 primitive_params = self._map_primitive_params(
5602 config_primitive, {}, vnfr_params
5603 )
5604
5605 # Pre-scale retry check: has this sub-operation been executed before?
5606 op_index = self._check_or_add_scale_suboperation(
5607 db_nslcmop,
5608 vnf_index,
5609 vnf_config_primitive,
5610 primitive_params,
5611 "PRE-SCALE",
5612 )
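# op_index is SKIP when this primitive already completed, NEW when it is executed
# for the first time, or the index of an existing sub-operation to retry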
5613 if op_index == self.SUBOPERATION_STATUS_SKIP:
5614 # Skip sub-operation
5615 result = "COMPLETED"
5616 result_detail = "Done"
5617 self.logger.debug(
5618 logging_text
5619 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
5620 vnf_config_primitive, result, result_detail
5621 )
5622 )
5623 else:
5624 if op_index == self.SUBOPERATION_STATUS_NEW:
5625 # New sub-operation: Get index of this sub-operation
5626 op_index = (
5627 len(db_nslcmop.get("_admin", {}).get("operations"))
5628 - 1
5629 )
5630 self.logger.debug(
5631 logging_text
5632 + "vnf_config_primitive={} New sub-operation".format(
5633 vnf_config_primitive
5634 )
5635 )
5636 else:
5637 # retry: Get registered params for this existing sub-operation
5638 op = db_nslcmop.get("_admin", {}).get("operations", [])[
5639 op_index
5640 ]
5641 vnf_index = op.get("member_vnf_index")
5642 vnf_config_primitive = op.get("primitive")
5643 primitive_params = op.get("primitive_params")
5644 self.logger.debug(
5645 logging_text
5646 + "vnf_config_primitive={} Sub-operation retry".format(
5647 vnf_config_primitive
5648 )
5649 )
5650 # Execute the primitive, either with new (first-time) or registered (retry) args
5651 ee_descriptor_id = config_primitive.get(
5652 "execution-environment-ref"
5653 )
5654 primitive_name = config_primitive.get(
5655 "execution-environment-primitive", vnf_config_primitive
5656 )
5657 ee_id, vca_type = self._look_for_deployed_vca(
5658 nsr_deployed["VCA"],
5659 member_vnf_index=vnf_index,
5660 vdu_id=None,
5661 vdu_count_index=None,
5662 ee_descriptor_id=ee_descriptor_id,
5663 )
5664 result, result_detail = await self._ns_execute_primitive(
5665 ee_id,
5666 primitive_name,
5667 primitive_params,
5668 vca_type=vca_type,
5669 vca_id=vca_id,
5670 )
5671 self.logger.debug(
5672 logging_text
5673 + "vnf_config_primitive={} Done with result {} {}".format(
5674 vnf_config_primitive, result, result_detail
5675 )
5676 )
5677 # Update operationState = COMPLETED | FAILED
5678 self._update_suboperation_status(
5679 db_nslcmop, op_index, result, result_detail
5680 )
5681
5682 if result == "FAILED":
5683 raise LcmException(result_detail)
5684 db_nsr_update["config-status"] = old_config_status
5685 scale_process = None
5686 # PRE-SCALE END
5687
5688 db_nsr_update[
5689 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
5690 ] = nb_scale_op
5691 db_nsr_update[
5692 "_admin.scaling-group.{}.time".format(admin_scale_index)
5693 ] = time()
5694
5695 # SCALE-IN VCA - BEGIN
5696 if vca_scaling_info:
5697 step = db_nslcmop_update[
5698 "detailed-status"
5699 ] = "Deleting the execution environments"
5700 scale_process = "VCA"
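# for every 'delete' order, find the matching deployed VCA and destroy its
# execution environment, running terminate primitives when required and not
# skipped by the operation parameters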
5701 for vca_info in vca_scaling_info:
5702 if vca_info["type"] == "delete":
5703 member_vnf_index = str(vca_info["member-vnf-index"])
5704 self.logger.debug(
5705 logging_text + "vdu info: {}".format(vca_info)
5706 )
5707 if vca_info.get("osm_vdu_id"):
5708 vdu_id = vca_info["osm_vdu_id"]
5709 vdu_index = int(vca_info["vdu_index"])
5710 stage[
5711 1
5712 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5713 member_vnf_index, vdu_id, vdu_index
5714 )
5715 else:
5716 vdu_index = 0
5717 kdu_id = vca_info["osm_kdu_id"]
5718 stage[
5719 1
5720 ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format(
5721 member_vnf_index, kdu_id, vdu_index
5722 )
5723 stage[2] = step = "Scaling in VCA"
5724 self._write_op_status(op_id=nslcmop_id, stage=stage)
5725 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
5726 config_update = db_nsr["configurationStatus"]
5727 for vca_index, vca in enumerate(vca_update):
5728 if (
5729 (vca or vca.get("ee_id"))
5730 and vca["member-vnf-index"] == member_vnf_index
5731 and vca["vdu_count_index"] == vdu_index
5732 ):
5733 if vca.get("vdu_id"):
5734 config_descriptor = get_configuration(
5735 db_vnfd, vca.get("vdu_id")
5736 )
5737 elif vca.get("kdu_name"):
5738 config_descriptor = get_configuration(
5739 db_vnfd, vca.get("kdu_name")
5740 )
5741 else:
5742 config_descriptor = get_configuration(
5743 db_vnfd, db_vnfd["id"]
5744 )
5745 operation_params = (
5746 db_nslcmop.get("operationParams") or {}
5747 )
5748 exec_terminate_primitives = not operation_params.get(
5749 "skip_terminate_primitives"
5750 ) and vca.get("needed_terminate")
5751 task = asyncio.ensure_future(
5752 asyncio.wait_for(
5753 self.destroy_N2VC(
5754 logging_text,
5755 db_nslcmop,
5756 vca,
5757 config_descriptor,
5758 vca_index,
5759 destroy_ee=True,
5760 exec_primitives=exec_terminate_primitives,
5761 scaling_in=True,
5762 vca_id=vca_id,
5763 ),
5764 timeout=self.timeout_charm_delete,
5765 )
5766 )
5767 tasks_dict_info[task] = "Terminating VCA {}".format(
5768 vca.get("ee_id")
5769 )
5770 del vca_update[vca_index]
5771 del config_update[vca_index]
5772 # wait for pending tasks of terminate primitives
5773 if tasks_dict_info:
5774 self.logger.debug(
5775 logging_text
5776 + "Waiting for tasks {}".format(
5777 list(tasks_dict_info.keys())
5778 )
5779 )
5780 error_list = await self._wait_for_tasks(
5781 logging_text,
5782 tasks_dict_info,
5783 min(
5784 self.timeout_charm_delete, self.timeout_ns_terminate
5785 ),
5786 stage,
5787 nslcmop_id,
5788 )
5789 tasks_dict_info.clear()
5790 if error_list:
5791 raise LcmException("; ".join(error_list))
5792
5793 db_vca_and_config_update = {
5794 "_admin.deployed.VCA": vca_update,
5795 "configurationStatus": config_update,
5796 }
5797 self.update_db_2(
5798 "nsrs", db_nsr["_id"], db_vca_and_config_update
5799 )
5800 scale_process = None
5801 # SCALE-IN VCA - END
5802
5803 # SCALE RO - BEGIN
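# the RO (NG-RO) applies the VDU additions/removals at the VIM for the VDUs
# collected in scaling_info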
5804 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
5805 scale_process = "RO"
5806 if self.ro_config.get("ng"):
5807 await self._scale_ng_ro(
5808 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
5809 )
5810 scaling_info.pop("vdu-create", None)
5811 scaling_info.pop("vdu-delete", None)
5812
5813 scale_process = None
5814 # SCALE RO - END
5815
5816 # SCALE KDU - BEGIN
5817 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
5818 scale_process = "KDU"
5819 await self._scale_kdu(
5820 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
5821 )
5822 scaling_info.pop("kdu-create", None)
5823 scaling_info.pop("kdu-delete", None)
5824
5825 scale_process = None
5826 # SCALE KDU - END
5827
5828 if db_nsr_update:
5829 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5830
5831 # SCALE-UP VCA - BEGIN
5832 if vca_scaling_info:
5833 step = db_nslcmop_update[
5834 "detailed-status"
5835 ] = "Creating new execution environments"
5836 scale_process = "VCA"
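# for every 'create' order, deploy new execution environments through _deploy_n2vc
# at VNF, VDU or KDU level, depending on where the configuration is defined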
5837 for vca_info in vca_scaling_info:
5838 if vca_info["type"] == "create":
5839 member_vnf_index = str(vca_info["member-vnf-index"])
5840 self.logger.debug(
5841 logging_text + "vdu info: {}".format(vca_info)
5842 )
5843 vnfd_id = db_vnfr["vnfd-ref"]
5844 if vca_info.get("osm_vdu_id"):
5845 vdu_index = int(vca_info["vdu_index"])
5846 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5847 if db_vnfr.get("additionalParamsForVnf"):
5848 deploy_params.update(
5849 parse_yaml_strings(
5850 db_vnfr["additionalParamsForVnf"].copy()
5851 )
5852 )
5853 descriptor_config = get_configuration(
5854 db_vnfd, db_vnfd["id"]
5855 )
5856 if descriptor_config:
5857 vdu_id = None
5858 vdu_name = None
5859 kdu_name = None
5860 self._deploy_n2vc(
5861 logging_text=logging_text
5862 + "member_vnf_index={} ".format(member_vnf_index),
5863 db_nsr=db_nsr,
5864 db_vnfr=db_vnfr,
5865 nslcmop_id=nslcmop_id,
5866 nsr_id=nsr_id,
5867 nsi_id=nsi_id,
5868 vnfd_id=vnfd_id,
5869 vdu_id=vdu_id,
5870 kdu_name=kdu_name,
5871 member_vnf_index=member_vnf_index,
5872 vdu_index=vdu_index,
5873 vdu_name=vdu_name,
5874 deploy_params=deploy_params,
5875 descriptor_config=descriptor_config,
5876 base_folder=base_folder,
5877 task_instantiation_info=tasks_dict_info,
5878 stage=stage,
5879 )
5880 vdu_id = vca_info["osm_vdu_id"]
5881 vdur = find_in_list(
5882 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
5883 )
5884 descriptor_config = get_configuration(db_vnfd, vdu_id)
5885 if vdur.get("additionalParams"):
5886 deploy_params_vdu = parse_yaml_strings(
5887 vdur["additionalParams"]
5888 )
5889 else:
5890 deploy_params_vdu = deploy_params
5891 deploy_params_vdu["OSM"] = get_osm_params(
5892 db_vnfr, vdu_id, vdu_count_index=vdu_index
5893 )
5894 if descriptor_config:
5895 vdu_name = None
5896 kdu_name = None
5897 stage[
5898 1
5899 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5900 member_vnf_index, vdu_id, vdu_index
5901 )
5902 stage[2] = step = "Scaling out VCA"
5903 self._write_op_status(op_id=nslcmop_id, stage=stage)
5904 self._deploy_n2vc(
5905 logging_text=logging_text
5906 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5907 member_vnf_index, vdu_id, vdu_index
5908 ),
5909 db_nsr=db_nsr,
5910 db_vnfr=db_vnfr,
5911 nslcmop_id=nslcmop_id,
5912 nsr_id=nsr_id,
5913 nsi_id=nsi_id,
5914 vnfd_id=vnfd_id,
5915 vdu_id=vdu_id,
5916 kdu_name=kdu_name,
5917 member_vnf_index=member_vnf_index,
5918 vdu_index=vdu_index,
5919 vdu_name=vdu_name,
5920 deploy_params=deploy_params_vdu,
5921 descriptor_config=descriptor_config,
5922 base_folder=base_folder,
5923 task_instantiation_info=tasks_dict_info,
5924 stage=stage,
5925 )
5926 else:
5927 kdu_name = vca_info["osm_kdu_id"]
5928 descriptor_config = get_configuration(db_vnfd, kdu_name)
5929 if descriptor_config:
5930 vdu_id = None
5931 kdu_index = int(vca_info["kdu_index"])
5932 vdu_name = None
5933 kdur = next(
5934 x
5935 for x in db_vnfr["kdur"]
5936 if x["kdu-name"] == kdu_name
5937 )
5938 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
5939 if kdur.get("additionalParams"):
5940 deploy_params_kdu = parse_yaml_strings(
5941 kdur["additionalParams"]
5942 )
5943
5944 self._deploy_n2vc(
5945 logging_text=logging_text,
5946 db_nsr=db_nsr,
5947 db_vnfr=db_vnfr,
5948 nslcmop_id=nslcmop_id,
5949 nsr_id=nsr_id,
5950 nsi_id=nsi_id,
5951 vnfd_id=vnfd_id,
5952 vdu_id=vdu_id,
5953 kdu_name=kdu_name,
5954 member_vnf_index=member_vnf_index,
5955 vdu_index=kdu_index,
5956 vdu_name=vdu_name,
5957 deploy_params=deploy_params_kdu,
5958 descriptor_config=descriptor_config,
5959 base_folder=base_folder,
5960 task_instantiation_info=tasks_dict_info,
5961 stage=stage,
5962 )
5963 # SCALE-UP VCA - END
5964 scale_process = None
5965
5966 # POST-SCALE BEGIN
5967 # execute primitive service POST-SCALING
5968 step = "Executing post-scale vnf-config-primitive"
5969 if scaling_descriptor.get("scaling-config-action"):
5970 for scaling_config_action in scaling_descriptor[
5971 "scaling-config-action"
5972 ]:
5973 if (
5974 scaling_config_action.get("trigger") == "post-scale-in"
5975 and scaling_type == "SCALE_IN"
5976 ) or (
5977 scaling_config_action.get("trigger") == "post-scale-out"
5978 and scaling_type == "SCALE_OUT"
5979 ):
5980 vnf_config_primitive = scaling_config_action[
5981 "vnf-config-primitive-name-ref"
5982 ]
5983 step = db_nslcmop_update[
5984 "detailed-status"
5985 ] = "executing post-scale scaling-config-action '{}'".format(
5986 vnf_config_primitive
5987 )
5988
5989 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5990 if db_vnfr.get("additionalParamsForVnf"):
5991 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5992
5993 # look for primitive
5994 for config_primitive in (
5995 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5996 ).get("config-primitive", ()):
5997 if config_primitive["name"] == vnf_config_primitive:
5998 break
5999 else:
6000 raise LcmException(
6001 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6002 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6003 "config-primitive".format(
6004 scaling_group, vnf_config_primitive
6005 )
6006 )
6007 scale_process = "VCA"
6008 db_nsr_update["config-status"] = "configuring post-scaling"
6009 primitive_params = self._map_primitive_params(
6010 config_primitive, {}, vnfr_params
6011 )
6012
6013 # Post-scale retry check: has this sub-operation been executed before?
6014 op_index = self._check_or_add_scale_suboperation(
6015 db_nslcmop,
6016 vnf_index,
6017 vnf_config_primitive,
6018 primitive_params,
6019 "POST-SCALE",
6020 )
6021 if op_index == self.SUBOPERATION_STATUS_SKIP:
6022 # Skip sub-operation
6023 result = "COMPLETED"
6024 result_detail = "Done"
6025 self.logger.debug(
6026 logging_text
6027 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6028 vnf_config_primitive, result, result_detail
6029 )
6030 )
6031 else:
6032 if op_index == self.SUBOPERATION_STATUS_NEW:
6033 # New sub-operation: Get index of this sub-operation
6034 op_index = (
6035 len(db_nslcmop.get("_admin", {}).get("operations"))
6036 - 1
6037 )
6038 self.logger.debug(
6039 logging_text
6040 + "vnf_config_primitive={} New sub-operation".format(
6041 vnf_config_primitive
6042 )
6043 )
6044 else:
6045 # retry: Get registered params for this existing sub-operation
6046 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6047 op_index
6048 ]
6049 vnf_index = op.get("member_vnf_index")
6050 vnf_config_primitive = op.get("primitive")
6051 primitive_params = op.get("primitive_params")
6052 self.logger.debug(
6053 logging_text
6054 + "vnf_config_primitive={} Sub-operation retry".format(
6055 vnf_config_primitive
6056 )
6057 )
6058 # Execute the primitive, either with new (first-time) or registered (retry) args
6059 ee_descriptor_id = config_primitive.get(
6060 "execution-environment-ref"
6061 )
6062 primitive_name = config_primitive.get(
6063 "execution-environment-primitive", vnf_config_primitive
6064 )
6065 ee_id, vca_type = self._look_for_deployed_vca(
6066 nsr_deployed["VCA"],
6067 member_vnf_index=vnf_index,
6068 vdu_id=None,
6069 vdu_count_index=None,
6070 ee_descriptor_id=ee_descriptor_id,
6071 )
6072 result, result_detail = await self._ns_execute_primitive(
6073 ee_id,
6074 primitive_name,
6075 primitive_params,
6076 vca_type=vca_type,
6077 vca_id=vca_id,
6078 )
6079 self.logger.debug(
6080 logging_text
6081 + "vnf_config_primitive={} Done with result {} {}".format(
6082 vnf_config_primitive, result, result_detail
6083 )
6084 )
6085 # Update operationState = COMPLETED | FAILED
6086 self._update_suboperation_status(
6087 db_nslcmop, op_index, result, result_detail
6088 )
6089
6090 if result == "FAILED":
6091 raise LcmException(result_detail)
6092 db_nsr_update["config-status"] = old_config_status
6093 scale_process = None
6094 # POST-SCALE END
6095
6096 db_nsr_update[
6097 "detailed-status"
6098 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6099 db_nsr_update["operational-status"] = (
6100 "running"
6101 if old_operational_status == "failed"
6102 else old_operational_status
6103 )
6104 db_nsr_update["config-status"] = old_config_status
6105 return
6106 except (
6107 ROclient.ROClientException,
6108 DbException,
6109 LcmException,
6110 NgRoException,
6111 ) as e:
6112 self.logger.error(logging_text + "Exit Exception {}".format(e))
6113 exc = e
6114 except asyncio.CancelledError:
6115 self.logger.error(
6116 logging_text + "Cancelled Exception while '{}'".format(step)
6117 )
6118 exc = "Operation was cancelled"
6119 except Exception as e:
6120 exc = traceback.format_exc()
6121 self.logger.critical(
6122 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6123 exc_info=True,
6124 )
6125 finally:
6126 self._write_ns_status(
6127 nsr_id=nsr_id,
6128 ns_state=None,
6129 current_operation="IDLE",
6130 current_operation_id=None,
6131 )
6132 if tasks_dict_info:
6133 stage[1] = "Waiting for pending instantiation tasks."
6134 self.logger.debug(logging_text + stage[1])
6135 exc = await self._wait_for_tasks(
6136 logging_text,
6137 tasks_dict_info,
6138 self.timeout_ns_deploy,
6139 stage,
6140 nslcmop_id,
6141 nsr_id=nsr_id,
6142 )
6143 if exc:
6144 db_nslcmop_update[
6145 "detailed-status"
6146 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6147 nslcmop_operation_state = "FAILED"
6148 if db_nsr:
6149 db_nsr_update["operational-status"] = old_operational_status
6150 db_nsr_update["config-status"] = old_config_status
6151 db_nsr_update["detailed-status"] = ""
6152 if scale_process:
6153 if "VCA" in scale_process:
6154 db_nsr_update["config-status"] = "failed"
6155 if "RO" in scale_process:
6156 db_nsr_update["operational-status"] = "failed"
6157 db_nsr_update[
6158 "detailed-status"
6159 ] = "FAILED scaling nslcmop={} {}: {}".format(
6160 nslcmop_id, step, exc
6161 )
6162 else:
6163 error_description_nslcmop = None
6164 nslcmop_operation_state = "COMPLETED"
6165 db_nslcmop_update["detailed-status"] = "Done"
6166
6167 self._write_op_status(
6168 op_id=nslcmop_id,
6169 stage="",
6170 error_message=error_description_nslcmop,
6171 operation_state=nslcmop_operation_state,
6172 other_update=db_nslcmop_update,
6173 )
6174 if db_nsr:
6175 self._write_ns_status(
6176 nsr_id=nsr_id,
6177 ns_state=None,
6178 current_operation="IDLE",
6179 current_operation_id=None,
6180 other_update=db_nsr_update,
6181 )
6182
6183 if nslcmop_operation_state:
6184 try:
6185 msg = {
6186 "nsr_id": nsr_id,
6187 "nslcmop_id": nslcmop_id,
6188 "operationState": nslcmop_operation_state,
6189 }
6190 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
6191 except Exception as e:
6192 self.logger.error(
6193 logging_text + "kafka_write notification Exception {}".format(e)
6194 )
6195 self.logger.debug(logging_text + "Exit")
6196 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
6197
6198 async def _scale_kdu(
6199 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6200 ):
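"""
Scale KDU resources as described in scaling_info.

For every 'delete' order the terminate config primitives (if defined and no
juju execution environment is used) are executed before scaling; for every
'create' order the initial config primitives are executed after scaling.
"""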
6201 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
6202 for kdu_name in _scaling_info:
6203 for kdu_scaling_info in _scaling_info[kdu_name]:
6204 deployed_kdu, index = get_deployed_kdu(
6205 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
6206 )
6207 cluster_uuid = deployed_kdu["k8scluster-uuid"]
6208 kdu_instance = deployed_kdu["kdu-instance"]
6209 scale = int(kdu_scaling_info["scale"])
6210 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
6211
6212 db_dict = {
6213 "collection": "nsrs",
6214 "filter": {"_id": nsr_id},
6215 "path": "_admin.deployed.K8s.{}".format(index),
6216 }
6217
6218 step = "scaling application {}".format(
6219 kdu_scaling_info["resource-name"]
6220 )
6221 self.logger.debug(logging_text + step)
6222
6223 if kdu_scaling_info["type"] == "delete":
6224 kdu_config = get_configuration(db_vnfd, kdu_name)
6225 if (
6226 kdu_config
6227 and kdu_config.get("terminate-config-primitive")
6228 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6229 ):
6230 terminate_config_primitive_list = kdu_config.get(
6231 "terminate-config-primitive"
6232 )
6233 terminate_config_primitive_list.sort(
6234 key=lambda val: int(val["seq"])
6235 )
6236
6237 for (
6238 terminate_config_primitive
6239 ) in terminate_config_primitive_list:
6240 primitive_params_ = self._map_primitive_params(
6241 terminate_config_primitive, {}, {}
6242 )
6243 step = "execute terminate config primitive"
6244 self.logger.debug(logging_text + step)
6245 await asyncio.wait_for(
6246 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6247 cluster_uuid=cluster_uuid,
6248 kdu_instance=kdu_instance,
6249 primitive_name=terminate_config_primitive["name"],
6250 params=primitive_params_,
6251 db_dict=db_dict,
6252 vca_id=vca_id,
6253 ),
6254 timeout=600,
6255 )
6256
6257 await asyncio.wait_for(
6258 self.k8scluster_map[k8s_cluster_type].scale(
6259 kdu_instance,
6260 scale,
6261 kdu_scaling_info["resource-name"],
6262 vca_id=vca_id,
6263 ),
6264 timeout=self.timeout_vca_on_error,
6265 )
6266
6267 if kdu_scaling_info["type"] == "create":
6268 kdu_config = get_configuration(db_vnfd, kdu_name)
6269 if (
6270 kdu_config
6271 and kdu_config.get("initial-config-primitive")
6272 and get_juju_ee_ref(db_vnfd, kdu_name) is None
6273 ):
6274 initial_config_primitive_list = kdu_config.get(
6275 "initial-config-primitive"
6276 )
6277 initial_config_primitive_list.sort(
6278 key=lambda val: int(val["seq"])
6279 )
6280
6281 for initial_config_primitive in initial_config_primitive_list:
6282 primitive_params_ = self._map_primitive_params(
6283 initial_config_primitive, {}, {}
6284 )
6285 step = "execute initial config primitive"
6286 self.logger.debug(logging_text + step)
6287 await asyncio.wait_for(
6288 self.k8scluster_map[k8s_cluster_type].exec_primitive(
6289 cluster_uuid=cluster_uuid,
6290 kdu_instance=kdu_instance,
6291 primitive_name=initial_config_primitive["name"],
6292 params=primitive_params_,
6293 db_dict=db_dict,
6294 vca_id=vca_id,
6295 ),
6296 timeout=600,
6297 )
6298
6299 async def _scale_ng_ro(
6300 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6301 ):
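"""
Scale VDUs through NG-RO: mark the vnfr VDU records to create/delete, ask the
RO to apply the changes via _instantiate_ng_ro, and update the vnfr again for
the VDUs that were deleted once the RO operation completes.
"""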
6302 nsr_id = db_nslcmop["nsInstanceId"]
6303 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6304 db_vnfrs = {}
6305
6306 # read from db: vnfds for every vnf
6307 db_vnfds = []
6308
6309 # for each vnf in ns, read vnfd
6310 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6311 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6312 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6313 # if we don't have this vnfd yet, read it from db
6314 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6315 # read from db
6316 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6317 db_vnfds.append(vnfd)
6318 n2vc_key = self.n2vc.get_public_key()
6319 n2vc_key_list = [n2vc_key]
6320 self.scale_vnfr(
6321 db_vnfr,
6322 vdu_scaling_info.get("vdu-create"),
6323 vdu_scaling_info.get("vdu-delete"),
6324 mark_delete=True,
6325 )
6326 # db_vnfr has been updated, update db_vnfrs to use it
6327 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6328 await self._instantiate_ng_ro(
6329 logging_text,
6330 nsr_id,
6331 db_nsd,
6332 db_nsr,
6333 db_nslcmop,
6334 db_vnfrs,
6335 db_vnfds,
6336 n2vc_key_list,
6337 stage=stage,
6338 start_deploy=time(),
6339 timeout_ns_deploy=self.timeout_ns_deploy,
6340 )
6341 if vdu_scaling_info.get("vdu-delete"):
6342 self.scale_vnfr(
6343 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6344 )
6345
6346 async def extract_prometheus_scrape_jobs(
6347 self,
6348 ee_id,
6349 artifact_path,
6350 ee_config_descriptor,
6351 vnfr_id,
6352 nsr_id,
6353 target_ip,
6354 ):
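"""
Build the Prometheus scrape jobs for an execution environment.

Looks for a 'prometheus*.j2' artifact in artifact_path, renders it through
parse_job using the vnfr_id (job name), target IP and the exporter service
name/port as variables, and returns the resulting job list tagged with
nsr_id and vnfr_id. Returns None when no template is found.
"""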
6355 # look for a file called 'prometheus*.j2'; if found, use it as the scrape-job template
6356 artifact_content = self.fs.dir_ls(artifact_path)
6357 job_file = next(
6358 (
6359 f
6360 for f in artifact_content
6361 if f.startswith("prometheus") and f.endswith(".j2")
6362 ),
6363 None,
6364 )
6365 if not job_file:
6366 return
6367 with self.fs.file_open((artifact_path, job_file), "r") as f:
6368 job_data = f.read()
6369
6370 # TODO get_service
6371 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6372 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6373 host_port = "80"
6374 vnfr_id = vnfr_id.replace("-", "")
6375 variables = {
6376 "JOB_NAME": vnfr_id,
6377 "TARGET_IP": target_ip,
6378 "EXPORTER_POD_IP": host_name,
6379 "EXPORTER_POD_PORT": host_port,
6380 }
6381 job_list = parse_job(job_data, variables)
6382 # ensure job_name contains the vnfr_id, and add nsr_id and vnfr_id as metadata
6383 for job in job_list:
6384 if (
6385 not isinstance(job.get("job_name"), str)
6386 or vnfr_id not in job["job_name"]
6387 ):
6388 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6389 job["nsr_id"] = nsr_id
6390 job["vnfr_id"] = vnfr_id
6391 return job_list
6392
6393 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6394 """
6395 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6396
6397 :param: vim_account_id: VIM Account ID
6398
6399 :return: (cloud_name, cloud_credential)
6400 """
6401 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6402 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6403
6404 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6405 """
6406 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6407
6408 :param: vim_account_id: VIM Account ID
6409
6410 :return: (cloud_name, cloud_credential)
6411 """
6412 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6413 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")