Fix bug 1865: Manually scaling VDU from 0 to 1 instance fails
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 from typing import Any, Dict, List
21 import yaml
22 import logging
23 import logging.handlers
24 import traceback
25 import json
26 from jinja2 import (
27 Environment,
28 TemplateError,
29 TemplateNotFound,
30 StrictUndefined,
31 UndefinedError,
32 )
33
34 from osm_lcm import ROclient
35 from osm_lcm.data_utils.nsr import (
36 get_deployed_kdu,
37 get_deployed_vca,
38 get_deployed_vca_list,
39 get_nsd,
40 )
41 from osm_lcm.data_utils.vca import (
42 DeployedComponent,
43 DeployedK8sResource,
44 DeployedVCA,
45 EELevel,
46 Relation,
47 EERelation,
48 safe_get_ee_relation,
49 )
50 from osm_lcm.ng_ro import NgRoClient, NgRoException
51 from osm_lcm.lcm_utils import (
52 LcmException,
53 LcmExceptionNoMgmtIP,
54 LcmBase,
55 deep_get,
56 get_iterable,
57 populate_dict,
58 )
59 from osm_lcm.data_utils.nsd import (
60 get_ns_configuration_relation_list,
61 get_vnf_profile,
62 get_vnf_profiles,
63 )
64 from osm_lcm.data_utils.vnfd import (
65 get_relation_list,
66 get_vdu_list,
67 get_vdu_profile,
68 get_ee_sorted_initial_config_primitive_list,
69 get_ee_sorted_terminate_config_primitive_list,
70 get_kdu_list,
71 get_virtual_link_profiles,
72 get_vdu,
73 get_configuration,
74 get_vdu_index,
75 get_scaling_aspect,
76 get_number_of_instances,
77 get_juju_ee_ref,
78 get_kdu_resource_profile,
79 )
80 from osm_lcm.data_utils.list_utils import find_in_list
81 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
82 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
83 from osm_lcm.data_utils.database.vim_account import VimAccountDB
84 from n2vc.definitions import RelationEndpoint
85 from n2vc.k8s_helm_conn import K8sHelmConnector
86 from n2vc.k8s_helm3_conn import K8sHelm3Connector
87 from n2vc.k8s_juju_conn import K8sJujuConnector
88
89 from osm_common.dbbase import DbException
90 from osm_common.fsbase import FsException
91
92 from osm_lcm.data_utils.database.database import Database
93 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
94
95 from n2vc.n2vc_juju_conn import N2VCJujuConnector
96 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
97
98 from osm_lcm.lcm_helm_conn import LCMHelmConn
99 from osm_lcm.prometheus import parse_job
100
101 from copy import copy, deepcopy
102 from time import time
103 from uuid import uuid4
104
105 from random import randint
106
107 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
108
109
class NsLcm(LcmBase):
    """NS lifecycle manager: handles instantiation, termination, scaling and
    actions over NS instances, coordinating RO, N2VC/juju and k8s connectors."""

    # Time for charm from first time at blocked,error status to mark as failed
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution

    # sentinel return values used by the sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
126
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler
        :param lcm_tasks: registry of running LCM tasks shared with the main module
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # database and filesystem singletons are assumed already initialized by the caller
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local mutations do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environments reuse the same n2vc db callback
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 connector (no db status callback)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 connector (no db status callback)
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle connector reports status changes through _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # map from kdu deployment type to the k8s connector handling it
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # map from charm/ee type to the VCA connector handling it
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)
206
207 @staticmethod
208 def increment_ip_mac(ip_mac, vm_index=1):
209 if not isinstance(ip_mac, str):
210 return ip_mac
211 try:
212 # try with ipv4 look for last dot
213 i = ip_mac.rfind(".")
214 if i > 0:
215 i += 1
216 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
217 # try with ipv6 or mac look for last colon. Operate in hex
218 i = ip_mac.rfind(":")
219 if i > 0:
220 i += 1
221 # format in hex, len can be 2 for mac or 4 for ipv6
222 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
223 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
224 )
225 except Exception:
226 pass
227 return None
228
229 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
230
231 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
232
233 try:
234 # TODO filter RO descriptor fields...
235
236 # write to database
237 db_dict = dict()
238 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
239 db_dict["deploymentStatus"] = ro_descriptor
240 self.update_db_2("nsrs", nsrs_id, db_dict)
241
242 except Exception as e:
243 self.logger.warn(
244 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
245 )
246
247 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
248
249 # remove last dot from path (if exists)
250 if path.endswith("."):
251 path = path[:-1]
252
253 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
254 # .format(table, filter, path, updated_data))
255 try:
256
257 nsr_id = filter.get("_id")
258
259 # read ns record from database
260 nsr = self.db.get_one(table="nsrs", q_filter=filter)
261 current_ns_status = nsr.get("nsState")
262
263 # get vca status for NS
264 status_dict = await self.n2vc.get_status(
265 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
266 )
267
268 # vcaStatus
269 db_dict = dict()
270 db_dict["vcaStatus"] = status_dict
271 await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)
272
273 # update configurationStatus for this VCA
274 try:
275 vca_index = int(path[path.rfind(".") + 1 :])
276
277 vca_list = deep_get(
278 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
279 )
280 vca_status = vca_list[vca_index].get("status")
281
282 configuration_status_list = nsr.get("configurationStatus")
283 config_status = configuration_status_list[vca_index].get("status")
284
285 if config_status == "BROKEN" and vca_status != "failed":
286 db_dict["configurationStatus"][vca_index] = "READY"
287 elif config_status != "BROKEN" and vca_status == "failed":
288 db_dict["configurationStatus"][vca_index] = "BROKEN"
289 except Exception as e:
290 # not update configurationStatus
291 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
292
293 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
294 # if nsState = 'DEGRADED' check if all is OK
295 is_degraded = False
296 if current_ns_status in ("READY", "DEGRADED"):
297 error_description = ""
298 # check machines
299 if status_dict.get("machines"):
300 for machine_id in status_dict.get("machines"):
301 machine = status_dict.get("machines").get(machine_id)
302 # check machine agent-status
303 if machine.get("agent-status"):
304 s = machine.get("agent-status").get("status")
305 if s != "started":
306 is_degraded = True
307 error_description += (
308 "machine {} agent-status={} ; ".format(
309 machine_id, s
310 )
311 )
312 # check machine instance status
313 if machine.get("instance-status"):
314 s = machine.get("instance-status").get("status")
315 if s != "running":
316 is_degraded = True
317 error_description += (
318 "machine {} instance-status={} ; ".format(
319 machine_id, s
320 )
321 )
322 # check applications
323 if status_dict.get("applications"):
324 for app_id in status_dict.get("applications"):
325 app = status_dict.get("applications").get(app_id)
326 # check application status
327 if app.get("status"):
328 s = app.get("status").get("status")
329 if s != "active":
330 is_degraded = True
331 error_description += (
332 "application {} status={} ; ".format(app_id, s)
333 )
334
335 if error_description:
336 db_dict["errorDescription"] = error_description
337 if current_ns_status == "READY" and is_degraded:
338 db_dict["nsState"] = "DEGRADED"
339 if current_ns_status == "DEGRADED" and not is_degraded:
340 db_dict["nsState"] = "READY"
341
342 # write to database
343 self.update_db_2("nsrs", nsr_id, db_dict)
344
345 except (asyncio.CancelledError, asyncio.TimeoutError):
346 raise
347 except Exception as e:
348 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
349
350 async def _on_update_k8s_db(
351 self, cluster_uuid, kdu_instance, filter=None, vca_id=None
352 ):
353 """
354 Updating vca status in NSR record
355 :param cluster_uuid: UUID of a k8s cluster
356 :param kdu_instance: The unique name of the KDU instance
357 :param filter: To get nsr_id
358 :return: none
359 """
360
361 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
362 # .format(cluster_uuid, kdu_instance, filter))
363
364 try:
365 nsr_id = filter.get("_id")
366
367 # get vca status for NS
368 vca_status = await self.k8sclusterjuju.status_kdu(
369 cluster_uuid,
370 kdu_instance,
371 complete_status=True,
372 yaml_format=False,
373 vca_id=vca_id,
374 )
375 # vcaStatus
376 db_dict = dict()
377 db_dict["vcaStatus"] = {nsr_id: vca_status}
378
379 await self.k8sclusterjuju.update_vca_status(
380 db_dict["vcaStatus"],
381 kdu_instance,
382 vca_id=vca_id,
383 )
384
385 # write to database
386 self.update_db_2("nsrs", nsr_id, db_dict)
387
388 except (asyncio.CancelledError, asyncio.TimeoutError):
389 raise
390 except Exception as e:
391 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
392
393 @staticmethod
394 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
395 try:
396 env = Environment(undefined=StrictUndefined)
397 template = env.from_string(cloud_init_text)
398 return template.render(additional_params or {})
399 except UndefinedError as e:
400 raise LcmException(
401 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
402 "file, must be provided in the instantiation parameters inside the "
403 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
404 )
405 except (TemplateError, TemplateNotFound) as e:
406 raise LcmException(
407 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
408 vnfd_id, vdu_id, e
409 )
410 )
411
412 def _get_vdu_cloud_init_content(self, vdu, vnfd):
413 cloud_init_content = cloud_init_file = None
414 try:
415 if vdu.get("cloud-init-file"):
416 base_folder = vnfd["_admin"]["storage"]
417 if base_folder["pkg-dir"]:
418 cloud_init_file = "{}/{}/cloud_init/{}".format(
419 base_folder["folder"],
420 base_folder["pkg-dir"],
421 vdu["cloud-init-file"],
422 )
423 else:
424 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
425 base_folder["folder"],
426 vdu["cloud-init-file"],
427 )
428 with self.fs.file_open(cloud_init_file, "r") as ci_file:
429 cloud_init_content = ci_file.read()
430 elif vdu.get("cloud-init"):
431 cloud_init_content = vdu["cloud-init"]
432
433 return cloud_init_content
434 except FsException as e:
435 raise LcmException(
436 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
437 vnfd["id"], vdu["id"], cloud_init_file, e
438 )
439 )
440
441 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
442 vdur = next(
443 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]),
444 {}
445 )
446 additional_params = vdur.get("additionalParams")
447 return parse_yaml_strings(additional_params)
448
449 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
450 """
451 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
452 :param vnfd: input vnfd
453 :param new_id: overrides vnf id if provided
454 :param additionalParams: Instantiation params for VNFs provided
455 :param nsrId: Id of the NSR
456 :return: copy of vnfd
457 """
458 vnfd_RO = deepcopy(vnfd)
459 # remove unused by RO configuration, monitoring, scaling and internal keys
460 vnfd_RO.pop("_id", None)
461 vnfd_RO.pop("_admin", None)
462 vnfd_RO.pop("monitoring-param", None)
463 vnfd_RO.pop("scaling-group-descriptor", None)
464 vnfd_RO.pop("kdu", None)
465 vnfd_RO.pop("k8s-cluster", None)
466 if new_id:
467 vnfd_RO["id"] = new_id
468
469 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
470 for vdu in get_iterable(vnfd_RO, "vdu"):
471 vdu.pop("cloud-init-file", None)
472 vdu.pop("cloud-init", None)
473 return vnfd_RO
474
475 @staticmethod
476 def ip_profile_2_RO(ip_profile):
477 RO_ip_profile = deepcopy(ip_profile)
478 if "dns-server" in RO_ip_profile:
479 if isinstance(RO_ip_profile["dns-server"], list):
480 RO_ip_profile["dns-address"] = []
481 for ds in RO_ip_profile.pop("dns-server"):
482 RO_ip_profile["dns-address"].append(ds["address"])
483 else:
484 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
485 if RO_ip_profile.get("ip-version") == "ipv4":
486 RO_ip_profile["ip-version"] = "IPv4"
487 if RO_ip_profile.get("ip-version") == "ipv6":
488 RO_ip_profile["ip-version"] = "IPv6"
489 if "dhcp-params" in RO_ip_profile:
490 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
491 return RO_ip_profile
492
493 def _get_ro_vim_id_for_vim_account(self, vim_account):
494 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
495 if db_vim["_admin"]["operationalState"] != "ENABLED":
496 raise LcmException(
497 "VIM={} is not available. operationalState={}".format(
498 vim_account, db_vim["_admin"]["operationalState"]
499 )
500 )
501 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
502 return RO_vim_id
503
504 def get_ro_wim_id_for_wim_account(self, wim_account):
505 if isinstance(wim_account, str):
506 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
507 if db_wim["_admin"]["operationalState"] != "ENABLED":
508 raise LcmException(
509 "WIM={} is not available. operationalState={}".format(
510 wim_account, db_wim["_admin"]["operationalState"]
511 )
512 )
513 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
514 return RO_wim_id
515 else:
516 return wim_account
517
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """
        Add or remove VDU records ("vdur") of a VNF record in the database.

        Supports scaling to/from 0 instances (bug 1865): when the last vdur is
        removed it is saved as "vdur-template" so a later scale-out from 0 can
        recreate a vdur from it.

        :param db_vnfr: vnfr content (from DB); its "vdur" list is refreshed in place
        :param vdu_create: dict {vdu-id-ref: number of instances to add}
        :param vdu_delete: dict {vdu-id-ref: number of instances to remove}
        :param mark_delete: when True only mark the vdur status as DELETING
            instead of pulling the records from the database
        :return: None
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # base the new records on the latest existing vdur of this vdu-id
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # scaling from 0 instances: read the template saved in the db
                    self.logger.debug(f"No vdur in the database. Using the vdur-template to scale")
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur['_id']}},
                    )
                for count in range(vdu_count):
                    # each new instance is a copy of the base vdur with fresh
                    # identity and cleared runtime state
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            # fixed addressing: derive the address from the base one
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances; keep the last vdur as template
                # NOTE(review): the template is captured only when exactly one vdur
                # remains before this deletion — confirm multi-instance scale-in to 0
                self.logger.debug(f"Scaling to 0 !, creating the template with the last vdur")
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # only flag the newest vdu_count records as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # build the push document: new vdur records and/or the scale-to-0 template
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
624
625 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
626 """
627 Updates database nsr with the RO info for the created vld
628 :param ns_update_nsr: dictionary to be filled with the updated info
629 :param db_nsr: content of db_nsr. This is also modified
630 :param nsr_desc_RO: nsr descriptor from RO
631 :return: Nothing, LcmException is raised on errors
632 """
633
634 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
635 for net_RO in get_iterable(nsr_desc_RO, "nets"):
636 if vld["id"] != net_RO.get("ns_net_osm_id"):
637 continue
638 vld["vim-id"] = net_RO.get("vim_net_id")
639 vld["name"] = net_RO.get("vim_name")
640 vld["status"] = net_RO.get("status")
641 vld["status-detailed"] = net_RO.get("error_msg")
642 ns_update_nsr["vld.{}".format(vld_index)] = vld
643 break
644 else:
645 raise LcmException(
646 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
647 )
648
649 def set_vnfr_at_error(self, db_vnfrs, error_text):
650 try:
651 for db_vnfr in db_vnfrs.values():
652 vnfr_update = {"status": "ERROR"}
653 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
654 if "status" not in vdur:
655 vdur["status"] = "ERROR"
656 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
657 if error_text:
658 vdur["status-detailed"] = str(error_text)
659 vnfr_update[
660 "vdur.{}.status-detailed".format(vdu_index)
661 ] = "ERROR"
662 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
663 except DbException as e:
664 self.logger.error("Cannot update vnf. {}".format(e))
665
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry matching this member index (for/else raises if none)
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # keep only the first address when RO reports several (";" separated)
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # vdur_RO entries with the same vdu_osm_id are matched by order:
                    # skip as many as the vdur's count-index
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # physical DUs are not deployed by RO; nothing to update
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addressing reported by the VIM
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update internal vlds with the RO network information
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
762
763 def _get_ns_config_info(self, nsr_id):
764 """
765 Generates a mapping between vnf,vdu elements and the N2VC id
766 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
767 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
768 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
769 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
770 """
771 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
772 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
773 mapping = {}
774 ns_config_info = {"osm-config-mapping": mapping}
775 for vca in vca_deployed_list:
776 if not vca["member-vnf-index"]:
777 continue
778 if not vca["vdu_id"]:
779 mapping[vca["member-vnf-index"]] = vca["application"]
780 else:
781 mapping[
782 "{}.{}.{}".format(
783 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
784 )
785 ] = vca["application"]
786 return ns_config_info
787
788 async def _instantiate_ng_ro(
789 self,
790 logging_text,
791 nsr_id,
792 nsd,
793 db_nsr,
794 db_nslcmop,
795 db_vnfrs,
796 db_vnfds,
797 n2vc_key_list,
798 stage,
799 start_deploy,
800 timeout_ns_deploy,
801 ):
802
803 db_vims = {}
804
805 def get_vim_account(vim_account_id):
806 nonlocal db_vims
807 if vim_account_id in db_vims:
808 return db_vims[vim_account_id]
809 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
810 db_vims[vim_account_id] = db_vim
811 return db_vim
812
813 # modify target_vld info with instantiation parameters
814 def parse_vld_instantiation_params(
815 target_vim, target_vld, vld_params, target_sdn
816 ):
817 if vld_params.get("ip-profile"):
818 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
819 "ip-profile"
820 ]
821 if vld_params.get("provider-network"):
822 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
823 "provider-network"
824 ]
825 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
826 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
827 "provider-network"
828 ]["sdn-ports"]
829 if vld_params.get("wimAccountId"):
830 target_wim = "wim:{}".format(vld_params["wimAccountId"])
831 target_vld["vim_info"][target_wim] = {}
832 for param in ("vim-network-name", "vim-network-id"):
833 if vld_params.get(param):
834 if isinstance(vld_params[param], dict):
835 for vim, vim_net in vld_params[param].items():
836 other_target_vim = "vim:" + vim
837 populate_dict(
838 target_vld["vim_info"],
839 (other_target_vim, param.replace("-", "_")),
840 vim_net,
841 )
842 else: # isinstance str
843 target_vld["vim_info"][target_vim][
844 param.replace("-", "_")
845 ] = vld_params[param]
846 if vld_params.get("common_id"):
847 target_vld["common_id"] = vld_params.get("common_id")
848
849 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
850 def update_ns_vld_target(target, ns_params):
851 for vnf_params in ns_params.get("vnf", ()):
852 if vnf_params.get("vimAccountId"):
853 target_vnf = next(
854 (
855 vnfr
856 for vnfr in db_vnfrs.values()
857 if vnf_params["member-vnf-index"]
858 == vnfr["member-vnf-index-ref"]
859 ),
860 None,
861 )
862 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
863 for a_index, a_vld in enumerate(target["ns"]["vld"]):
864 target_vld = find_in_list(
865 get_iterable(vdur, "interfaces"),
866 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
867 )
868 if target_vld:
869 if vnf_params.get("vimAccountId") not in a_vld.get(
870 "vim_info", {}
871 ):
872 target["ns"]["vld"][a_index].get("vim_info").update(
873 {
874 "vim:{}".format(vnf_params["vimAccountId"]): {
875 "vim_network_name": ""
876 }
877 }
878 )
879
880 nslcmop_id = db_nslcmop["_id"]
881 target = {
882 "name": db_nsr["name"],
883 "ns": {"vld": []},
884 "vnf": [],
885 "image": deepcopy(db_nsr["image"]),
886 "flavor": deepcopy(db_nsr["flavor"]),
887 "action_id": nslcmop_id,
888 "cloud_init_content": {},
889 }
890 for image in target["image"]:
891 image["vim_info"] = {}
892 for flavor in target["flavor"]:
893 flavor["vim_info"] = {}
894 if db_nsr.get("affinity-or-anti-affinity-group"):
895 target["affinity-or-anti-affinity-group"] = deepcopy(db_nsr["affinity-or-anti-affinity-group"])
896 for affinity_or_anti_affinity_group in target["affinity-or-anti-affinity-group"]:
897 affinity_or_anti_affinity_group["vim_info"] = {}
898
899 if db_nslcmop.get("lcmOperationType") != "instantiate":
900 # get parameters of instantiation:
901 db_nslcmop_instantiate = self.db.get_list(
902 "nslcmops",
903 {
904 "nsInstanceId": db_nslcmop["nsInstanceId"],
905 "lcmOperationType": "instantiate",
906 },
907 )[-1]
908 ns_params = db_nslcmop_instantiate.get("operationParams")
909 else:
910 ns_params = db_nslcmop.get("operationParams")
911 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
912 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
913
914 cp2target = {}
915 for vld_index, vld in enumerate(db_nsr.get("vld")):
916 target_vim = "vim:{}".format(ns_params["vimAccountId"])
917 target_vld = {
918 "id": vld["id"],
919 "name": vld["name"],
920 "mgmt-network": vld.get("mgmt-network", False),
921 "type": vld.get("type"),
922 "vim_info": {
923 target_vim: {
924 "vim_network_name": vld.get("vim-network-name"),
925 "vim_account_id": ns_params["vimAccountId"],
926 }
927 },
928 }
929 # check if this network needs SDN assist
930 if vld.get("pci-interfaces"):
931 db_vim = get_vim_account(ns_params["vimAccountId"])
932 sdnc_id = db_vim["config"].get("sdn-controller")
933 if sdnc_id:
934 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
935 target_sdn = "sdn:{}".format(sdnc_id)
936 target_vld["vim_info"][target_sdn] = {
937 "sdn": True,
938 "target_vim": target_vim,
939 "vlds": [sdn_vld],
940 "type": vld.get("type"),
941 }
942
943 nsd_vnf_profiles = get_vnf_profiles(nsd)
944 for nsd_vnf_profile in nsd_vnf_profiles:
945 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
946 if cp["virtual-link-profile-id"] == vld["id"]:
947 cp2target[
948 "member_vnf:{}.{}".format(
949 cp["constituent-cpd-id"][0][
950 "constituent-base-element-id"
951 ],
952 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
953 )
954 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
955
956 # check at nsd descriptor, if there is an ip-profile
957 vld_params = {}
958 nsd_vlp = find_in_list(
959 get_virtual_link_profiles(nsd),
960 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
961 == vld["id"],
962 )
963 if (
964 nsd_vlp
965 and nsd_vlp.get("virtual-link-protocol-data")
966 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
967 ):
968 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
969 "l3-protocol-data"
970 ]
971 ip_profile_dest_data = {}
972 if "ip-version" in ip_profile_source_data:
973 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
974 "ip-version"
975 ]
976 if "cidr" in ip_profile_source_data:
977 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
978 "cidr"
979 ]
980 if "gateway-ip" in ip_profile_source_data:
981 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
982 "gateway-ip"
983 ]
984 if "dhcp-enabled" in ip_profile_source_data:
985 ip_profile_dest_data["dhcp-params"] = {
986 "enabled": ip_profile_source_data["dhcp-enabled"]
987 }
988 vld_params["ip-profile"] = ip_profile_dest_data
989
990 # update vld_params with instantiation params
991 vld_instantiation_params = find_in_list(
992 get_iterable(ns_params, "vld"),
993 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
994 )
995 if vld_instantiation_params:
996 vld_params.update(vld_instantiation_params)
997 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
998 target["ns"]["vld"].append(target_vld)
999 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1000 update_ns_vld_target(target, ns_params)
1001
1002 for vnfr in db_vnfrs.values():
1003 vnfd = find_in_list(
1004 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1005 )
1006 vnf_params = find_in_list(
1007 get_iterable(ns_params, "vnf"),
1008 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1009 )
1010 target_vnf = deepcopy(vnfr)
1011 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1012 for vld in target_vnf.get("vld", ()):
1013 # check if connected to a ns.vld, to fill target'
1014 vnf_cp = find_in_list(
1015 vnfd.get("int-virtual-link-desc", ()),
1016 lambda cpd: cpd.get("id") == vld["id"],
1017 )
1018 if vnf_cp:
1019 ns_cp = "member_vnf:{}.{}".format(
1020 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1021 )
1022 if cp2target.get(ns_cp):
1023 vld["target"] = cp2target[ns_cp]
1024
1025 vld["vim_info"] = {
1026 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1027 }
1028 # check if this network needs SDN assist
1029 target_sdn = None
1030 if vld.get("pci-interfaces"):
1031 db_vim = get_vim_account(vnfr["vim-account-id"])
1032 sdnc_id = db_vim["config"].get("sdn-controller")
1033 if sdnc_id:
1034 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1035 target_sdn = "sdn:{}".format(sdnc_id)
1036 vld["vim_info"][target_sdn] = {
1037 "sdn": True,
1038 "target_vim": target_vim,
1039 "vlds": [sdn_vld],
1040 "type": vld.get("type"),
1041 }
1042
1043 # check at vnfd descriptor, if there is an ip-profile
1044 vld_params = {}
1045 vnfd_vlp = find_in_list(
1046 get_virtual_link_profiles(vnfd),
1047 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1048 )
1049 if (
1050 vnfd_vlp
1051 and vnfd_vlp.get("virtual-link-protocol-data")
1052 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1053 ):
1054 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1055 "l3-protocol-data"
1056 ]
1057 ip_profile_dest_data = {}
1058 if "ip-version" in ip_profile_source_data:
1059 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1060 "ip-version"
1061 ]
1062 if "cidr" in ip_profile_source_data:
1063 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1064 "cidr"
1065 ]
1066 if "gateway-ip" in ip_profile_source_data:
1067 ip_profile_dest_data[
1068 "gateway-address"
1069 ] = ip_profile_source_data["gateway-ip"]
1070 if "dhcp-enabled" in ip_profile_source_data:
1071 ip_profile_dest_data["dhcp-params"] = {
1072 "enabled": ip_profile_source_data["dhcp-enabled"]
1073 }
1074
1075 vld_params["ip-profile"] = ip_profile_dest_data
1076 # update vld_params with instantiation params
1077 if vnf_params:
1078 vld_instantiation_params = find_in_list(
1079 get_iterable(vnf_params, "internal-vld"),
1080 lambda i_vld: i_vld["name"] == vld["id"],
1081 )
1082 if vld_instantiation_params:
1083 vld_params.update(vld_instantiation_params)
1084 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1085
1086 vdur_list = []
1087 for vdur in target_vnf.get("vdur", ()):
1088 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1089 continue # This vdu must not be created
1090 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1091
1092 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1093
1094 if ssh_keys_all:
1095 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1096 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1097 if (
1098 vdu_configuration
1099 and vdu_configuration.get("config-access")
1100 and vdu_configuration.get("config-access").get("ssh-access")
1101 ):
1102 vdur["ssh-keys"] = ssh_keys_all
1103 vdur["ssh-access-required"] = vdu_configuration[
1104 "config-access"
1105 ]["ssh-access"]["required"]
1106 elif (
1107 vnf_configuration
1108 and vnf_configuration.get("config-access")
1109 and vnf_configuration.get("config-access").get("ssh-access")
1110 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1111 ):
1112 vdur["ssh-keys"] = ssh_keys_all
1113 vdur["ssh-access-required"] = vnf_configuration[
1114 "config-access"
1115 ]["ssh-access"]["required"]
1116 elif ssh_keys_instantiation and find_in_list(
1117 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1118 ):
1119 vdur["ssh-keys"] = ssh_keys_instantiation
1120
1121 self.logger.debug("NS > vdur > {}".format(vdur))
1122
1123 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1124 # cloud-init
1125 if vdud.get("cloud-init-file"):
1126 vdur["cloud-init"] = "{}:file:{}".format(
1127 vnfd["_id"], vdud.get("cloud-init-file")
1128 )
1129 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1130 if vdur["cloud-init"] not in target["cloud_init_content"]:
1131 base_folder = vnfd["_admin"]["storage"]
1132 if base_folder["pkg-dir"]:
1133 cloud_init_file = "{}/{}/cloud_init/{}".format(
1134 base_folder["folder"],
1135 base_folder["pkg-dir"],
1136 vdud.get("cloud-init-file"),
1137 )
1138 else:
1139 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1140 base_folder["folder"],
1141 vdud.get("cloud-init-file"),
1142 )
1143 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1144 target["cloud_init_content"][
1145 vdur["cloud-init"]
1146 ] = ci_file.read()
1147 elif vdud.get("cloud-init"):
1148 vdur["cloud-init"] = "{}:vdu:{}".format(
1149 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1150 )
1151 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1152 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1153 "cloud-init"
1154 ]
1155 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1156 deploy_params_vdu = self._format_additional_params(
1157 vdur.get("additionalParams") or {}
1158 )
1159 deploy_params_vdu["OSM"] = get_osm_params(
1160 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1161 )
1162 vdur["additionalParams"] = deploy_params_vdu
1163
1164 # flavor
1165 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1166 if target_vim not in ns_flavor["vim_info"]:
1167 ns_flavor["vim_info"][target_vim] = {}
1168
1169 # deal with images
1170 # in case alternative images are provided we must check if they should be applied
1171 # for the vim_type, modify the vim_type taking into account
1172 ns_image_id = int(vdur["ns-image-id"])
1173 if vdur.get("alt-image-ids"):
1174 db_vim = get_vim_account(vnfr["vim-account-id"])
1175 vim_type = db_vim["vim_type"]
1176 for alt_image_id in vdur.get("alt-image-ids"):
1177 ns_alt_image = target["image"][int(alt_image_id)]
1178 if vim_type == ns_alt_image.get("vim-type"):
1179 # must use alternative image
1180 self.logger.debug(
1181 "use alternative image id: {}".format(alt_image_id)
1182 )
1183 ns_image_id = alt_image_id
1184 vdur["ns-image-id"] = ns_image_id
1185 break
1186 ns_image = target["image"][int(ns_image_id)]
1187 if target_vim not in ns_image["vim_info"]:
1188 ns_image["vim_info"][target_vim] = {}
1189
1190 # Affinity groups
1191 if vdur.get("affinity-or-anti-affinity-group-id"):
1192 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1193 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1194 if target_vim not in ns_ags["vim_info"]:
1195 ns_ags["vim_info"][target_vim] = {}
1196
1197 vdur["vim_info"] = {target_vim: {}}
1198 # instantiation parameters
1199 # if vnf_params:
1200 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1201 # vdud["id"]), None)
1202 vdur_list.append(vdur)
1203 target_vnf["vdur"] = vdur_list
1204 target["vnf"].append(target_vnf)
1205
1206 desc = await self.RO.deploy(nsr_id, target)
1207 self.logger.debug("RO return > {}".format(desc))
1208 action_id = desc["action_id"]
1209 await self._wait_ng_ro(
1210 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1211 )
1212
1213 # Updating NSR
1214 db_nsr_update = {
1215 "_admin.deployed.RO.operational-status": "running",
1216 "detailed-status": " ".join(stage),
1217 }
1218 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1219 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1220 self._write_op_status(nslcmop_id, stage)
1221 self.logger.debug(
1222 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1223 )
1224 return
1225
1226 async def _wait_ng_ro(
1227 self,
1228 nsr_id,
1229 action_id,
1230 nslcmop_id=None,
1231 start_time=None,
1232 timeout=600,
1233 stage=None,
1234 ):
1235 detailed_status_old = None
1236 db_nsr_update = {}
1237 start_time = start_time or time()
1238 while time() <= start_time + timeout:
1239 desc_status = await self.RO.status(nsr_id, action_id)
1240 self.logger.debug("Wait NG RO > {}".format(desc_status))
1241 if desc_status["status"] == "FAILED":
1242 raise NgRoException(desc_status["details"])
1243 elif desc_status["status"] == "BUILD":
1244 if stage:
1245 stage[2] = "VIM: ({})".format(desc_status["details"])
1246 elif desc_status["status"] == "DONE":
1247 if stage:
1248 stage[2] = "Deployed at VIM"
1249 break
1250 else:
1251 assert False, "ROclient.check_ns_status returns unknown {}".format(
1252 desc_status["status"]
1253 )
1254 if stage and nslcmop_id and stage[2] != detailed_status_old:
1255 detailed_status_old = stage[2]
1256 db_nsr_update["detailed-status"] = " ".join(stage)
1257 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1258 self._write_op_status(nslcmop_id, stage)
1259 await asyncio.sleep(15, loop=self.loop)
1260 else: # timeout_ns_deploy
1261 raise NgRoException("Timeout waiting ns to deploy")
1262
1263 async def _terminate_ng_ro(
1264 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1265 ):
1266 db_nsr_update = {}
1267 failed_detail = []
1268 action_id = None
1269 start_deploy = time()
1270 try:
1271 target = {
1272 "ns": {"vld": []},
1273 "vnf": [],
1274 "image": [],
1275 "flavor": [],
1276 "action_id": nslcmop_id,
1277 }
1278 desc = await self.RO.deploy(nsr_id, target)
1279 action_id = desc["action_id"]
1280 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1281 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1282 self.logger.debug(
1283 logging_text
1284 + "ns terminate action at RO. action_id={}".format(action_id)
1285 )
1286
1287 # wait until done
1288 delete_timeout = 20 * 60 # 20 minutes
1289 await self._wait_ng_ro(
1290 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
1291 )
1292
1293 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1294 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1295 # delete all nsr
1296 await self.RO.delete(nsr_id)
1297 except Exception as e:
1298 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1299 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1300 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1301 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1302 self.logger.debug(
1303 logging_text + "RO_action_id={} already deleted".format(action_id)
1304 )
1305 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1306 failed_detail.append("delete conflict: {}".format(e))
1307 self.logger.debug(
1308 logging_text
1309 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1310 )
1311 else:
1312 failed_detail.append("delete error: {}".format(e))
1313 self.logger.error(
1314 logging_text
1315 + "RO_action_id={} delete error: {}".format(action_id, e)
1316 )
1317
1318 if failed_detail:
1319 stage[2] = "Error deleting from VIM"
1320 else:
1321 stage[2] = "Deleted from VIM"
1322 db_nsr_update["detailed-status"] = " ".join(stage)
1323 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1324 self._write_op_status(nslcmop_id, stage)
1325
1326 if failed_detail:
1327 raise LcmException("; ".join(failed_detail))
1328 return
1329
1330 async def instantiate_RO(
1331 self,
1332 logging_text,
1333 nsr_id,
1334 nsd,
1335 db_nsr,
1336 db_nslcmop,
1337 db_vnfrs,
1338 db_vnfds,
1339 n2vc_key_list,
1340 stage,
1341 ):
1342 """
1343 Instantiate at RO
1344 :param logging_text: preffix text to use at logging
1345 :param nsr_id: nsr identity
1346 :param nsd: database content of ns descriptor
1347 :param db_nsr: database content of ns record
1348 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1349 :param db_vnfrs:
1350 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1351 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1352 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1353 :return: None or exception
1354 """
1355 try:
1356 start_deploy = time()
1357 ns_params = db_nslcmop.get("operationParams")
1358 if ns_params and ns_params.get("timeout_ns_deploy"):
1359 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1360 else:
1361 timeout_ns_deploy = self.timeout.get(
1362 "ns_deploy", self.timeout_ns_deploy
1363 )
1364
1365 # Check for and optionally request placement optimization. Database will be updated if placement activated
1366 stage[2] = "Waiting for Placement."
1367 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1368 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1369 for vnfr in db_vnfrs.values():
1370 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1371 break
1372 else:
1373 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1374
1375 return await self._instantiate_ng_ro(
1376 logging_text,
1377 nsr_id,
1378 nsd,
1379 db_nsr,
1380 db_nslcmop,
1381 db_vnfrs,
1382 db_vnfds,
1383 n2vc_key_list,
1384 stage,
1385 start_deploy,
1386 timeout_ns_deploy,
1387 )
1388 except Exception as e:
1389 stage[2] = "ERROR deploying at VIM"
1390 self.set_vnfr_at_error(db_vnfrs, str(e))
1391 self.logger.error(
1392 "Error deploying at VIM {}".format(e),
1393 exc_info=not isinstance(
1394 e,
1395 (
1396 ROclient.ROClientException,
1397 LcmException,
1398 DbException,
1399 NgRoException,
1400 ),
1401 ),
1402 )
1403 raise
1404
1405 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1406 """
1407 Wait for kdu to be up, get ip address
1408 :param logging_text: prefix use for logging
1409 :param nsr_id:
1410 :param vnfr_id:
1411 :param kdu_name:
1412 :return: IP address
1413 """
1414
1415 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1416 nb_tries = 0
1417
1418 while nb_tries < 360:
1419 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1420 kdur = next(
1421 (
1422 x
1423 for x in get_iterable(db_vnfr, "kdur")
1424 if x.get("kdu-name") == kdu_name
1425 ),
1426 None,
1427 )
1428 if not kdur:
1429 raise LcmException(
1430 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1431 )
1432 if kdur.get("status"):
1433 if kdur["status"] in ("READY", "ENABLED"):
1434 return kdur.get("ip-address")
1435 else:
1436 raise LcmException(
1437 "target KDU={} is in error state".format(kdu_name)
1438 )
1439
1440 await asyncio.sleep(10, loop=self.loop)
1441 nb_tries += 1
1442 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1443
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: NS record id
        :param vnfr_id: VNF record id, used to read the vdur list from the database
        :param vdu_id: target vdu id; None means the VNF management address is used
        :param vdu_index: count-index of the target vdu (only used when vdu_id is set)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on error state, target not found, or retry exhaustion
        """

        # self.logger.debug(logging_text + "Starting wait_kdu_up")
        nb_tries = 0

        # outer retry loop: each iteration sleeps 10s; ro_retries caps it at
        # 360 iterations (~1 hour). nb_tries separately counts legacy-RO key
        # injection failures (max 20).
        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur that owns the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # match by vdu id AND count-index (scaled instances share id)
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs have no VIM lifecycle, so they count as ready immediately
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            # VM not ready yet: keep polling
            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is not possible on physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # new-generation RO: inject via a deploy action and wait
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # legacy RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            # no VM reported success: surface the last description
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # legacy RO failures are retried up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the ip address is all the caller needs
                break

        return ip_address
1620
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id, used to re-read deployment status each cycle
        :param vca_deployed_list: list of deployed VCAs ("_admin.deployed.VCA")
        :param vca_index: index of this VCA within vca_deployed_list
        :raises LcmException: if a dependency reaches BROKEN state or the wait
            times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): the counter decreases by 1 per 10-second sleep, so the
        # effective wait is up to ~300 * 10 s (~50 minutes), not 300 seconds —
        # confirm whether the intended unit is iterations or seconds.
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a dependency applies when this VCA is NS-level (it has no
                # member-vnf-index) or both belong to the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # dependency still in progress: stop scanning, sleep, retry
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1658
1659 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1660 vca_id = None
1661 if db_vnfr:
1662 vca_id = deep_get(db_vnfr, ("vca-id",))
1663 elif db_nsr:
1664 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1665 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1666 return vca_id
1667
1668 async def instantiate_N2VC(
1669 self,
1670 logging_text,
1671 vca_index,
1672 nsi_id,
1673 db_nsr,
1674 db_vnfr,
1675 vdu_id,
1676 kdu_name,
1677 vdu_index,
1678 config_descriptor,
1679 deploy_params,
1680 base_folder,
1681 nslcmop_id,
1682 stage,
1683 vca_type,
1684 vca_name,
1685 ee_config_descriptor,
1686 ):
1687 nsr_id = db_nsr["_id"]
1688 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1689 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1690 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1691 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1692 db_dict = {
1693 "collection": "nsrs",
1694 "filter": {"_id": nsr_id},
1695 "path": db_update_entry,
1696 }
1697 step = ""
1698 try:
1699
1700 element_type = "NS"
1701 element_under_configuration = nsr_id
1702
1703 vnfr_id = None
1704 if db_vnfr:
1705 vnfr_id = db_vnfr["_id"]
1706 osm_config["osm"]["vnf_id"] = vnfr_id
1707
1708 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1709
1710 if vca_type == "native_charm":
1711 index_number = 0
1712 else:
1713 index_number = vdu_index or 0
1714
1715 if vnfr_id:
1716 element_type = "VNF"
1717 element_under_configuration = vnfr_id
1718 namespace += ".{}-{}".format(vnfr_id, index_number)
1719 if vdu_id:
1720 namespace += ".{}-{}".format(vdu_id, index_number)
1721 element_type = "VDU"
1722 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1723 osm_config["osm"]["vdu_id"] = vdu_id
1724 elif kdu_name:
1725 namespace += ".{}".format(kdu_name)
1726 element_type = "KDU"
1727 element_under_configuration = kdu_name
1728 osm_config["osm"]["kdu_name"] = kdu_name
1729
1730 # Get artifact path
1731 if base_folder["pkg-dir"]:
1732 artifact_path = "{}/{}/{}/{}".format(
1733 base_folder["folder"],
1734 base_folder["pkg-dir"],
1735 "charms"
1736 if vca_type
1737 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1738 else "helm-charts",
1739 vca_name,
1740 )
1741 else:
1742 artifact_path = "{}/Scripts/{}/{}/".format(
1743 base_folder["folder"],
1744 "charms"
1745 if vca_type
1746 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1747 else "helm-charts",
1748 vca_name,
1749 )
1750
1751 self.logger.debug("Artifact path > {}".format(artifact_path))
1752
1753 # get initial_config_primitive_list that applies to this element
1754 initial_config_primitive_list = config_descriptor.get(
1755 "initial-config-primitive"
1756 )
1757
1758 self.logger.debug(
1759 "Initial config primitive list > {}".format(
1760 initial_config_primitive_list
1761 )
1762 )
1763
1764 # add config if not present for NS charm
1765 ee_descriptor_id = ee_config_descriptor.get("id")
1766 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1767 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1768 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1769 )
1770
1771 self.logger.debug(
1772 "Initial config primitive list #2 > {}".format(
1773 initial_config_primitive_list
1774 )
1775 )
1776 # n2vc_redesign STEP 3.1
1777 # find old ee_id if exists
1778 ee_id = vca_deployed.get("ee_id")
1779
1780 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1781 # create or register execution environment in VCA
1782 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1783
1784 self._write_configuration_status(
1785 nsr_id=nsr_id,
1786 vca_index=vca_index,
1787 status="CREATING",
1788 element_under_configuration=element_under_configuration,
1789 element_type=element_type,
1790 )
1791
1792 step = "create execution environment"
1793 self.logger.debug(logging_text + step)
1794
1795 ee_id = None
1796 credentials = None
1797 if vca_type == "k8s_proxy_charm":
1798 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1799 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1800 namespace=namespace,
1801 artifact_path=artifact_path,
1802 db_dict=db_dict,
1803 vca_id=vca_id,
1804 )
1805 elif vca_type == "helm" or vca_type == "helm-v3":
1806 ee_id, credentials = await self.vca_map[
1807 vca_type
1808 ].create_execution_environment(
1809 namespace=namespace,
1810 reuse_ee_id=ee_id,
1811 db_dict=db_dict,
1812 config=osm_config,
1813 artifact_path=artifact_path,
1814 vca_type=vca_type,
1815 )
1816 else:
1817 ee_id, credentials = await self.vca_map[
1818 vca_type
1819 ].create_execution_environment(
1820 namespace=namespace,
1821 reuse_ee_id=ee_id,
1822 db_dict=db_dict,
1823 vca_id=vca_id,
1824 )
1825
1826 elif vca_type == "native_charm":
1827 step = "Waiting to VM being up and getting IP address"
1828 self.logger.debug(logging_text + step)
1829 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1830 logging_text,
1831 nsr_id,
1832 vnfr_id,
1833 vdu_id,
1834 vdu_index,
1835 user=None,
1836 pub_key=None,
1837 )
1838 credentials = {"hostname": rw_mgmt_ip}
1839 # get username
1840 username = deep_get(
1841 config_descriptor, ("config-access", "ssh-access", "default-user")
1842 )
1843 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1844 # merged. Meanwhile let's get username from initial-config-primitive
1845 if not username and initial_config_primitive_list:
1846 for config_primitive in initial_config_primitive_list:
1847 for param in config_primitive.get("parameter", ()):
1848 if param["name"] == "ssh-username":
1849 username = param["value"]
1850 break
1851 if not username:
1852 raise LcmException(
1853 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1854 "'config-access.ssh-access.default-user'"
1855 )
1856 credentials["username"] = username
1857 # n2vc_redesign STEP 3.2
1858
1859 self._write_configuration_status(
1860 nsr_id=nsr_id,
1861 vca_index=vca_index,
1862 status="REGISTERING",
1863 element_under_configuration=element_under_configuration,
1864 element_type=element_type,
1865 )
1866
1867 step = "register execution environment {}".format(credentials)
1868 self.logger.debug(logging_text + step)
1869 ee_id = await self.vca_map[vca_type].register_execution_environment(
1870 credentials=credentials,
1871 namespace=namespace,
1872 db_dict=db_dict,
1873 vca_id=vca_id,
1874 )
1875
1876 # for compatibility with MON/POL modules, the need model and application name at database
1877 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1878 ee_id_parts = ee_id.split(".")
1879 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1880 if len(ee_id_parts) >= 2:
1881 model_name = ee_id_parts[0]
1882 application_name = ee_id_parts[1]
1883 db_nsr_update[db_update_entry + "model"] = model_name
1884 db_nsr_update[db_update_entry + "application"] = application_name
1885
1886 # n2vc_redesign STEP 3.3
1887 step = "Install configuration Software"
1888
1889 self._write_configuration_status(
1890 nsr_id=nsr_id,
1891 vca_index=vca_index,
1892 status="INSTALLING SW",
1893 element_under_configuration=element_under_configuration,
1894 element_type=element_type,
1895 other_update=db_nsr_update,
1896 )
1897
1898 # TODO check if already done
1899 self.logger.debug(logging_text + step)
1900 config = None
1901 if vca_type == "native_charm":
1902 config_primitive = next(
1903 (p for p in initial_config_primitive_list if p["name"] == "config"),
1904 None,
1905 )
1906 if config_primitive:
1907 config = self._map_primitive_params(
1908 config_primitive, {}, deploy_params
1909 )
1910 num_units = 1
1911 if vca_type == "lxc_proxy_charm":
1912 if element_type == "NS":
1913 num_units = db_nsr.get("config-units") or 1
1914 elif element_type == "VNF":
1915 num_units = db_vnfr.get("config-units") or 1
1916 elif element_type == "VDU":
1917 for v in db_vnfr["vdur"]:
1918 if vdu_id == v["vdu-id-ref"]:
1919 num_units = v.get("config-units") or 1
1920 break
1921 if vca_type != "k8s_proxy_charm":
1922 await self.vca_map[vca_type].install_configuration_sw(
1923 ee_id=ee_id,
1924 artifact_path=artifact_path,
1925 db_dict=db_dict,
1926 config=config,
1927 num_units=num_units,
1928 vca_id=vca_id,
1929 vca_type=vca_type,
1930 )
1931
1932 # write in db flag of configuration_sw already installed
1933 self.update_db_2(
1934 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1935 )
1936
1937 # add relations for this VCA (wait for other peers related with this VCA)
1938 await self._add_vca_relations(
1939 logging_text=logging_text,
1940 nsr_id=nsr_id,
1941 vca_type=vca_type,
1942 vca_index=vca_index,
1943 )
1944
1945 # if SSH access is required, then get execution environment SSH public
1946 # if native charm we have waited already to VM be UP
1947 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1948 pub_key = None
1949 user = None
1950 # self.logger.debug("get ssh key block")
1951 if deep_get(
1952 config_descriptor, ("config-access", "ssh-access", "required")
1953 ):
1954 # self.logger.debug("ssh key needed")
1955 # Needed to inject a ssh key
1956 user = deep_get(
1957 config_descriptor,
1958 ("config-access", "ssh-access", "default-user"),
1959 )
1960 step = "Install configuration Software, getting public ssh key"
1961 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1962 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1963 )
1964
1965 step = "Insert public key into VM user={} ssh_key={}".format(
1966 user, pub_key
1967 )
1968 else:
1969 # self.logger.debug("no need to get ssh key")
1970 step = "Waiting to VM being up and getting IP address"
1971 self.logger.debug(logging_text + step)
1972
1973 # n2vc_redesign STEP 5.1
1974 # wait for RO (ip-address) Insert pub_key into VM
1975 if vnfr_id:
1976 if kdu_name:
1977 rw_mgmt_ip = await self.wait_kdu_up(
1978 logging_text, nsr_id, vnfr_id, kdu_name
1979 )
1980 else:
1981 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1982 logging_text,
1983 nsr_id,
1984 vnfr_id,
1985 vdu_id,
1986 vdu_index,
1987 user=user,
1988 pub_key=pub_key,
1989 )
1990 else:
1991 rw_mgmt_ip = None # This is for a NS configuration
1992
1993 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
1994
1995 # store rw_mgmt_ip in deploy params for later replacement
1996 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1997
1998 # n2vc_redesign STEP 6 Execute initial config primitive
1999 step = "execute initial config primitive"
2000
2001 # wait for dependent primitives execution (NS -> VNF -> VDU)
2002 if initial_config_primitive_list:
2003 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2004
2005 # stage, in function of element type: vdu, kdu, vnf or ns
2006 my_vca = vca_deployed_list[vca_index]
2007 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2008 # VDU or KDU
2009 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2010 elif my_vca.get("member-vnf-index"):
2011 # VNF
2012 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2013 else:
2014 # NS
2015 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2016
2017 self._write_configuration_status(
2018 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2019 )
2020
2021 self._write_op_status(op_id=nslcmop_id, stage=stage)
2022
2023 check_if_terminated_needed = True
2024 for initial_config_primitive in initial_config_primitive_list:
2025 # adding information on the vca_deployed if it is a NS execution environment
2026 if not vca_deployed["member-vnf-index"]:
2027 deploy_params["ns_config_info"] = json.dumps(
2028 self._get_ns_config_info(nsr_id)
2029 )
2030 # TODO check if already done
2031 primitive_params_ = self._map_primitive_params(
2032 initial_config_primitive, {}, deploy_params
2033 )
2034
2035 step = "execute primitive '{}' params '{}'".format(
2036 initial_config_primitive["name"], primitive_params_
2037 )
2038 self.logger.debug(logging_text + step)
2039 await self.vca_map[vca_type].exec_primitive(
2040 ee_id=ee_id,
2041 primitive_name=initial_config_primitive["name"],
2042 params_dict=primitive_params_,
2043 db_dict=db_dict,
2044 vca_id=vca_id,
2045 vca_type=vca_type,
2046 )
2047 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2048 if check_if_terminated_needed:
2049 if config_descriptor.get("terminate-config-primitive"):
2050 self.update_db_2(
2051 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2052 )
2053 check_if_terminated_needed = False
2054
2055 # TODO register in database that primitive is done
2056
2057 # STEP 7 Configure metrics
2058 if vca_type == "helm" or vca_type == "helm-v3":
2059 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2060 ee_id=ee_id,
2061 artifact_path=artifact_path,
2062 ee_config_descriptor=ee_config_descriptor,
2063 vnfr_id=vnfr_id,
2064 nsr_id=nsr_id,
2065 target_ip=rw_mgmt_ip,
2066 )
2067 if prometheus_jobs:
2068 self.update_db_2(
2069 "nsrs",
2070 nsr_id,
2071 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2072 )
2073
2074 for job in prometheus_jobs:
2075 self.db.set_one(
2076 "prometheus_jobs",
2077 {"job_name": job["job_name"]},
2078 job,
2079 upsert=True,
2080 fail_on_empty=False,
2081 )
2082
2083 step = "instantiated at VCA"
2084 self.logger.debug(logging_text + step)
2085
2086 self._write_configuration_status(
2087 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2088 )
2089
2090 except Exception as e: # TODO not use Exception but N2VC exception
2091 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2092 if not isinstance(
2093 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2094 ):
2095 self.logger.error(
2096 "Exception while {} : {}".format(step, e), exc_info=True
2097 )
2098 self._write_configuration_status(
2099 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2100 )
2101 raise LcmException("{} {}".format(step, e)) from e
2102
2103 def _write_ns_status(
2104 self,
2105 nsr_id: str,
2106 ns_state: str,
2107 current_operation: str,
2108 current_operation_id: str,
2109 error_description: str = None,
2110 error_detail: str = None,
2111 other_update: dict = None,
2112 ):
2113 """
2114 Update db_nsr fields.
2115 :param nsr_id:
2116 :param ns_state:
2117 :param current_operation:
2118 :param current_operation_id:
2119 :param error_description:
2120 :param error_detail:
2121 :param other_update: Other required changes at database if provided, will be cleared
2122 :return:
2123 """
2124 try:
2125 db_dict = other_update or {}
2126 db_dict[
2127 "_admin.nslcmop"
2128 ] = current_operation_id # for backward compatibility
2129 db_dict["_admin.current-operation"] = current_operation_id
2130 db_dict["_admin.operation-type"] = (
2131 current_operation if current_operation != "IDLE" else None
2132 )
2133 db_dict["currentOperation"] = current_operation
2134 db_dict["currentOperationID"] = current_operation_id
2135 db_dict["errorDescription"] = error_description
2136 db_dict["errorDetail"] = error_detail
2137
2138 if ns_state:
2139 db_dict["nsState"] = ns_state
2140 self.update_db_2("nsrs", nsr_id, db_dict)
2141 except DbException as e:
2142 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2143
2144 def _write_op_status(
2145 self,
2146 op_id: str,
2147 stage: list = None,
2148 error_message: str = None,
2149 queuePosition: int = 0,
2150 operation_state: str = None,
2151 other_update: dict = None,
2152 ):
2153 try:
2154 db_dict = other_update or {}
2155 db_dict["queuePosition"] = queuePosition
2156 if isinstance(stage, list):
2157 db_dict["stage"] = stage[0]
2158 db_dict["detailed-status"] = " ".join(stage)
2159 elif stage is not None:
2160 db_dict["stage"] = str(stage)
2161
2162 if error_message is not None:
2163 db_dict["errorMessage"] = error_message
2164 if operation_state is not None:
2165 db_dict["operationState"] = operation_state
2166 db_dict["statusEnteredTime"] = time()
2167 self.update_db_2("nslcmops", op_id, db_dict)
2168 except DbException as e:
2169 self.logger.warn(
2170 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2171 )
2172
2173 def _write_all_config_status(self, db_nsr: dict, status: str):
2174 try:
2175 nsr_id = db_nsr["_id"]
2176 # configurationStatus
2177 config_status = db_nsr.get("configurationStatus")
2178 if config_status:
2179 db_nsr_update = {
2180 "configurationStatus.{}.status".format(index): status
2181 for index, v in enumerate(config_status)
2182 if v
2183 }
2184 # update status
2185 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2186
2187 except DbException as e:
2188 self.logger.warn(
2189 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2190 )
2191
2192 def _write_configuration_status(
2193 self,
2194 nsr_id: str,
2195 vca_index: int,
2196 status: str = None,
2197 element_under_configuration: str = None,
2198 element_type: str = None,
2199 other_update: dict = None,
2200 ):
2201
2202 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2203 # .format(vca_index, status))
2204
2205 try:
2206 db_path = "configurationStatus.{}.".format(vca_index)
2207 db_dict = other_update or {}
2208 if status:
2209 db_dict[db_path + "status"] = status
2210 if element_under_configuration:
2211 db_dict[
2212 db_path + "elementUnderConfiguration"
2213 ] = element_under_configuration
2214 if element_type:
2215 db_dict[db_path + "elementType"] = element_type
2216 self.update_db_2("nsrs", nsr_id, db_dict)
2217 except DbException as e:
2218 self.logger.warn(
2219 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2220 status, nsr_id, vca_index, e
2221 )
2222 )
2223
2224 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2225 """
2226 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2227 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2228 Database is used because the result can be obtained from a different LCM worker in case of HA.
2229 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2230 :param db_nslcmop: database content of nslcmop
2231 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2232 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2233 computed 'vim-account-id'
2234 """
2235 modified = False
2236 nslcmop_id = db_nslcmop["_id"]
2237 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2238 if placement_engine == "PLA":
2239 self.logger.debug(
2240 logging_text + "Invoke and wait for placement optimization"
2241 )
2242 await self.msg.aiowrite(
2243 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2244 )
2245 db_poll_interval = 5
2246 wait = db_poll_interval * 10
2247 pla_result = None
2248 while not pla_result and wait >= 0:
2249 await asyncio.sleep(db_poll_interval)
2250 wait -= db_poll_interval
2251 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2252 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2253
2254 if not pla_result:
2255 raise LcmException(
2256 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2257 )
2258
2259 for pla_vnf in pla_result["vnf"]:
2260 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2261 if not pla_vnf.get("vimAccountId") or not vnfr:
2262 continue
2263 modified = True
2264 self.db.set_one(
2265 "vnfrs",
2266 {"_id": vnfr["_id"]},
2267 {"vim-account-id": pla_vnf["vimAccountId"]},
2268 )
2269 # Modifies db_vnfrs
2270 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2271 return modified
2272
2273 def update_nsrs_with_pla_result(self, params):
2274 try:
2275 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2276 self.update_db_2(
2277 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2278 )
2279 except Exception as e:
2280 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2281
    async def instantiate(self, nsr_id: str, nslcmop_id: str):
        """
        Instantiate a Network Service.

        Orchestrates the whole NS deployment: reads the operation, NS, VNF and
        descriptor records from the database, then deploys KDUs, the VIM
        resources (through RO, as a concurrent task) and the execution
        environments / charms (through N2VC). The outcome is written back to
        the "nsrs" and "nslcmops" records and notified through kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None (results are persisted in the database)
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker of the HA cluster owns this operation
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        # NOTE(review): timeout_ns_deploy is first bound inside the try block;
        # if an exception fires before that point the finally clause would read
        # it unbound when tasks_dict_info is non-empty — confirm reachability.
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                # stored json-encoded; decode it once here
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            # make the descriptor artifacts available in the local fs cache
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            # kdu additionalParams are stored json-encoded
                            kdur["additionalParams"] = json.loads(kdur["additionalParams"])
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so this membership
                # test of the id string is always True — every vnfr re-reads
                # (and re-appends) its vnfd; consider indexing by id.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with the N2VC deployment below;
            # its completion is awaited later through tasks_dict_info
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    # VNF-level charm
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    # NOTE(review): vdur is None when the VDU is deployed with 0
                    # instances; the .get below would then raise AttributeError —
                    # confirm against the bug-1865 (scale 0->1) scenario.
                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    # instance count is taken from the descriptor, not from the
                    # current vdur list
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu = parse_yaml_strings(
                                kdur["additionalParams"]
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2765
2766 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2767 if vnfd_id not in cached_vnfds:
2768 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2769 return cached_vnfds[vnfd_id]
2770
2771 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2772 if vnf_profile_id not in cached_vnfrs:
2773 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2774 "vnfrs",
2775 {
2776 "member-vnf-index-ref": vnf_profile_id,
2777 "nsr-id-ref": nsr_id,
2778 },
2779 )
2780 return cached_vnfrs[vnf_profile_id]
2781
2782 def _is_deployed_vca_in_relation(
2783 self, vca: DeployedVCA, relation: Relation
2784 ) -> bool:
2785 found = False
2786 for endpoint in (relation.provider, relation.requirer):
2787 if endpoint["kdu-resource-profile-id"]:
2788 continue
2789 found = (
2790 vca.vnf_profile_id == endpoint.vnf_profile_id
2791 and vca.vdu_profile_id == endpoint.vdu_profile_id
2792 and vca.execution_environment_ref == endpoint.execution_environment_ref
2793 )
2794 if found:
2795 break
2796 return found
2797
2798 def _update_ee_relation_data_with_implicit_data(
2799 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2800 ):
2801 ee_relation_data = safe_get_ee_relation(
2802 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2803 )
2804 ee_relation_level = EELevel.get_level(ee_relation_data)
2805 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2806 "execution-environment-ref"
2807 ]:
2808 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2809 vnfd_id = vnf_profile["vnfd-id"]
2810 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2811 entity_id = (
2812 vnfd_id
2813 if ee_relation_level == EELevel.VNF
2814 else ee_relation_data["vdu-profile-id"]
2815 )
2816 ee = get_juju_ee_ref(db_vnfd, entity_id)
2817 if not ee:
2818 raise Exception(
2819 f"not execution environments found for ee_relation {ee_relation_data}"
2820 )
2821 ee_relation_data["execution-environment-ref"] = ee["id"]
2822 return ee_relation_data
2823
2824 def _get_ns_relations(
2825 self,
2826 nsr_id: str,
2827 nsd: Dict[str, Any],
2828 vca: DeployedVCA,
2829 cached_vnfds: Dict[str, Any],
2830 ) -> List[Relation]:
2831 relations = []
2832 db_ns_relations = get_ns_configuration_relation_list(nsd)
2833 for r in db_ns_relations:
2834 provider_dict = None
2835 requirer_dict = None
2836 if all(key in r for key in ("provider", "requirer")):
2837 provider_dict = r["provider"]
2838 requirer_dict = r["requirer"]
2839 elif "entities" in r:
2840 provider_id = r["entities"][0]["id"]
2841 provider_dict = {
2842 "nsr-id": nsr_id,
2843 "endpoint": r["entities"][0]["endpoint"],
2844 }
2845 if provider_id != nsd["id"]:
2846 provider_dict["vnf-profile-id"] = provider_id
2847 requirer_id = r["entities"][1]["id"]
2848 requirer_dict = {
2849 "nsr-id": nsr_id,
2850 "endpoint": r["entities"][1]["endpoint"],
2851 }
2852 if requirer_id != nsd["id"]:
2853 requirer_dict["vnf-profile-id"] = requirer_id
2854 else:
2855 raise Exception(
2856 "provider/requirer or entities must be included in the relation."
2857 )
2858 relation_provider = self._update_ee_relation_data_with_implicit_data(
2859 nsr_id, nsd, provider_dict, cached_vnfds
2860 )
2861 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2862 nsr_id, nsd, requirer_dict, cached_vnfds
2863 )
2864 provider = EERelation(relation_provider)
2865 requirer = EERelation(relation_requirer)
2866 relation = Relation(r["name"], provider, requirer)
2867 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2868 if vca_in_relation:
2869 relations.append(relation)
2870 return relations
2871
2872 def _get_vnf_relations(
2873 self,
2874 nsr_id: str,
2875 nsd: Dict[str, Any],
2876 vca: DeployedVCA,
2877 cached_vnfds: Dict[str, Any],
2878 ) -> List[Relation]:
2879 relations = []
2880 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2881 vnf_profile_id = vnf_profile["id"]
2882 vnfd_id = vnf_profile["vnfd-id"]
2883 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2884 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2885 for r in db_vnf_relations:
2886 provider_dict = None
2887 requirer_dict = None
2888 if all(key in r for key in ("provider", "requirer")):
2889 provider_dict = r["provider"]
2890 requirer_dict = r["requirer"]
2891 elif "entities" in r:
2892 provider_id = r["entities"][0]["id"]
2893 provider_dict = {
2894 "nsr-id": nsr_id,
2895 "vnf-profile-id": vnf_profile_id,
2896 "endpoint": r["entities"][0]["endpoint"],
2897 }
2898 if provider_id != vnfd_id:
2899 provider_dict["vdu-profile-id"] = provider_id
2900 requirer_id = r["entities"][1]["id"]
2901 requirer_dict = {
2902 "nsr-id": nsr_id,
2903 "vnf-profile-id": vnf_profile_id,
2904 "endpoint": r["entities"][1]["endpoint"],
2905 }
2906 if requirer_id != vnfd_id:
2907 requirer_dict["vdu-profile-id"] = requirer_id
2908 else:
2909 raise Exception(
2910 "provider/requirer or entities must be included in the relation."
2911 )
2912 relation_provider = self._update_ee_relation_data_with_implicit_data(
2913 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2914 )
2915 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2916 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2917 )
2918 provider = EERelation(relation_provider)
2919 requirer = EERelation(relation_requirer)
2920 relation = Relation(r["name"], provider, requirer)
2921 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2922 if vca_in_relation:
2923 relations.append(relation)
2924 return relations
2925
2926 def _get_kdu_resource_data(
2927 self,
2928 ee_relation: EERelation,
2929 db_nsr: Dict[str, Any],
2930 cached_vnfds: Dict[str, Any],
2931 ) -> DeployedK8sResource:
2932 nsd = get_nsd(db_nsr)
2933 vnf_profiles = get_vnf_profiles(nsd)
2934 vnfd_id = find_in_list(
2935 vnf_profiles,
2936 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
2937 )["vnfd-id"]
2938 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2939 kdu_resource_profile = get_kdu_resource_profile(
2940 db_vnfd, ee_relation.kdu_resource_profile_id
2941 )
2942 kdu_name = kdu_resource_profile["kdu-name"]
2943 deployed_kdu, _ = get_deployed_kdu(
2944 db_nsr.get("_admin", ()).get("deployed", ()),
2945 kdu_name,
2946 ee_relation.vnf_profile_id,
2947 )
2948 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
2949 return deployed_kdu
2950
2951 def _get_deployed_component(
2952 self,
2953 ee_relation: EERelation,
2954 db_nsr: Dict[str, Any],
2955 cached_vnfds: Dict[str, Any],
2956 ) -> DeployedComponent:
2957 nsr_id = db_nsr["_id"]
2958 deployed_component = None
2959 ee_level = EELevel.get_level(ee_relation)
2960 if ee_level == EELevel.NS:
2961 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
2962 if vca:
2963 deployed_component = DeployedVCA(nsr_id, vca)
2964 elif ee_level == EELevel.VNF:
2965 vca = get_deployed_vca(
2966 db_nsr,
2967 {
2968 "vdu_id": None,
2969 "member-vnf-index": ee_relation.vnf_profile_id,
2970 "ee_descriptor_id": ee_relation.execution_environment_ref,
2971 },
2972 )
2973 if vca:
2974 deployed_component = DeployedVCA(nsr_id, vca)
2975 elif ee_level == EELevel.VDU:
2976 vca = get_deployed_vca(
2977 db_nsr,
2978 {
2979 "vdu_id": ee_relation.vdu_profile_id,
2980 "member-vnf-index": ee_relation.vnf_profile_id,
2981 "ee_descriptor_id": ee_relation.execution_environment_ref,
2982 },
2983 )
2984 if vca:
2985 deployed_component = DeployedVCA(nsr_id, vca)
2986 elif ee_level == EELevel.KDU:
2987 kdu_resource_data = self._get_kdu_resource_data(
2988 ee_relation, db_nsr, cached_vnfds
2989 )
2990 if kdu_resource_data:
2991 deployed_component = DeployedK8sResource(kdu_resource_data)
2992 return deployed_component
2993
2994 async def _add_relation(
2995 self,
2996 relation: Relation,
2997 vca_type: str,
2998 db_nsr: Dict[str, Any],
2999 cached_vnfds: Dict[str, Any],
3000 cached_vnfrs: Dict[str, Any],
3001 ) -> bool:
3002 deployed_provider = self._get_deployed_component(
3003 relation.provider, db_nsr, cached_vnfds
3004 )
3005 deployed_requirer = self._get_deployed_component(
3006 relation.requirer, db_nsr, cached_vnfds
3007 )
3008 if (
3009 deployed_provider
3010 and deployed_requirer
3011 and deployed_provider.config_sw_installed
3012 and deployed_requirer.config_sw_installed
3013 ):
3014 provider_db_vnfr = (
3015 self._get_vnfr(
3016 relation.provider.nsr_id,
3017 relation.provider.vnf_profile_id,
3018 cached_vnfrs,
3019 )
3020 if relation.provider.vnf_profile_id
3021 else None
3022 )
3023 requirer_db_vnfr = (
3024 self._get_vnfr(
3025 relation.requirer.nsr_id,
3026 relation.requirer.vnf_profile_id,
3027 cached_vnfrs,
3028 )
3029 if relation.requirer.vnf_profile_id
3030 else None
3031 )
3032 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3033 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3034 provider_relation_endpoint = RelationEndpoint(
3035 deployed_provider.ee_id,
3036 provider_vca_id,
3037 relation.provider.endpoint,
3038 )
3039 requirer_relation_endpoint = RelationEndpoint(
3040 deployed_requirer.ee_id,
3041 requirer_vca_id,
3042 relation.requirer.endpoint,
3043 )
3044 await self.vca_map[vca_type].add_relation(
3045 provider=provider_relation_endpoint,
3046 requirer=requirer_relation_endpoint,
3047 )
3048 # remove entry from relations list
3049 return True
3050 return False
3051
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Collect and establish all relations that involve one deployed VCA.

        Polls every 5 seconds until every relation could be added or *timeout*
        seconds have elapsed.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param vca_type: VCA type, used by _add_relation to pick the connector
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param timeout: maximum seconds to wait for peers, default 3600
        :return: True on success or nothing to do; False on timeout or error
                 (errors are logged, never raised)
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # (iterate over a copy: added relations are removed from the list)
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3124
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and update the nsr/vnfr records.

        :param nsr_id: NS record id
        :param nsr_db_path: nsr path for this KDU ("_admin.deployed.K8s.<index>")
        :param vnfr_data: VNF record that owns the KDU
        :param kdu_index: index of the kdur entry inside the vnfr
        :param kdud: KDU descriptor from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace, ...
        :param k8params: KDU instantiation parameters, default None
        :param timeout: seconds allowed for install and each initial primitive
        :param vca_id: VCA id, default None
        :return: the kdu_instance name used for the deployment
        :raises: re-raises any failure after recording it in the nsr/vnfr
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Use the explicit deployment name if given, otherwise generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )
            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )
            self.update_db_2(
                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    # for/else: the else branch only runs when no deployed
                    # service name matched this mgmt-service
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives only when there is no juju-based
            # execution environment for this KDU (juju handles them itself)
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3289
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one _install_kdu task per KDU present in the vnf records.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: current operation id (tasks are registered under it)
        :param db_vnfrs: dict of vnfr records
        :param db_vnfds: list of vnfd records
        :param task_instantiation_info: dict filled with {task: description}
        :raises LcmException: on descriptor or k8s cluster errors
        """
        # Launch kdus if present in the descriptor

        # cache of resolved cluster ids, keyed by cluster type then cluster id
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal k8s id for a cluster/type pair."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # install runs as a background task; completion is tracked
                    # through task_instantiation_info by the caller
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=600,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3561
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment in the descriptor.

        For every ee in descriptor_config it reuses (or creates) the matching
        record in db_nsr._admin.deployed.VCA and registers an asyncio task that
        performs the actual deployment. Nothing is awaited here; progress is
        reported through task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the ee descriptor
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # for/else: reuse an existing VCA record if one matches, else
            # (loop exhausted) create and persist a new one
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3714
3715 @staticmethod
3716 def _create_nslcmop(nsr_id, operation, params):
3717 """
3718 Creates a ns-lcm-opp content to be stored at database.
3719 :param nsr_id: internal id of the instance
3720 :param operation: instantiate, terminate, scale, action, ...
3721 :param params: user parameters for the operation
3722 :return: dictionary following SOL005 format
3723 """
3724 # Raise exception if invalid arguments
3725 if not (nsr_id and operation and params):
3726 raise LcmException(
3727 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3728 )
3729 now = time()
3730 _id = str(uuid4())
3731 nslcmop = {
3732 "id": _id,
3733 "_id": _id,
3734 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3735 "operationState": "PROCESSING",
3736 "statusEnteredTime": now,
3737 "nsInstanceId": nsr_id,
3738 "lcmOperationType": operation,
3739 "startTime": now,
3740 "isAutomaticInvocation": False,
3741 "operationParams": params,
3742 "isCancelPending": False,
3743 "links": {
3744 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3745 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3746 },
3747 }
3748 return nslcmop
3749
3750 def _format_additional_params(self, params):
3751 params = params or {}
3752 for key, value in params.items():
3753 if str(value).startswith("!!yaml "):
3754 params[key] = yaml.safe_load(value[7:])
3755 return params
3756
3757 def _get_terminate_primitive_params(self, seq, vnf_index):
3758 primitive = seq.get("name")
3759 primitive_params = {}
3760 params = {
3761 "member_vnf_index": vnf_index,
3762 "primitive": primitive,
3763 "primitive_params": primitive_params,
3764 }
3765 desc_params = {}
3766 return self._map_primitive_params(seq, params, desc_params)
3767
3768 # sub-operations
3769
3770 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3771 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3772 if op.get("operationState") == "COMPLETED":
3773 # b. Skip sub-operation
3774 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3775 return self.SUBOPERATION_STATUS_SKIP
3776 else:
3777 # c. retry executing sub-operation
3778 # The sub-operation exists, and operationState != 'COMPLETED'
3779 # Update operationState = 'PROCESSING' to indicate a retry.
3780 operationState = "PROCESSING"
3781 detailed_status = "In progress"
3782 self._update_suboperation_status(
3783 db_nslcmop, op_index, operationState, detailed_status
3784 )
3785 # Return the sub-operation index
3786 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3787 # with arguments extracted from the sub-operation
3788 return op_index
3789
3790 # Find a sub-operation where all keys in a matching dictionary must match
3791 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3792 def _find_suboperation(self, db_nslcmop, match):
3793 if db_nslcmop and match:
3794 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3795 for i, op in enumerate(op_list):
3796 if all(op.get(k) == match[k] for k in match):
3797 return i
3798 return self.SUBOPERATION_STATUS_NOT_FOUND
3799
3800 # Update status for a sub-operation given its index
3801 def _update_suboperation_status(
3802 self, db_nslcmop, op_index, operationState, detailed_status
3803 ):
3804 # Update DB for HA tasks
3805 q_filter = {"_id": db_nslcmop["_id"]}
3806 update_dict = {
3807 "_admin.operations.{}.operationState".format(op_index): operationState,
3808 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3809 }
3810 self.db.set_one(
3811 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3812 )
3813
3814 # Add sub-operation, return the index of the added sub-operation
3815 # Optionally, set operationState, detailed-status, and operationType
3816 # Status and type are currently set for 'scale' sub-operations:
3817 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3818 # 'detailed-status' : status message
3819 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3820 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3821 def _add_suboperation(
3822 self,
3823 db_nslcmop,
3824 vnf_index,
3825 vdu_id,
3826 vdu_count_index,
3827 vdu_name,
3828 primitive,
3829 mapped_primitive_params,
3830 operationState=None,
3831 detailed_status=None,
3832 operationType=None,
3833 RO_nsr_id=None,
3834 RO_scaling_info=None,
3835 ):
3836 if not db_nslcmop:
3837 return self.SUBOPERATION_STATUS_NOT_FOUND
3838 # Get the "_admin.operations" list, if it exists
3839 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3840 op_list = db_nslcmop_admin.get("operations")
3841 # Create or append to the "_admin.operations" list
3842 new_op = {
3843 "member_vnf_index": vnf_index,
3844 "vdu_id": vdu_id,
3845 "vdu_count_index": vdu_count_index,
3846 "primitive": primitive,
3847 "primitive_params": mapped_primitive_params,
3848 }
3849 if operationState:
3850 new_op["operationState"] = operationState
3851 if detailed_status:
3852 new_op["detailed-status"] = detailed_status
3853 if operationType:
3854 new_op["lcmOperationType"] = operationType
3855 if RO_nsr_id:
3856 new_op["RO_nsr_id"] = RO_nsr_id
3857 if RO_scaling_info:
3858 new_op["RO_scaling_info"] = RO_scaling_info
3859 if not op_list:
3860 # No existing operations, create key 'operations' with current operation as first list element
3861 db_nslcmop_admin.update({"operations": [new_op]})
3862 op_list = db_nslcmop_admin.get("operations")
3863 else:
3864 # Existing operations, append operation to list
3865 op_list.append(new_op)
3866
3867 db_nslcmop_update = {"_admin.operations": op_list}
3868 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3869 op_index = len(op_list) - 1
3870 return op_index
3871
3872 # Helper methods for scale() sub-operations
3873
3874 # pre-scale/post-scale:
3875 # Check for 3 different cases:
3876 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3877 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3878 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3879 def _check_or_add_scale_suboperation(
3880 self,
3881 db_nslcmop,
3882 vnf_index,
3883 vnf_config_primitive,
3884 primitive_params,
3885 operationType,
3886 RO_nsr_id=None,
3887 RO_scaling_info=None,
3888 ):
3889 # Find this sub-operation
3890 if RO_nsr_id and RO_scaling_info:
3891 operationType = "SCALE-RO"
3892 match = {
3893 "member_vnf_index": vnf_index,
3894 "RO_nsr_id": RO_nsr_id,
3895 "RO_scaling_info": RO_scaling_info,
3896 }
3897 else:
3898 match = {
3899 "member_vnf_index": vnf_index,
3900 "primitive": vnf_config_primitive,
3901 "primitive_params": primitive_params,
3902 "lcmOperationType": operationType,
3903 }
3904 op_index = self._find_suboperation(db_nslcmop, match)
3905 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3906 # a. New sub-operation
3907 # The sub-operation does not exist, add it.
3908 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3909 # The following parameters are set to None for all kind of scaling:
3910 vdu_id = None
3911 vdu_count_index = None
3912 vdu_name = None
3913 if RO_nsr_id and RO_scaling_info:
3914 vnf_config_primitive = None
3915 primitive_params = None
3916 else:
3917 RO_nsr_id = None
3918 RO_scaling_info = None
3919 # Initial status for sub-operation
3920 operationState = "PROCESSING"
3921 detailed_status = "In progress"
3922 # Add sub-operation for pre/post-scaling (zero or more operations)
3923 self._add_suboperation(
3924 db_nslcmop,
3925 vnf_index,
3926 vdu_id,
3927 vdu_count_index,
3928 vdu_name,
3929 vnf_config_primitive,
3930 primitive_params,
3931 operationState,
3932 detailed_status,
3933 operationType,
3934 RO_nsr_id,
3935 RO_scaling_info,
3936 )
3937 return self.SUBOPERATION_STATUS_NEW
3938 else:
3939 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3940 # or op_index (operationState != 'COMPLETED')
3941 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3942
3943 # Function to return execution_environment id
3944
3945 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3946 # TODO vdu_index_count
3947 for vca in vca_deployed_list:
3948 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3949 return vca["ee_id"]
3950
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix used on every log line
        :param db_nslcmop: database record of the ns lcm operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (controller) identifier to use, if any
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default for backward compatibility with records created before "type" existed
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4056
4057 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4058 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4059 namespace = "." + db_nsr["_id"]
4060 try:
4061 await self.n2vc.delete_namespace(
4062 namespace=namespace,
4063 total_timeout=self.timeout_charm_delete,
4064 vca_id=vca_id,
4065 )
4066 except N2VCNotFound: # already deleted. Skip
4067 pass
4068 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4069
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO: deletes the ns from the VIM, then the nsd and
        the vnfds from RO. Failures are accumulated and raised at the end.
        :param logging_text: prefix used on every log line
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: ns instance id
        :param nslcmop_id: operation id, used to write the operation status
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: if any deletion step failed
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates non-fatal deletion errors; raised together at the end
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                # persist the delete action id so it can be resumed if LCM restarts
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at the VIM
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write status to database only when it changed, to avoid useless writes
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered in RO, one by one
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4269
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance: execute terminate primitives, destroy execution
        environments, uninstall KDUs and delete the deployment from RO/VIM,
        updating nsr/nslcmop records along the way and notifying via kafka at the end.
        :param nsr_id: ns instance id
        :param nslcmop_id: operation id driving this termination
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            # nothing was deployed yet: the finally clause will mark it NOT_INSTANTIATED
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                # fetch each vnfd only once, then index it also by member-vnf-index
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # select the config descriptor at the level (ns/vdu/kdu/vnf) this VCA belongs to
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            # notify termination (and possible autoremove) through kafka
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4597
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of asyncio tasks to complete, updating the operation
        progress ("done/total") and accumulated errors in the database as each
        task finishes.
        :param logging_text: prefix used on every log line
        :param created_tasks_info: dict with the task as key and a description as value
        :param timeout: global timeout in seconds for all tasks together
        :param stage: list of str written to detailed-status; index 1 is updated here
        :param nslcmop_id: operation id, used to write the operation status
        :param nsr_id: if provided, errorDescription/errorDetail are also written to this nsr
        :return: list of error detail strings (empty if every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        # known/expected error type: one-line log is enough
                        self.logger.error(logging_text + new_error)
                    else:
                        # unexpected error: log the complete traceback
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4674
4675 @staticmethod
4676 def _map_primitive_params(primitive_desc, params, instantiation_params):
4677 """
4678 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4679 The default-value is used. If it is between < > it look for a value at instantiation_params
4680 :param primitive_desc: portion of VNFD/NSD that describes primitive
4681 :param params: Params provided by user
4682 :param instantiation_params: Instantiation params provided by user
4683 :return: a dictionary with the calculated params
4684 """
4685 calculated_params = {}
4686 for parameter in primitive_desc.get("parameter", ()):
4687 param_name = parameter["name"]
4688 if param_name in params:
4689 calculated_params[param_name] = params[param_name]
4690 elif "default-value" in parameter or "value" in parameter:
4691 if "value" in parameter:
4692 calculated_params[param_name] = parameter["value"]
4693 else:
4694 calculated_params[param_name] = parameter["default-value"]
4695 if (
4696 isinstance(calculated_params[param_name], str)
4697 and calculated_params[param_name].startswith("<")
4698 and calculated_params[param_name].endswith(">")
4699 ):
4700 if calculated_params[param_name][1:-1] in instantiation_params:
4701 calculated_params[param_name] = instantiation_params[
4702 calculated_params[param_name][1:-1]
4703 ]
4704 else:
4705 raise LcmException(
4706 "Parameter {} needed to execute primitive {} not provided".format(
4707 calculated_params[param_name], primitive_desc["name"]
4708 )
4709 )
4710 else:
4711 raise LcmException(
4712 "Parameter {} needed to execute primitive {} not provided".format(
4713 param_name, primitive_desc["name"]
4714 )
4715 )
4716
4717 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4718 calculated_params[param_name] = yaml.safe_dump(
4719 calculated_params[param_name], default_flow_style=True, width=256
4720 )
4721 elif isinstance(calculated_params[param_name], str) and calculated_params[
4722 param_name
4723 ].startswith("!!yaml "):
4724 calculated_params[param_name] = calculated_params[param_name][7:]
4725 if parameter.get("data-type") == "INTEGER":
4726 try:
4727 calculated_params[param_name] = int(calculated_params[param_name])
4728 except ValueError: # error converting string to int
4729 raise LcmException(
4730 "Parameter {} of primitive {} must be integer".format(
4731 param_name, primitive_desc["name"]
4732 )
4733 )
4734 elif parameter.get("data-type") == "BOOLEAN":
4735 calculated_params[param_name] = not (
4736 (str(calculated_params[param_name])).lower() == "false"
4737 )
4738
4739 # add always ns_config_info if primitive name is config
4740 if primitive_desc["name"] == "config":
4741 if "ns_config_info" in instantiation_params:
4742 calculated_params["ns_config_info"] = instantiation_params[
4743 "ns_config_info"
4744 ]
4745 return calculated_params
4746
4747 def _look_for_deployed_vca(
4748 self,
4749 deployed_vca,
4750 member_vnf_index,
4751 vdu_id,
4752 vdu_count_index,
4753 kdu_name=None,
4754 ee_descriptor_id=None,
4755 ):
4756 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4757 for vca in deployed_vca:
4758 if not vca:
4759 continue
4760 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4761 continue
4762 if (
4763 vdu_count_index is not None
4764 and vdu_count_index != vca["vdu_count_index"]
4765 ):
4766 continue
4767 if kdu_name and kdu_name != vca["kdu_name"]:
4768 continue
4769 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4770 continue
4771 break
4772 else:
4773 # vca_deployed not found
4774 raise LcmException(
4775 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4776 " is not deployed".format(
4777 member_vnf_index,
4778 vdu_id,
4779 vdu_count_index,
4780 kdu_name,
4781 ee_descriptor_id,
4782 )
4783 )
4784 # get ee_id
4785 ee_id = vca.get("ee_id")
4786 vca_type = vca.get(
4787 "type", "lxc_proxy_charm"
4788 ) # default value for backward compatibility - proxy charm
4789 if not ee_id:
4790 raise LcmException(
4791 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4792 "execution environment".format(
4793 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4794 )
4795 )
4796 return ee_id, vca_type
4797
4798 async def _ns_execute_primitive(
4799 self,
4800 ee_id,
4801 primitive,
4802 primitive_params,
4803 retries=0,
4804 retries_interval=30,
4805 timeout=None,
4806 vca_type=None,
4807 db_dict=None,
4808 vca_id: str = None,
4809 ) -> (str, str):
4810 try:
4811 if primitive == "config":
4812 primitive_params = {"params": primitive_params}
4813
4814 vca_type = vca_type or "lxc_proxy_charm"
4815
4816 while retries >= 0:
4817 try:
4818 output = await asyncio.wait_for(
4819 self.vca_map[vca_type].exec_primitive(
4820 ee_id=ee_id,
4821 primitive_name=primitive,
4822 params_dict=primitive_params,
4823 progress_timeout=self.timeout_progress_primitive,
4824 total_timeout=self.timeout_primitive,
4825 db_dict=db_dict,
4826 vca_id=vca_id,
4827 vca_type=vca_type,
4828 ),
4829 timeout=timeout or self.timeout_primitive,
4830 )
4831 # execution was OK
4832 break
4833 except asyncio.CancelledError:
4834 raise
4835 except Exception as e: # asyncio.TimeoutError
4836 if isinstance(e, asyncio.TimeoutError):
4837 e = "Timeout"
4838 retries -= 1
4839 if retries >= 0:
4840 self.logger.debug(
4841 "Error executing action {} on {} -> {}".format(
4842 primitive, ee_id, e
4843 )
4844 )
4845 # wait and retry
4846 await asyncio.sleep(retries_interval, loop=self.loop)
4847 else:
4848 return "FAILED", str(e)
4849
4850 return "COMPLETED", output
4851
4852 except (LcmException, asyncio.CancelledError):
4853 raise
4854 except Exception as e:
4855 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4856
4857 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4858 """
4859 Updating the vca_status with latest juju information in nsrs record
4860 :param: nsr_id: Id of the nsr
4861 :param: nslcmop_id: Id of the nslcmop
4862 :return: None
4863 """
4864
4865 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4866 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4867 vca_id = self.get_vca_id({}, db_nsr)
4868 if db_nsr["_admin"]["deployed"]["K8s"]:
4869 for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4870 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
4871 await self._on_update_k8s_db(
4872 cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
4873 )
4874 else:
4875 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4876 table, filter = "nsrs", {"_id": nsr_id}
4877 path = "_admin.deployed.VCA.{}.".format(vca_index)
4878 await self._on_update_n2vc_db(table, filter, path, {})
4879
4880 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4881 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4882
4883 async def action(self, nsr_id, nslcmop_id):
4884 # Try to lock HA task here
4885 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4886 if not task_is_locked_by_me:
4887 return
4888
4889 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4890 self.logger.debug(logging_text + "Enter")
4891 # get all needed from database
4892 db_nsr = None
4893 db_nslcmop = None
4894 db_nsr_update = {}
4895 db_nslcmop_update = {}
4896 nslcmop_operation_state = None
4897 error_description_nslcmop = None
4898 exc = None
4899 try:
4900 # wait for any previous tasks in process
4901 step = "Waiting for previous operations to terminate"
4902 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4903
4904 self._write_ns_status(
4905 nsr_id=nsr_id,
4906 ns_state=None,
4907 current_operation="RUNNING ACTION",
4908 current_operation_id=nslcmop_id,
4909 )
4910
4911 step = "Getting information from database"
4912 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4913 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4914 if db_nslcmop["operationParams"].get("primitive_params"):
4915 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4916 db_nslcmop["operationParams"]["primitive_params"]
4917 )
4918
4919 nsr_deployed = db_nsr["_admin"].get("deployed")
4920 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4921 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4922 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4923 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4924 primitive = db_nslcmop["operationParams"]["primitive"]
4925 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4926 timeout_ns_action = db_nslcmop["operationParams"].get(
4927 "timeout_ns_action", self.timeout_primitive
4928 )
4929
4930 if vnf_index:
4931 step = "Getting vnfr from database"
4932 db_vnfr = self.db.get_one(
4933 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4934 )
4935 if db_vnfr.get("kdur"):
4936 kdur_list = []
4937 for kdur in db_vnfr["kdur"]:
4938 if kdur.get("additionalParams"):
4939 kdur["additionalParams"] = json.loads(kdur["additionalParams"])
4940 kdur_list.append(kdur)
4941 db_vnfr["kdur"] = kdur_list
4942 step = "Getting vnfd from database"
4943 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4944
4945 # Sync filesystem before running a primitive
4946 self.fs.sync(db_vnfr["vnfd-id"])
4947 else:
4948 step = "Getting nsd from database"
4949 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4950
4951 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4952 # for backward compatibility
4953 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4954 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4955 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4956 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4957
4958 # look for primitive
4959 config_primitive_desc = descriptor_configuration = None
4960 if vdu_id:
4961 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4962 elif kdu_name:
4963 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4964 elif vnf_index:
4965 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4966 else:
4967 descriptor_configuration = db_nsd.get("ns-configuration")
4968
4969 if descriptor_configuration and descriptor_configuration.get(
4970 "config-primitive"
4971 ):
4972 for config_primitive in descriptor_configuration["config-primitive"]:
4973 if config_primitive["name"] == primitive:
4974 config_primitive_desc = config_primitive
4975 break
4976
4977 if not config_primitive_desc:
4978 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4979 raise LcmException(
4980 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4981 primitive
4982 )
4983 )
4984 primitive_name = primitive
4985 ee_descriptor_id = None
4986 else:
4987 primitive_name = config_primitive_desc.get(
4988 "execution-environment-primitive", primitive
4989 )
4990 ee_descriptor_id = config_primitive_desc.get(
4991 "execution-environment-ref"
4992 )
4993
4994 if vnf_index:
4995 if vdu_id:
4996 vdur = next(
4997 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4998 )
4999 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
5000 elif kdu_name:
5001 kdur = next(
5002 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
5003 )
5004 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
5005 else:
5006 desc_params = parse_yaml_strings(
5007 db_vnfr.get("additionalParamsForVnf")
5008 )
5009 else:
5010 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5011 if kdu_name and get_configuration(db_vnfd, kdu_name):
5012 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5013 actions = set()
5014 for primitive in kdu_configuration.get("initial-config-primitive", []):
5015 actions.add(primitive["name"])
5016 for primitive in kdu_configuration.get("config-primitive", []):
5017 actions.add(primitive["name"])
5018 kdu_action = True if primitive_name in actions else False
5019
5020 # TODO check if ns is in a proper status
5021 if kdu_name and (
5022 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5023 ):
5024 # kdur and desc_params already set from before
5025 if primitive_params:
5026 desc_params.update(primitive_params)
5027 # TODO Check if we will need something at vnf level
5028 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5029 if (
5030 kdu_name == kdu["kdu-name"]
5031 and kdu["member-vnf-index"] == vnf_index
5032 ):
5033 break
5034 else:
5035 raise LcmException(
5036 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5037 )
5038
5039 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5040 msg = "unknown k8scluster-type '{}'".format(
5041 kdu.get("k8scluster-type")
5042 )
5043 raise LcmException(msg)
5044
5045 db_dict = {
5046 "collection": "nsrs",
5047 "filter": {"_id": nsr_id},
5048 "path": "_admin.deployed.K8s.{}".format(index),
5049 }
5050 self.logger.debug(
5051 logging_text
5052 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5053 )
5054 step = "Executing kdu {}".format(primitive_name)
5055 if primitive_name == "upgrade":
5056 if desc_params.get("kdu_model"):
5057 kdu_model = desc_params.get("kdu_model")
5058 del desc_params["kdu_model"]
5059 else:
5060 kdu_model = kdu.get("kdu-model")
5061 parts = kdu_model.split(sep=":")
5062 if len(parts) == 2:
5063 kdu_model = parts[0]
5064
5065 detailed_status = await asyncio.wait_for(
5066 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5067 cluster_uuid=kdu.get("k8scluster-uuid"),
5068 kdu_instance=kdu.get("kdu-instance"),
5069 atomic=True,
5070 kdu_model=kdu_model,
5071 params=desc_params,
5072 db_dict=db_dict,
5073 timeout=timeout_ns_action,
5074 ),
5075 timeout=timeout_ns_action + 10,
5076 )
5077 self.logger.debug(
5078 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5079 )
5080 elif primitive_name == "rollback":
5081 detailed_status = await asyncio.wait_for(
5082 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5083 cluster_uuid=kdu.get("k8scluster-uuid"),
5084 kdu_instance=kdu.get("kdu-instance"),
5085 db_dict=db_dict,
5086 ),
5087 timeout=timeout_ns_action,
5088 )
5089 elif primitive_name == "status":
5090 detailed_status = await asyncio.wait_for(
5091 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5092 cluster_uuid=kdu.get("k8scluster-uuid"),
5093 kdu_instance=kdu.get("kdu-instance"),
5094 vca_id=vca_id,
5095 ),
5096 timeout=timeout_ns_action,
5097 )
5098 else:
5099 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5100 kdu["kdu-name"], nsr_id
5101 )
5102 params = self._map_primitive_params(
5103 config_primitive_desc, primitive_params, desc_params
5104 )
5105
5106 detailed_status = await asyncio.wait_for(
5107 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5108 cluster_uuid=kdu.get("k8scluster-uuid"),
5109 kdu_instance=kdu_instance,
5110 primitive_name=primitive_name,
5111 params=params,
5112 db_dict=db_dict,
5113 timeout=timeout_ns_action,
5114 vca_id=vca_id,
5115 ),
5116 timeout=timeout_ns_action,
5117 )
5118
5119 if detailed_status:
5120 nslcmop_operation_state = "COMPLETED"
5121 else:
5122 detailed_status = ""
5123 nslcmop_operation_state = "FAILED"
5124 else:
5125 ee_id, vca_type = self._look_for_deployed_vca(
5126 nsr_deployed["VCA"],
5127 member_vnf_index=vnf_index,
5128 vdu_id=vdu_id,
5129 vdu_count_index=vdu_count_index,
5130 ee_descriptor_id=ee_descriptor_id,
5131 )
5132 for vca_index, vca_deployed in enumerate(
5133 db_nsr["_admin"]["deployed"]["VCA"]
5134 ):
5135 if vca_deployed.get("member-vnf-index") == vnf_index:
5136 db_dict = {
5137 "collection": "nsrs",
5138 "filter": {"_id": nsr_id},
5139 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5140 }
5141 break
5142 (
5143 nslcmop_operation_state,
5144 detailed_status,
5145 ) = await self._ns_execute_primitive(
5146 ee_id,
5147 primitive=primitive_name,
5148 primitive_params=self._map_primitive_params(
5149 config_primitive_desc, primitive_params, desc_params
5150 ),
5151 timeout=timeout_ns_action,
5152 vca_type=vca_type,
5153 db_dict=db_dict,
5154 vca_id=vca_id,
5155 )
5156
5157 db_nslcmop_update["detailed-status"] = detailed_status
5158 error_description_nslcmop = (
5159 detailed_status if nslcmop_operation_state == "FAILED" else ""
5160 )
5161 self.logger.debug(
5162 logging_text
5163 + " task Done with result {} {}".format(
5164 nslcmop_operation_state, detailed_status
5165 )
5166 )
5167 return # database update is called inside finally
5168
5169 except (DbException, LcmException, N2VCException, K8sException) as e:
5170 self.logger.error(logging_text + "Exit Exception {}".format(e))
5171 exc = e
5172 except asyncio.CancelledError:
5173 self.logger.error(
5174 logging_text + "Cancelled Exception while '{}'".format(step)
5175 )
5176 exc = "Operation was cancelled"
5177 except asyncio.TimeoutError:
5178 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5179 exc = "Timeout"
5180 except Exception as e:
5181 exc = traceback.format_exc()
5182 self.logger.critical(
5183 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5184 exc_info=True,
5185 )
5186 finally:
5187 if exc:
5188 db_nslcmop_update[
5189 "detailed-status"
5190 ] = (
5191 detailed_status
5192 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5193 nslcmop_operation_state = "FAILED"
5194 if db_nsr:
5195 self._write_ns_status(
5196 nsr_id=nsr_id,
5197 ns_state=db_nsr[
5198 "nsState"
5199 ], # TODO check if degraded. For the moment use previous status
5200 current_operation="IDLE",
5201 current_operation_id=None,
5202 # error_description=error_description_nsr,
5203 # error_detail=error_detail,
5204 other_update=db_nsr_update,
5205 )
5206
5207 self._write_op_status(
5208 op_id=nslcmop_id,
5209 stage="",
5210 error_message=error_description_nslcmop,
5211 operation_state=nslcmop_operation_state,
5212 other_update=db_nslcmop_update,
5213 )
5214
5215 if nslcmop_operation_state:
5216 try:
5217 await self.msg.aiowrite(
5218 "ns",
5219 "actioned",
5220 {
5221 "nsr_id": nsr_id,
5222 "nslcmop_id": nslcmop_id,
5223 "operationState": nslcmop_operation_state,
5224 },
5225 loop=self.loop,
5226 )
5227 except Exception as e:
5228 self.logger.error(
5229 logging_text + "kafka_write notification Exception {}".format(e)
5230 )
5231 self.logger.debug(logging_text + "Exit")
5232 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5233 return nslcmop_operation_state, detailed_status
5234
5235 async def scale(self, nsr_id, nslcmop_id):
5236 # Try to lock HA task here
5237 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5238 if not task_is_locked_by_me:
5239 return
5240
5241 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
5242 stage = ["", "", ""]
5243 tasks_dict_info = {}
5244 # ^ stage, step, VIM progress
5245 self.logger.debug(logging_text + "Enter")
5246 # get all needed from database
5247 db_nsr = None
5248 db_nslcmop_update = {}
5249 db_nsr_update = {}
5250 exc = None
5251 # in case of error, indicates what part of scale was failed to put nsr at error status
5252 scale_process = None
5253 old_operational_status = ""
5254 old_config_status = ""
5255 nsi_id = None
5256 try:
5257 # wait for any previous tasks in process
5258 step = "Waiting for previous operations to terminate"
5259 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5260 self._write_ns_status(
5261 nsr_id=nsr_id,
5262 ns_state=None,
5263 current_operation="SCALING",
5264 current_operation_id=nslcmop_id,
5265 )
5266
5267 step = "Getting nslcmop from database"
5268 self.logger.debug(
5269 step + " after having waited for previous tasks to be completed"
5270 )
5271 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5272
5273 step = "Getting nsr from database"
5274 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5275 old_operational_status = db_nsr["operational-status"]
5276 old_config_status = db_nsr["config-status"]
5277
5278 step = "Parsing scaling parameters"
5279 db_nsr_update["operational-status"] = "scaling"
5280 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5281 nsr_deployed = db_nsr["_admin"].get("deployed")
5282
5283 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
5284 "scaleByStepData"
5285 ]["member-vnf-index"]
5286 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
5287 "scaleByStepData"
5288 ]["scaling-group-descriptor"]
5289 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
5290 # for backward compatibility
5291 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
5292 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
5293 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
5294 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5295
5296 step = "Getting vnfr from database"
5297 db_vnfr = self.db.get_one(
5298 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
5299 )
5300
5301 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5302
5303 step = "Getting vnfd from database"
5304 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
5305
5306 base_folder = db_vnfd["_admin"]["storage"]
5307
5308 step = "Getting scaling-group-descriptor"
5309 scaling_descriptor = find_in_list(
5310 get_scaling_aspect(db_vnfd),
5311 lambda scale_desc: scale_desc["name"] == scaling_group,
5312 )
5313 if not scaling_descriptor:
5314 raise LcmException(
5315 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
5316 "at vnfd:scaling-group-descriptor".format(scaling_group)
5317 )
5318
5319 step = "Sending scale order to VIM"
5320 # TODO check if ns is in a proper status
5321 nb_scale_op = 0
5322 if not db_nsr["_admin"].get("scaling-group"):
5323 self.update_db_2(
5324 "nsrs",
5325 nsr_id,
5326 {
5327 "_admin.scaling-group": [
5328 {"name": scaling_group, "nb-scale-op": 0}
5329 ]
5330 },
5331 )
5332 admin_scale_index = 0
5333 else:
5334 for admin_scale_index, admin_scale_info in enumerate(
5335 db_nsr["_admin"]["scaling-group"]
5336 ):
5337 if admin_scale_info["name"] == scaling_group:
5338 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
5339 break
5340 else: # not found, set index one plus last element and add new entry with the name
5341 admin_scale_index += 1
5342 db_nsr_update[
5343 "_admin.scaling-group.{}.name".format(admin_scale_index)
5344 ] = scaling_group
5345
5346 vca_scaling_info = []
5347 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
5348 if scaling_type == "SCALE_OUT":
5349 if "aspect-delta-details" not in scaling_descriptor:
5350 raise LcmException(
5351 "Aspect delta details not fount in scaling descriptor {}".format(
5352 scaling_descriptor["name"]
5353 )
5354 )
5355 # count if max-instance-count is reached
5356 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5357
5358 scaling_info["scaling_direction"] = "OUT"
5359 scaling_info["vdu-create"] = {}
5360 scaling_info["kdu-create"] = {}
5361 for delta in deltas:
5362 for vdu_delta in delta.get("vdu-delta", {}):
5363 vdud = get_vdu(db_vnfd, vdu_delta["id"])
5364 # vdu_index also provides the number of instance of the targeted vdu
5365 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5366 cloud_init_text = self._get_vdu_cloud_init_content(
5367 vdud, db_vnfd
5368 )
5369 if cloud_init_text:
5370 additional_params = (
5371 self._get_vdu_additional_params(db_vnfr, vdud["id"])
5372 or {}
5373 )
5374 cloud_init_list = []
5375
5376 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5377 max_instance_count = 10
5378 if vdu_profile and "max-number-of-instances" in vdu_profile:
5379 max_instance_count = vdu_profile.get(
5380 "max-number-of-instances", 10
5381 )
5382
5383 default_instance_num = get_number_of_instances(
5384 db_vnfd, vdud["id"]
5385 )
5386 instances_number = vdu_delta.get("number-of-instances", 1)
5387 nb_scale_op += instances_number
5388
5389 new_instance_count = nb_scale_op + default_instance_num
5390 # Control if new count is over max and vdu count is less than max.
5391 # Then assign new instance count
5392 if new_instance_count > max_instance_count > vdu_count:
5393 instances_number = new_instance_count - max_instance_count
5394 else:
5395 instances_number = instances_number
5396
5397 if new_instance_count > max_instance_count:
5398 raise LcmException(
5399 "reached the limit of {} (max-instance-count) "
5400 "scaling-out operations for the "
5401 "scaling-group-descriptor '{}'".format(
5402 nb_scale_op, scaling_group
5403 )
5404 )
5405 for x in range(vdu_delta.get("number-of-instances", 1)):
5406 if cloud_init_text:
5407 # TODO Information of its own ip is not available because db_vnfr is not updated.
5408 additional_params["OSM"] = get_osm_params(
5409 db_vnfr, vdu_delta["id"], vdu_index + x
5410 )
5411 cloud_init_list.append(
5412 self._parse_cloud_init(
5413 cloud_init_text,
5414 additional_params,
5415 db_vnfd["id"],
5416 vdud["id"],
5417 )
5418 )
5419 vca_scaling_info.append(
5420 {
5421 "osm_vdu_id": vdu_delta["id"],
5422 "member-vnf-index": vnf_index,
5423 "type": "create",
5424 "vdu_index": vdu_index + x,
5425 }
5426 )
5427 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
5428 for kdu_delta in delta.get("kdu-resource-delta", {}):
5429 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
5430 kdu_name = kdu_profile["kdu-name"]
5431 resource_name = kdu_profile.get("resource-name", "")
5432
5433 # Might have different kdus in the same delta
5434 # Should have list for each kdu
5435 if not scaling_info["kdu-create"].get(kdu_name, None):
5436 scaling_info["kdu-create"][kdu_name] = []
5437
5438 kdur = get_kdur(db_vnfr, kdu_name)
5439 if kdur.get("helm-chart"):
5440 k8s_cluster_type = "helm-chart-v3"
5441 self.logger.debug("kdur: {}".format(kdur))
5442 if (
5443 kdur.get("helm-version")
5444 and kdur.get("helm-version") == "v2"
5445 ):
5446 k8s_cluster_type = "helm-chart"
5447 elif kdur.get("juju-bundle"):
5448 k8s_cluster_type = "juju-bundle"
5449 else:
5450 raise LcmException(
5451 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5452 "juju-bundle. Maybe an old NBI version is running".format(
5453 db_vnfr["member-vnf-index-ref"], kdu_name
5454 )
5455 )
5456
5457 max_instance_count = 10
5458 if kdu_profile and "max-number-of-instances" in kdu_profile:
5459 max_instance_count = kdu_profile.get(
5460 "max-number-of-instances", 10
5461 )
5462
5463 nb_scale_op += kdu_delta.get("number-of-instances", 1)
5464 deployed_kdu, _ = get_deployed_kdu(
5465 nsr_deployed, kdu_name, vnf_index
5466 )
5467 if deployed_kdu is None:
5468 raise LcmException(
5469 "KDU '{}' for vnf '{}' not deployed".format(
5470 kdu_name, vnf_index
5471 )
5472 )
5473 kdu_instance = deployed_kdu.get("kdu-instance")
5474 instance_num = await self.k8scluster_map[
5475 k8s_cluster_type
5476 ].get_scale_count(
5477 resource_name,
5478 kdu_instance,
5479 vca_id=vca_id,
5480 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
5481 kdu_model=deployed_kdu.get("kdu-model"),
5482 )
5483 kdu_replica_count = instance_num + kdu_delta.get(
5484 "number-of-instances", 1
5485 )
5486
5487 # Control if new count is over max and instance_num is less than max.
5488 # Then assign max instance number to kdu replica count
5489 if kdu_replica_count > max_instance_count > instance_num:
5490 kdu_replica_count = max_instance_count
5491 if kdu_replica_count > max_instance_count:
5492 raise LcmException(
5493 "reached the limit of {} (max-instance-count) "
5494 "scaling-out operations for the "
5495 "scaling-group-descriptor '{}'".format(
5496 instance_num, scaling_group
5497 )
5498 )
5499
5500 for x in range(kdu_delta.get("number-of-instances", 1)):
5501 vca_scaling_info.append(
5502 {
5503 "osm_kdu_id": kdu_name,
5504 "member-vnf-index": vnf_index,
5505 "type": "create",
5506 "kdu_index": instance_num + x - 1,
5507 }
5508 )
5509 scaling_info["kdu-create"][kdu_name].append(
5510 {
5511 "member-vnf-index": vnf_index,
5512 "type": "create",
5513 "k8s-cluster-type": k8s_cluster_type,
5514 "resource-name": resource_name,
5515 "scale": kdu_replica_count,
5516 }
5517 )
5518 elif scaling_type == "SCALE_IN":
5519 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
5520
5521 scaling_info["scaling_direction"] = "IN"
5522 scaling_info["vdu-delete"] = {}
5523 scaling_info["kdu-delete"] = {}
5524
5525 for delta in deltas:
5526 for vdu_delta in delta.get("vdu-delta", {}):
5527 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
5528 min_instance_count = 0
5529 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
5530 if vdu_profile and "min-number-of-instances" in vdu_profile:
5531 min_instance_count = vdu_profile["min-number-of-instances"]
5532
5533 default_instance_num = get_number_of_instances(
5534 db_vnfd, vdu_delta["id"]
5535 )
5536 instance_num = vdu_delta.get("number-of-instances", 1)
5537 nb_scale_op -= instance_num
5538
5539 new_instance_count = nb_scale_op + default_instance_num
5540
5541 if new_instance_count < min_instance_count < vdu_count:
5542 instances_number = min_instance_count - new_instance_count
5543 else:
5544 instances_number = instance_num
5545
5546 if new_instance_count < min_instance_count:
5547 raise LcmException(
5548 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5549 "scaling-group-descriptor '{}'".format(
5550 nb_scale_op, scaling_group
5551 )
5552 )
5553 for x in range(vdu_delta.get("number-of-instances", 1)):
5554 vca_scaling_info.append(
5555 {
5556 "osm_vdu_id": vdu_delta["id"],
5557 "member-vnf-index": vnf_index,
5558 "type": "delete",
5559 "vdu_index": vdu_index - 1 - x,
5560 }
5561 )
5562 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
5563 for kdu_delta in delta.get("kdu-resource-delta", {}):
5564 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
5565 kdu_name = kdu_profile["kdu-name"]
5566 resource_name = kdu_profile.get("resource-name", "")
5567
5568 if not scaling_info["kdu-delete"].get(kdu_name, None):
5569 scaling_info["kdu-delete"][kdu_name] = []
5570
5571 kdur = get_kdur(db_vnfr, kdu_name)
5572 if kdur.get("helm-chart"):
5573 k8s_cluster_type = "helm-chart-v3"
5574 self.logger.debug("kdur: {}".format(kdur))
5575 if (
5576 kdur.get("helm-version")
5577 and kdur.get("helm-version") == "v2"
5578 ):
5579 k8s_cluster_type = "helm-chart"
5580 elif kdur.get("juju-bundle"):
5581 k8s_cluster_type = "juju-bundle"
5582 else:
5583 raise LcmException(
5584 "kdu type for kdu='{}.{}' is neither helm-chart nor "
5585 "juju-bundle. Maybe an old NBI version is running".format(
5586 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
5587 )
5588 )
5589
5590 min_instance_count = 0
5591 if kdu_profile and "min-number-of-instances" in kdu_profile:
5592 min_instance_count = kdu_profile["min-number-of-instances"]
5593
5594 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
5595 deployed_kdu, _ = get_deployed_kdu(
5596 nsr_deployed, kdu_name, vnf_index
5597 )
5598 if deployed_kdu is None:
5599 raise LcmException(
5600 "KDU '{}' for vnf '{}' not deployed".format(
5601 kdu_name, vnf_index
5602 )
5603 )
5604 kdu_instance = deployed_kdu.get("kdu-instance")
5605 instance_num = await self.k8scluster_map[
5606 k8s_cluster_type
5607 ].get_scale_count(
5608 resource_name,
5609 kdu_instance,
5610 vca_id=vca_id,
5611 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
5612 kdu_model=deployed_kdu.get("kdu-model"),
5613 )
5614 kdu_replica_count = instance_num - kdu_delta.get(
5615 "number-of-instances", 1
5616 )
5617
5618 if kdu_replica_count < min_instance_count < instance_num:
5619 kdu_replica_count = min_instance_count
5620 if kdu_replica_count < min_instance_count:
5621 raise LcmException(
5622 "reached the limit of {} (min-instance-count) scaling-in operations for the "
5623 "scaling-group-descriptor '{}'".format(
5624 instance_num, scaling_group
5625 )
5626 )
5627
5628 for x in range(kdu_delta.get("number-of-instances", 1)):
5629 vca_scaling_info.append(
5630 {
5631 "osm_kdu_id": kdu_name,
5632 "member-vnf-index": vnf_index,
5633 "type": "delete",
5634 "kdu_index": instance_num - x - 1,
5635 }
5636 )
5637 scaling_info["kdu-delete"][kdu_name].append(
5638 {
5639 "member-vnf-index": vnf_index,
5640 "type": "delete",
5641 "k8s-cluster-type": k8s_cluster_type,
5642 "resource-name": resource_name,
5643 "scale": kdu_replica_count,
5644 }
5645 )
5646
5647 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
5648 vdu_delete = copy(scaling_info.get("vdu-delete"))
5649 if scaling_info["scaling_direction"] == "IN":
5650 for vdur in reversed(db_vnfr["vdur"]):
5651 if vdu_delete.get(vdur["vdu-id-ref"]):
5652 vdu_delete[vdur["vdu-id-ref"]] -= 1
5653 scaling_info["vdu"].append(
5654 {
5655 "name": vdur.get("name") or vdur.get("vdu-name"),
5656 "vdu_id": vdur["vdu-id-ref"],
5657 "interface": [],
5658 }
5659 )
5660 for interface in vdur["interfaces"]:
5661 scaling_info["vdu"][-1]["interface"].append(
5662 {
5663 "name": interface["name"],
5664 "ip_address": interface["ip-address"],
5665 "mac_address": interface.get("mac-address"),
5666 }
5667 )
5668 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
5669
5670 # PRE-SCALE BEGIN
5671 step = "Executing pre-scale vnf-config-primitive"
5672 if scaling_descriptor.get("scaling-config-action"):
5673 for scaling_config_action in scaling_descriptor[
5674 "scaling-config-action"
5675 ]:
5676 if (
5677 scaling_config_action.get("trigger") == "pre-scale-in"
5678 and scaling_type == "SCALE_IN"
5679 ) or (
5680 scaling_config_action.get("trigger") == "pre-scale-out"
5681 and scaling_type == "SCALE_OUT"
5682 ):
5683 vnf_config_primitive = scaling_config_action[
5684 "vnf-config-primitive-name-ref"
5685 ]
5686 step = db_nslcmop_update[
5687 "detailed-status"
5688 ] = "executing pre-scale scaling-config-action '{}'".format(
5689 vnf_config_primitive
5690 )
5691
5692 # look for primitive
5693 for config_primitive in (
5694 get_configuration(db_vnfd, db_vnfd["id"]) or {}
5695 ).get("config-primitive", ()):
5696 if config_primitive["name"] == vnf_config_primitive:
5697 break
5698 else:
5699 raise LcmException(
5700 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
5701 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
5702 "primitive".format(scaling_group, vnf_config_primitive)
5703 )
5704
5705 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
5706 if db_vnfr.get("additionalParamsForVnf"):
5707 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
5708
5709 scale_process = "VCA"
5710 db_nsr_update["config-status"] = "configuring pre-scaling"
5711 primitive_params = self._map_primitive_params(
5712 config_primitive, {}, vnfr_params
5713 )
5714
5715 # Pre-scale retry check: Check if this sub-operation has been executed before
5716 op_index = self._check_or_add_scale_suboperation(
5717 db_nslcmop,
5718 vnf_index,
5719 vnf_config_primitive,
5720 primitive_params,
5721 "PRE-SCALE",
5722 )
5723 if op_index == self.SUBOPERATION_STATUS_SKIP:
5724 # Skip sub-operation
5725 result = "COMPLETED"
5726 result_detail = "Done"
5727 self.logger.debug(
5728 logging_text
5729 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
5730 vnf_config_primitive, result, result_detail
5731 )
5732 )
5733 else:
5734 if op_index == self.SUBOPERATION_STATUS_NEW:
5735 # New sub-operation: Get index of this sub-operation
5736 op_index = (
5737 len(db_nslcmop.get("_admin", {}).get("operations"))
5738 - 1
5739 )
5740 self.logger.debug(
5741 logging_text
5742 + "vnf_config_primitive={} New sub-operation".format(
5743 vnf_config_primitive
5744 )
5745 )
5746 else:
5747 # retry: Get registered params for this existing sub-operation
5748 op = db_nslcmop.get("_admin", {}).get("operations", [])[
5749 op_index
5750 ]
5751 vnf_index = op.get("member_vnf_index")
5752 vnf_config_primitive = op.get("primitive")
5753 primitive_params = op.get("primitive_params")
5754 self.logger.debug(
5755 logging_text
5756 + "vnf_config_primitive={} Sub-operation retry".format(
5757 vnf_config_primitive
5758 )
5759 )
5760 # Execute the primitive, either with new (first-time) or registered (reintent) args
5761 ee_descriptor_id = config_primitive.get(
5762 "execution-environment-ref"
5763 )
5764 primitive_name = config_primitive.get(
5765 "execution-environment-primitive", vnf_config_primitive
5766 )
5767 ee_id, vca_type = self._look_for_deployed_vca(
5768 nsr_deployed["VCA"],
5769 member_vnf_index=vnf_index,
5770 vdu_id=None,
5771 vdu_count_index=None,
5772 ee_descriptor_id=ee_descriptor_id,
5773 )
5774 result, result_detail = await self._ns_execute_primitive(
5775 ee_id,
5776 primitive_name,
5777 primitive_params,
5778 vca_type=vca_type,
5779 vca_id=vca_id,
5780 )
5781 self.logger.debug(
5782 logging_text
5783 + "vnf_config_primitive={} Done with result {} {}".format(
5784 vnf_config_primitive, result, result_detail
5785 )
5786 )
5787 # Update operationState = COMPLETED | FAILED
5788 self._update_suboperation_status(
5789 db_nslcmop, op_index, result, result_detail
5790 )
5791
5792 if result == "FAILED":
5793 raise LcmException(result_detail)
5794 db_nsr_update["config-status"] = old_config_status
5795 scale_process = None
5796 # PRE-SCALE END
5797
5798 db_nsr_update[
5799 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
5800 ] = nb_scale_op
5801 db_nsr_update[
5802 "_admin.scaling-group.{}.time".format(admin_scale_index)
5803 ] = time()
5804
5805 # SCALE-IN VCA - BEGIN
5806 if vca_scaling_info:
5807 step = db_nslcmop_update[
5808 "detailed-status"
5809 ] = "Deleting the execution environments"
5810 scale_process = "VCA"
5811 for vca_info in vca_scaling_info:
5812 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
5813 member_vnf_index = str(vca_info["member-vnf-index"])
5814 self.logger.debug(
5815 logging_text + "vdu info: {}".format(vca_info)
5816 )
5817 if vca_info.get("osm_vdu_id"):
5818 vdu_id = vca_info["osm_vdu_id"]
5819 vdu_index = int(vca_info["vdu_index"])
5820 stage[
5821 1
5822 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
5823 member_vnf_index, vdu_id, vdu_index
5824 )
5825 stage[2] = step = "Scaling in VCA"
5826 self._write_op_status(op_id=nslcmop_id, stage=stage)
5827 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
5828 config_update = db_nsr["configurationStatus"]
5829 for vca_index, vca in enumerate(vca_update):
5830 if (
5831 (vca or vca.get("ee_id"))
5832 and vca["member-vnf-index"] == member_vnf_index
5833 and vca["vdu_count_index"] == vdu_index
5834 ):
5835 if vca.get("vdu_id"):
5836 config_descriptor = get_configuration(
5837 db_vnfd, vca.get("vdu_id")
5838 )
5839 elif vca.get("kdu_name"):
5840 config_descriptor = get_configuration(
5841 db_vnfd, vca.get("kdu_name")
5842 )
5843 else:
5844 config_descriptor = get_configuration(
5845 db_vnfd, db_vnfd["id"]
5846 )
5847 operation_params = (
5848 db_nslcmop.get("operationParams") or {}
5849 )
5850 exec_terminate_primitives = not operation_params.get(
5851 "skip_terminate_primitives"
5852 ) and vca.get("needed_terminate")
5853 task = asyncio.ensure_future(
5854 asyncio.wait_for(
5855 self.destroy_N2VC(
5856 logging_text,
5857 db_nslcmop,
5858 vca,
5859 config_descriptor,
5860 vca_index,
5861 destroy_ee=True,
5862 exec_primitives=exec_terminate_primitives,
5863 scaling_in=True,
5864 vca_id=vca_id,
5865 ),
5866 timeout=self.timeout_charm_delete,
5867 )
5868 )
5869 tasks_dict_info[task] = "Terminating VCA {}".format(
5870 vca.get("ee_id")
5871 )
5872 del vca_update[vca_index]
5873 del config_update[vca_index]
5874 # wait for pending tasks of terminate primitives
5875 if tasks_dict_info:
5876 self.logger.debug(
5877 logging_text
5878 + "Waiting for tasks {}".format(
5879 list(tasks_dict_info.keys())
5880 )
5881 )
5882 error_list = await self._wait_for_tasks(
5883 logging_text,
5884 tasks_dict_info,
5885 min(
5886 self.timeout_charm_delete, self.timeout_ns_terminate
5887 ),
5888 stage,
5889 nslcmop_id,
5890 )
5891 tasks_dict_info.clear()
5892 if error_list:
5893 raise LcmException("; ".join(error_list))
5894
5895 db_vca_and_config_update = {
5896 "_admin.deployed.VCA": vca_update,
5897 "configurationStatus": config_update,
5898 }
5899 self.update_db_2(
5900 "nsrs", db_nsr["_id"], db_vca_and_config_update
5901 )
5902 scale_process = None
5903 # SCALE-IN VCA - END
5904
5905 # SCALE RO - BEGIN
5906 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
5907 scale_process = "RO"
5908 if self.ro_config.get("ng"):
5909 await self._scale_ng_ro(
5910 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
5911 )
5912 scaling_info.pop("vdu-create", None)
5913 scaling_info.pop("vdu-delete", None)
5914
5915 scale_process = None
5916 # SCALE RO - END
5917
5918 # SCALE KDU - BEGIN
5919 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
5920 scale_process = "KDU"
5921 await self._scale_kdu(
5922 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
5923 )
5924 scaling_info.pop("kdu-create", None)
5925 scaling_info.pop("kdu-delete", None)
5926
5927 scale_process = None
5928 # SCALE KDU - END
5929
5930 if db_nsr_update:
5931 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5932
5933 # SCALE-UP VCA - BEGIN
5934 if vca_scaling_info:
5935 step = db_nslcmop_update[
5936 "detailed-status"
5937 ] = "Creating new execution environments"
5938 scale_process = "VCA"
5939 for vca_info in vca_scaling_info:
5940 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
5941 member_vnf_index = str(vca_info["member-vnf-index"])
5942 self.logger.debug(
5943 logging_text + "vdu info: {}".format(vca_info)
5944 )
5945 vnfd_id = db_vnfr["vnfd-ref"]
5946 if vca_info.get("osm_vdu_id"):
5947 vdu_index = int(vca_info["vdu_index"])
5948 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5949 if db_vnfr.get("additionalParamsForVnf"):
5950 deploy_params.update(
5951 parse_yaml_strings(
5952 db_vnfr["additionalParamsForVnf"].copy()
5953 )
5954 )
5955 descriptor_config = get_configuration(
5956 db_vnfd, db_vnfd["id"]
5957 )
5958 if descriptor_config:
5959 vdu_id = None
5960 vdu_name = None
5961 kdu_name = None
5962 self._deploy_n2vc(
5963 logging_text=logging_text
5964 + "member_vnf_index={} ".format(member_vnf_index),
5965 db_nsr=db_nsr,
5966 db_vnfr=db_vnfr,
5967 nslcmop_id=nslcmop_id,
5968 nsr_id=nsr_id,
5969 nsi_id=nsi_id,
5970 vnfd_id=vnfd_id,
5971 vdu_id=vdu_id,
5972 kdu_name=kdu_name,
5973 member_vnf_index=member_vnf_index,
5974 vdu_index=vdu_index,
5975 vdu_name=vdu_name,
5976 deploy_params=deploy_params,
5977 descriptor_config=descriptor_config,
5978 base_folder=base_folder,
5979 task_instantiation_info=tasks_dict_info,
5980 stage=stage,
5981 )
5982 vdu_id = vca_info["osm_vdu_id"]
5983 vdur = find_in_list(
5984 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
5985 )
5986 descriptor_config = get_configuration(db_vnfd, vdu_id)
5987 if vdur.get("additionalParams"):
5988 deploy_params_vdu = parse_yaml_strings(
5989 vdur["additionalParams"]
5990 )
5991 else:
5992 deploy_params_vdu = deploy_params
5993 deploy_params_vdu["OSM"] = get_osm_params(
5994 db_vnfr, vdu_id, vdu_count_index=vdu_index
5995 )
5996 if descriptor_config:
5997 vdu_name = None
5998 kdu_name = None
5999 stage[
6000 1
6001 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6002 member_vnf_index, vdu_id, vdu_index
6003 )
6004 stage[2] = step = "Scaling out VCA"
6005 self._write_op_status(op_id=nslcmop_id, stage=stage)
6006 self._deploy_n2vc(
6007 logging_text=logging_text
6008 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6009 member_vnf_index, vdu_id, vdu_index
6010 ),
6011 db_nsr=db_nsr,
6012 db_vnfr=db_vnfr,
6013 nslcmop_id=nslcmop_id,
6014 nsr_id=nsr_id,
6015 nsi_id=nsi_id,
6016 vnfd_id=vnfd_id,
6017 vdu_id=vdu_id,
6018 kdu_name=kdu_name,
6019 member_vnf_index=member_vnf_index,
6020 vdu_index=vdu_index,
6021 vdu_name=vdu_name,
6022 deploy_params=deploy_params_vdu,
6023 descriptor_config=descriptor_config,
6024 base_folder=base_folder,
6025 task_instantiation_info=tasks_dict_info,
6026 stage=stage,
6027 )
6028 # SCALE-UP VCA - END
6029 scale_process = None
6030
6031 # POST-SCALE BEGIN
6032 # execute primitive service POST-SCALING
6033 step = "Executing post-scale vnf-config-primitive"
6034 if scaling_descriptor.get("scaling-config-action"):
6035 for scaling_config_action in scaling_descriptor[
6036 "scaling-config-action"
6037 ]:
6038 if (
6039 scaling_config_action.get("trigger") == "post-scale-in"
6040 and scaling_type == "SCALE_IN"
6041 ) or (
6042 scaling_config_action.get("trigger") == "post-scale-out"
6043 and scaling_type == "SCALE_OUT"
6044 ):
6045 vnf_config_primitive = scaling_config_action[
6046 "vnf-config-primitive-name-ref"
6047 ]
6048 step = db_nslcmop_update[
6049 "detailed-status"
6050 ] = "executing post-scale scaling-config-action '{}'".format(
6051 vnf_config_primitive
6052 )
6053
6054 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6055 if db_vnfr.get("additionalParamsForVnf"):
6056 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6057
6058 # look for primitive
6059 for config_primitive in (
6060 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6061 ).get("config-primitive", ()):
6062 if config_primitive["name"] == vnf_config_primitive:
6063 break
6064 else:
6065 raise LcmException(
6066 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6067 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6068 "config-primitive".format(
6069 scaling_group, vnf_config_primitive
6070 )
6071 )
6072 scale_process = "VCA"
6073 db_nsr_update["config-status"] = "configuring post-scaling"
6074 primitive_params = self._map_primitive_params(
6075 config_primitive, {}, vnfr_params
6076 )
6077
6078 # Post-scale retry check: Check if this sub-operation has been executed before
6079 op_index = self._check_or_add_scale_suboperation(
6080 db_nslcmop,
6081 vnf_index,
6082 vnf_config_primitive,
6083 primitive_params,
6084 "POST-SCALE",
6085 )
6086 if op_index == self.SUBOPERATION_STATUS_SKIP:
6087 # Skip sub-operation
6088 result = "COMPLETED"
6089 result_detail = "Done"
6090 self.logger.debug(
6091 logging_text
6092 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6093 vnf_config_primitive, result, result_detail
6094 )
6095 )
6096 else:
6097 if op_index == self.SUBOPERATION_STATUS_NEW:
6098 # New sub-operation: Get index of this sub-operation
6099 op_index = (
6100 len(db_nslcmop.get("_admin", {}).get("operations"))
6101 - 1
6102 )
6103 self.logger.debug(
6104 logging_text
6105 + "vnf_config_primitive={} New sub-operation".format(
6106 vnf_config_primitive
6107 )
6108 )
6109 else:
6110 # retry: Get registered params for this existing sub-operation
6111 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6112 op_index
6113 ]
6114 vnf_index = op.get("member_vnf_index")
6115 vnf_config_primitive = op.get("primitive")
6116 primitive_params = op.get("primitive_params")
6117 self.logger.debug(
6118 logging_text
6119 + "vnf_config_primitive={} Sub-operation retry".format(
6120 vnf_config_primitive
6121 )
6122 )
6123 # Execute the primitive, either with new (first-time) or registered (reintent) args
6124 ee_descriptor_id = config_primitive.get(
6125 "execution-environment-ref"
6126 )
6127 primitive_name = config_primitive.get(
6128 "execution-environment-primitive", vnf_config_primitive
6129 )
6130 ee_id, vca_type = self._look_for_deployed_vca(
6131 nsr_deployed["VCA"],
6132 member_vnf_index=vnf_index,
6133 vdu_id=None,
6134 vdu_count_index=None,
6135 ee_descriptor_id=ee_descriptor_id,
6136 )
6137 result, result_detail = await self._ns_execute_primitive(
6138 ee_id,
6139 primitive_name,
6140 primitive_params,
6141 vca_type=vca_type,
6142 vca_id=vca_id,
6143 )
6144 self.logger.debug(
6145 logging_text
6146 + "vnf_config_primitive={} Done with result {} {}".format(
6147 vnf_config_primitive, result, result_detail
6148 )
6149 )
6150 # Update operationState = COMPLETED | FAILED
6151 self._update_suboperation_status(
6152 db_nslcmop, op_index, result, result_detail
6153 )
6154
6155 if result == "FAILED":
6156 raise LcmException(result_detail)
6157 db_nsr_update["config-status"] = old_config_status
6158 scale_process = None
6159 # POST-SCALE END
6160
6161 db_nsr_update[
6162 "detailed-status"
6163 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6164 db_nsr_update["operational-status"] = (
6165 "running"
6166 if old_operational_status == "failed"
6167 else old_operational_status
6168 )
6169 db_nsr_update["config-status"] = old_config_status
6170 return
6171 except (
6172 ROclient.ROClientException,
6173 DbException,
6174 LcmException,
6175 NgRoException,
6176 ) as e:
6177 self.logger.error(logging_text + "Exit Exception {}".format(e))
6178 exc = e
6179 except asyncio.CancelledError:
6180 self.logger.error(
6181 logging_text + "Cancelled Exception while '{}'".format(step)
6182 )
6183 exc = "Operation was cancelled"
6184 except Exception as e:
6185 exc = traceback.format_exc()
6186 self.logger.critical(
6187 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6188 exc_info=True,
6189 )
6190 finally:
6191 self._write_ns_status(
6192 nsr_id=nsr_id,
6193 ns_state=None,
6194 current_operation="IDLE",
6195 current_operation_id=None,
6196 )
6197 if tasks_dict_info:
6198 stage[1] = "Waiting for instantiate pending tasks."
6199 self.logger.debug(logging_text + stage[1])
6200 exc = await self._wait_for_tasks(
6201 logging_text,
6202 tasks_dict_info,
6203 self.timeout_ns_deploy,
6204 stage,
6205 nslcmop_id,
6206 nsr_id=nsr_id,
6207 )
6208 if exc:
6209 db_nslcmop_update[
6210 "detailed-status"
6211 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6212 nslcmop_operation_state = "FAILED"
6213 if db_nsr:
6214 db_nsr_update["operational-status"] = old_operational_status
6215 db_nsr_update["config-status"] = old_config_status
6216 db_nsr_update["detailed-status"] = ""
6217 if scale_process:
6218 if "VCA" in scale_process:
6219 db_nsr_update["config-status"] = "failed"
6220 if "RO" in scale_process:
6221 db_nsr_update["operational-status"] = "failed"
6222 db_nsr_update[
6223 "detailed-status"
6224 ] = "FAILED scaling nslcmop={} {}: {}".format(
6225 nslcmop_id, step, exc
6226 )
6227 else:
6228 error_description_nslcmop = None
6229 nslcmop_operation_state = "COMPLETED"
6230 db_nslcmop_update["detailed-status"] = "Done"
6231
6232 self._write_op_status(
6233 op_id=nslcmop_id,
6234 stage="",
6235 error_message=error_description_nslcmop,
6236 operation_state=nslcmop_operation_state,
6237 other_update=db_nslcmop_update,
6238 )
6239 if db_nsr:
6240 self._write_ns_status(
6241 nsr_id=nsr_id,
6242 ns_state=None,
6243 current_operation="IDLE",
6244 current_operation_id=None,
6245 other_update=db_nsr_update,
6246 )
6247
6248 if nslcmop_operation_state:
6249 try:
6250 msg = {
6251 "nsr_id": nsr_id,
6252 "nslcmop_id": nslcmop_id,
6253 "operationState": nslcmop_operation_state,
6254 }
6255 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
6256 except Exception as e:
6257 self.logger.error(
6258 logging_text + "kafka_write notification Exception {}".format(e)
6259 )
6260 self.logger.debug(logging_text + "Exit")
6261 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
6262
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale (in or out) the KDUs referenced by a scaling operation.

        For every KDU in ``scaling_info`` the corresponding deployed KDU is
        located, terminate-config-primitives are executed before a scale-in,
        the k8s cluster connector performs the actual scale, and
        initial-config-primitives are executed after a scale-out.

        :param logging_text: prefix added to every log line of this operation
        :param nsr_id: NS record id, used to build the db path for status updates
        :param nsr_deployed: "_admin.deployed" section of the NS record
        :param db_vnfd: VNF descriptor holding the KDU configuration primitives
        :param vca_id: VCA id passed through to primitive execution and scaling
        :param scaling_info: dict with a "kdu-create" and/or "kdu-delete" entry,
            each mapping kdu_name -> list of per-KDU scaling detail dicts
        """
        # NOTE(review): only one of the two keys is processed per call; if both
        # "kdu-create" and "kdu-delete" were present, "kdu-delete" would be
        # ignored — presumably callers never set both at once. TODO confirm.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # Locate the deployed KDU entry and its index inside
                # _admin.deployed.K8s for this member-vnf-index.
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db location where the k8s connector reports progress/status
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # Scale-in: run terminate-config-primitives (when declared and
                # there is no juju execution environment) before removing replicas.
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # Perform the actual scale through the matching k8s connector
                # (helm/juju), atomically, reporting status into db_dict.
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # Scale-out: run initial-config-primitives (when declared and
                # there is no juju execution environment) after adding replicas.
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
6368
6369 async def _scale_ng_ro(
6370 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
6371 ):
6372 nsr_id = db_nslcmop["nsInstanceId"]
6373 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
6374 db_vnfrs = {}
6375
6376 # read from db: vnfd's for every vnf
6377 db_vnfds = []
6378
6379 # for each vnf in ns, read vnfd
6380 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
6381 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
6382 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
6383 # if we haven't this vnfd, read it from db
6384 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
6385 # read from db
6386 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
6387 db_vnfds.append(vnfd)
6388 n2vc_key = self.n2vc.get_public_key()
6389 n2vc_key_list = [n2vc_key]
6390 self.scale_vnfr(
6391 db_vnfr,
6392 vdu_scaling_info.get("vdu-create"),
6393 vdu_scaling_info.get("vdu-delete"),
6394 mark_delete=True,
6395 )
6396 # db_vnfr has been updated, update db_vnfrs to use it
6397 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
6398 await self._instantiate_ng_ro(
6399 logging_text,
6400 nsr_id,
6401 db_nsd,
6402 db_nsr,
6403 db_nslcmop,
6404 db_vnfrs,
6405 db_vnfds,
6406 n2vc_key_list,
6407 stage=stage,
6408 start_deploy=time(),
6409 timeout_ns_deploy=self.timeout_ns_deploy,
6410 )
6411 if vdu_scaling_info.get("vdu-delete"):
6412 self.scale_vnfr(
6413 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
6414 )
6415
6416 async def extract_prometheus_scrape_jobs(
6417 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
6418 ):
6419 # look if exist a file called 'prometheus*.j2' and
6420 artifact_content = self.fs.dir_ls(artifact_path)
6421 job_file = next(
6422 (
6423 f
6424 for f in artifact_content
6425 if f.startswith("prometheus") and f.endswith(".j2")
6426 ),
6427 None,
6428 )
6429 if not job_file:
6430 return
6431 with self.fs.file_open((artifact_path, job_file), "r") as f:
6432 job_data = f.read()
6433
6434 # TODO get_service
6435 _, _, service = ee_id.partition(".") # remove prefix "namespace."
6436 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
6437 host_port = "80"
6438 vnfr_id = vnfr_id.replace("-", "")
6439 variables = {
6440 "JOB_NAME": vnfr_id,
6441 "TARGET_IP": target_ip,
6442 "EXPORTER_POD_IP": host_name,
6443 "EXPORTER_POD_PORT": host_port,
6444 }
6445 job_list = parse_job(job_data, variables)
6446 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
6447 for job in job_list:
6448 if (
6449 not isinstance(job.get("job_name"), str)
6450 or vnfr_id not in job["job_name"]
6451 ):
6452 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
6453 job["nsr_id"] = nsr_id
6454 job["vnfr_id"] = vnfr_id
6455 return job_list
6456
6457 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6458 """
6459 Get VCA Cloud and VCA Cloud Credentials for the VIM account
6460
6461 :param: vim_account_id: VIM Account ID
6462
6463 :return: (cloud_name, cloud_credential)
6464 """
6465 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6466 return config.get("vca_cloud"), config.get("vca_cloud_credential")
6467
6468 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
6469 """
6470 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
6471
6472 :param: vim_account_id: VIM Account ID
6473
6474 :return: (cloud_name, cloud_credential)
6475 """
6476 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
6477 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")