Bug 2000 fixed: the namespace for the Juju Bundle is now updated within the KDU insta...
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 )
34
35 from osm_lcm import ROclient
36 from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41 )
42 from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50 )
51 from osm_lcm.ng_ro import NgRoClient, NgRoException
52 from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
59 check_juju_bundle_existence,
60 get_charm_artifact_path,
61 )
62 from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66 )
67 from osm_lcm.data_utils.vnfd import (
68 get_kdu,
69 get_kdu_services,
70 get_relation_list,
71 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
83 get_kdu_resource_profile,
84 find_software_version,
85 )
86 from osm_lcm.data_utils.list_utils import find_in_list
87 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
88 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
89 from osm_lcm.data_utils.database.vim_account import VimAccountDB
90 from n2vc.definitions import RelationEndpoint
91 from n2vc.k8s_helm_conn import K8sHelmConnector
92 from n2vc.k8s_helm3_conn import K8sHelm3Connector
93 from n2vc.k8s_juju_conn import K8sJujuConnector
94
95 from osm_common.dbbase import DbException
96 from osm_common.fsbase import FsException
97
98 from osm_lcm.data_utils.database.database import Database
99 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
100
101 from n2vc.n2vc_juju_conn import N2VCJujuConnector
102 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
103
104 from osm_lcm.lcm_helm_conn import LCMHelmConn
105 from osm_lcm.osm_config import OsmConfigBuilder
106 from osm_lcm.prometheus import parse_job
107
108 from copy import copy, deepcopy
109 from time import time
110 from uuid import uuid4
111
112 from random import randint
113
114 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
115
116
class NsLcm(LcmBase):
    """NS (Network Service) lifecycle manager.

    Drives NS operations (instantiation, scaling, update, termination),
    delegating charm/EE actions to the N2VC and helm connectors and KDU
    deployments to the helm2/helm3/juju k8s connectors built in __init__.
    """

    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60  # timeout for charm deletion
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs

    # Sentinel status codes used when looking up sub-operations
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
135
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler (forwarded to LcmBase)
        :param lcm_tasks: registry used to track running LCM operation tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local changes do not alter the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (charm lifecycle); status changes land in _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environment connector; shares the n2vc db callback
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 connector for KDU deployments (no db status callback)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 connector for KDU deployments (no db status callback)
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju connector for juju-bundle KDU deployments; status changes land in _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # KDU deployment type -> k8s connector ("chart" is served by helm v3)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # VCA type -> execution environment connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)
215
216 @staticmethod
217 def increment_ip_mac(ip_mac, vm_index=1):
218 if not isinstance(ip_mac, str):
219 return ip_mac
220 try:
221 # try with ipv4 look for last dot
222 i = ip_mac.rfind(".")
223 if i > 0:
224 i += 1
225 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
226 # try with ipv6 or mac look for last colon. Operate in hex
227 i = ip_mac.rfind(":")
228 if i > 0:
229 i += 1
230 # format in hex, len can be 2 for mac or 4 for ipv6
231 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
232 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
233 )
234 except Exception:
235 pass
236 return None
237
238 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
239
240 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
241
242 try:
243 # TODO filter RO descriptor fields...
244
245 # write to database
246 db_dict = dict()
247 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
248 db_dict["deploymentStatus"] = ro_descriptor
249 self.update_db_2("nsrs", nsrs_id, db_dict)
250
251 except Exception as e:
252 self.logger.warn(
253 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
254 )
255
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback fired by N2VC when juju-managed VCA data changes.

        Refreshes 'vcaStatus' of the NS record selected by *filter* and, when
        the NS is READY/DEGRADED, re-evaluates machine/application health to
        toggle between those two states.

        :param table: database table that changed (expected "nsrs")
        :param filter: database filter; its "_id" is the nsr id
        :param path: path of the changed data; its last dot-separated
            component is the VCA index inside _admin.deployed.VCA
        :param updated_data: changed data (not used directly here)
        :param vca_id: id of the VCA involved, if any
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # the trailing path component addresses one entry of _admin.deployed.VCA
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so both assignments below raise KeyError, which is
                # swallowed by the except just after — the configurationStatus
                # change seems to never reach the database. TODO confirm.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
358
359 async def _on_update_k8s_db(
360 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
361 ):
362 """
363 Updating vca status in NSR record
364 :param cluster_uuid: UUID of a k8s cluster
365 :param kdu_instance: The unique name of the KDU instance
366 :param filter: To get nsr_id
367 :cluster_type: The cluster type (juju, k8s)
368 :return: none
369 """
370
371 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
372 # .format(cluster_uuid, kdu_instance, filter))
373
374 nsr_id = filter.get("_id")
375 try:
376 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
377 cluster_uuid=cluster_uuid,
378 kdu_instance=kdu_instance,
379 yaml_format=False,
380 complete_status=True,
381 vca_id=vca_id,
382 )
383
384 # vcaStatus
385 db_dict = dict()
386 db_dict["vcaStatus"] = {nsr_id: vca_status}
387
388 if cluster_type in ("juju-bundle", "juju"):
389 # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
390 # status in a similar way between Juju Bundles and Helm Charts on this side
391 await self.k8sclusterjuju.update_vca_status(
392 db_dict["vcaStatus"],
393 kdu_instance,
394 vca_id=vca_id,
395 )
396
397 self.logger.debug(
398 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
399 )
400
401 # write to database
402 self.update_db_2("nsrs", nsr_id, db_dict)
403 except (asyncio.CancelledError, asyncio.TimeoutError):
404 raise
405 except Exception as e:
406 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
407
408 @staticmethod
409 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
410 try:
411 env = Environment(undefined=StrictUndefined)
412 template = env.from_string(cloud_init_text)
413 return template.render(additional_params or {})
414 except UndefinedError as e:
415 raise LcmException(
416 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
417 "file, must be provided in the instantiation parameters inside the "
418 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
419 )
420 except (TemplateError, TemplateNotFound) as e:
421 raise LcmException(
422 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
423 vnfd_id, vdu_id, e
424 )
425 )
426
427 def _get_vdu_cloud_init_content(self, vdu, vnfd):
428 cloud_init_content = cloud_init_file = None
429 try:
430 if vdu.get("cloud-init-file"):
431 base_folder = vnfd["_admin"]["storage"]
432 if base_folder["pkg-dir"]:
433 cloud_init_file = "{}/{}/cloud_init/{}".format(
434 base_folder["folder"],
435 base_folder["pkg-dir"],
436 vdu["cloud-init-file"],
437 )
438 else:
439 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
440 base_folder["folder"],
441 vdu["cloud-init-file"],
442 )
443 with self.fs.file_open(cloud_init_file, "r") as ci_file:
444 cloud_init_content = ci_file.read()
445 elif vdu.get("cloud-init"):
446 cloud_init_content = vdu["cloud-init"]
447
448 return cloud_init_content
449 except FsException as e:
450 raise LcmException(
451 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
452 vnfd["id"], vdu["id"], cloud_init_file, e
453 )
454 )
455
456 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
457 vdur = next(
458 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]),
459 {}
460 )
461 additional_params = vdur.get("additionalParams")
462 return parse_yaml_strings(additional_params)
463
464 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
465 """
466 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
467 :param vnfd: input vnfd
468 :param new_id: overrides vnf id if provided
469 :param additionalParams: Instantiation params for VNFs provided
470 :param nsrId: Id of the NSR
471 :return: copy of vnfd
472 """
473 vnfd_RO = deepcopy(vnfd)
474 # remove unused by RO configuration, monitoring, scaling and internal keys
475 vnfd_RO.pop("_id", None)
476 vnfd_RO.pop("_admin", None)
477 vnfd_RO.pop("monitoring-param", None)
478 vnfd_RO.pop("scaling-group-descriptor", None)
479 vnfd_RO.pop("kdu", None)
480 vnfd_RO.pop("k8s-cluster", None)
481 if new_id:
482 vnfd_RO["id"] = new_id
483
484 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
485 for vdu in get_iterable(vnfd_RO, "vdu"):
486 vdu.pop("cloud-init-file", None)
487 vdu.pop("cloud-init", None)
488 return vnfd_RO
489
490 @staticmethod
491 def ip_profile_2_RO(ip_profile):
492 RO_ip_profile = deepcopy(ip_profile)
493 if "dns-server" in RO_ip_profile:
494 if isinstance(RO_ip_profile["dns-server"], list):
495 RO_ip_profile["dns-address"] = []
496 for ds in RO_ip_profile.pop("dns-server"):
497 RO_ip_profile["dns-address"].append(ds["address"])
498 else:
499 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
500 if RO_ip_profile.get("ip-version") == "ipv4":
501 RO_ip_profile["ip-version"] = "IPv4"
502 if RO_ip_profile.get("ip-version") == "ipv6":
503 RO_ip_profile["ip-version"] = "IPv6"
504 if "dhcp-params" in RO_ip_profile:
505 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
506 return RO_ip_profile
507
508 def _get_ro_vim_id_for_vim_account(self, vim_account):
509 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
510 if db_vim["_admin"]["operationalState"] != "ENABLED":
511 raise LcmException(
512 "VIM={} is not available. operationalState={}".format(
513 vim_account, db_vim["_admin"]["operationalState"]
514 )
515 )
516 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
517 return RO_vim_id
518
519 def get_ro_wim_id_for_wim_account(self, wim_account):
520 if isinstance(wim_account, str):
521 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
522 if db_wim["_admin"]["operationalState"] != "ENABLED":
523 raise LcmException(
524 "WIM={} is not available. operationalState={}".format(
525 wim_account, db_wim["_admin"]["operationalState"]
526 )
527 )
528 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
529 return RO_wim_id
530 else:
531 return wim_account
532
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scaling operation to the vdur list of a vnfr record.

        :param db_vnfr: vnfr content; its "vdur" list is refreshed in place
            from the database at the end
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: when True, instances are only marked DELETING
            (actual removal happens elsewhere); when False they are pulled
            from the record here
        :raises LcmException: scaling out when neither a vdur nor a
            vdur-template exists for the vdu
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # last existing vdur of this vdu is the model for the new copies
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(f"No vdur in the database. Using the vdur-template to scale")
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one("vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur['_id']}}
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    # new instance starts in BUILD with no address assigned yet
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            # fixed addressing: derive the new address deterministically
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(f"Scaling to 0 !, creating the template with the last vdur")
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark only the last vdu_count replicas as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
639
640 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
641 """
642 Updates database nsr with the RO info for the created vld
643 :param ns_update_nsr: dictionary to be filled with the updated info
644 :param db_nsr: content of db_nsr. This is also modified
645 :param nsr_desc_RO: nsr descriptor from RO
646 :return: Nothing, LcmException is raised on errors
647 """
648
649 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
650 for net_RO in get_iterable(nsr_desc_RO, "nets"):
651 if vld["id"] != net_RO.get("ns_net_osm_id"):
652 continue
653 vld["vim-id"] = net_RO.get("vim_net_id")
654 vld["name"] = net_RO.get("vim_name")
655 vld["status"] = net_RO.get("status")
656 vld["status-detailed"] = net_RO.get("error_msg")
657 ns_update_nsr["vld.{}".format(vld_index)] = vld
658 break
659 else:
660 raise LcmException(
661 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
662 )
663
664 def set_vnfr_at_error(self, db_vnfrs, error_text):
665 try:
666 for db_vnfr in db_vnfrs.values():
667 vnfr_update = {"status": "ERROR"}
668 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
669 if "status" not in vdur:
670 vdur["status"] = "ERROR"
671 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
672 if error_text:
673 vdur["status-detailed"] = str(error_text)
674 vnfr_update[
675 "vdur.{}.status-detailed".format(vdu_index)
676 ] = "ERROR"
677 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
678 except DbException as e:
679 self.logger.error("Cannot update vnf. {}".format(e))
680
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ';': keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # vdurs with pdu-type are not matched against RO vms
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but another replica: keep counting
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # fill in per-interface addresses from the matching RO interface
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                # for/else: no RO interface matched this one
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        # for/else: no RO vm matched this vdur replica
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        # for/else: no RO net matched this vld
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                # for/else: no RO vnf matched this member-vnf-index
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
777
778 def _get_ns_config_info(self, nsr_id):
779 """
780 Generates a mapping between vnf,vdu elements and the N2VC id
781 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
782 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
783 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
784 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
785 """
786 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
787 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
788 mapping = {}
789 ns_config_info = {"osm-config-mapping": mapping}
790 for vca in vca_deployed_list:
791 if not vca["member-vnf-index"]:
792 continue
793 if not vca["vdu_id"]:
794 mapping[vca["member-vnf-index"]] = vca["application"]
795 else:
796 mapping[
797 "{}.{}.{}".format(
798 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
799 )
800 ] = vca["application"]
801 return ns_config_info
802
803 async def _instantiate_ng_ro(
804 self,
805 logging_text,
806 nsr_id,
807 nsd,
808 db_nsr,
809 db_nslcmop,
810 db_vnfrs,
811 db_vnfds,
812 n2vc_key_list,
813 stage,
814 start_deploy,
815 timeout_ns_deploy,
816 ):
817
818 db_vims = {}
819
820 def get_vim_account(vim_account_id):
821 nonlocal db_vims
822 if vim_account_id in db_vims:
823 return db_vims[vim_account_id]
824 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
825 db_vims[vim_account_id] = db_vim
826 return db_vim
827
828 # modify target_vld info with instantiation parameters
829 def parse_vld_instantiation_params(
830 target_vim, target_vld, vld_params, target_sdn
831 ):
832 if vld_params.get("ip-profile"):
833 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
834 "ip-profile"
835 ]
836 if vld_params.get("provider-network"):
837 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
838 "provider-network"
839 ]
840 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
841 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
842 "provider-network"
843 ]["sdn-ports"]
844 if vld_params.get("wimAccountId"):
845 target_wim = "wim:{}".format(vld_params["wimAccountId"])
846 target_vld["vim_info"][target_wim] = {}
847 for param in ("vim-network-name", "vim-network-id"):
848 if vld_params.get(param):
849 if isinstance(vld_params[param], dict):
850 for vim, vim_net in vld_params[param].items():
851 other_target_vim = "vim:" + vim
852 populate_dict(
853 target_vld["vim_info"],
854 (other_target_vim, param.replace("-", "_")),
855 vim_net,
856 )
857 else: # isinstance str
858 target_vld["vim_info"][target_vim][
859 param.replace("-", "_")
860 ] = vld_params[param]
861 if vld_params.get("common_id"):
862 target_vld["common_id"] = vld_params.get("common_id")
863
864 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
865 def update_ns_vld_target(target, ns_params):
866 for vnf_params in ns_params.get("vnf", ()):
867 if vnf_params.get("vimAccountId"):
868 target_vnf = next(
869 (
870 vnfr
871 for vnfr in db_vnfrs.values()
872 if vnf_params["member-vnf-index"]
873 == vnfr["member-vnf-index-ref"]
874 ),
875 None,
876 )
877 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
878 for a_index, a_vld in enumerate(target["ns"]["vld"]):
879 target_vld = find_in_list(
880 get_iterable(vdur, "interfaces"),
881 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
882 )
883 if target_vld:
884 if vnf_params.get("vimAccountId") not in a_vld.get(
885 "vim_info", {}
886 ):
887 target["ns"]["vld"][a_index].get("vim_info").update(
888 {
889 "vim:{}".format(vnf_params["vimAccountId"]): {
890 "vim_network_name": ""
891 }
892 }
893 )
894
895 nslcmop_id = db_nslcmop["_id"]
896 target = {
897 "name": db_nsr["name"],
898 "ns": {"vld": []},
899 "vnf": [],
900 "image": deepcopy(db_nsr["image"]),
901 "flavor": deepcopy(db_nsr["flavor"]),
902 "action_id": nslcmop_id,
903 "cloud_init_content": {},
904 }
905 for image in target["image"]:
906 image["vim_info"] = {}
907 for flavor in target["flavor"]:
908 flavor["vim_info"] = {}
909 if db_nsr.get("affinity-or-anti-affinity-group"):
910 target["affinity-or-anti-affinity-group"] = deepcopy(
911 db_nsr["affinity-or-anti-affinity-group"]
912 )
913 for affinity_or_anti_affinity_group in target[
914 "affinity-or-anti-affinity-group"
915 ]:
916 affinity_or_anti_affinity_group["vim_info"] = {}
917
918 if db_nslcmop.get("lcmOperationType") != "instantiate":
919 # get parameters of instantiation:
920 db_nslcmop_instantiate = self.db.get_list(
921 "nslcmops",
922 {
923 "nsInstanceId": db_nslcmop["nsInstanceId"],
924 "lcmOperationType": "instantiate",
925 },
926 )[-1]
927 ns_params = db_nslcmop_instantiate.get("operationParams")
928 else:
929 ns_params = db_nslcmop.get("operationParams")
930 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
931 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
932
933 cp2target = {}
934 for vld_index, vld in enumerate(db_nsr.get("vld")):
935 target_vim = "vim:{}".format(ns_params["vimAccountId"])
936 target_vld = {
937 "id": vld["id"],
938 "name": vld["name"],
939 "mgmt-network": vld.get("mgmt-network", False),
940 "type": vld.get("type"),
941 "vim_info": {
942 target_vim: {
943 "vim_network_name": vld.get("vim-network-name"),
944 "vim_account_id": ns_params["vimAccountId"],
945 }
946 },
947 }
948 # check if this network needs SDN assist
949 if vld.get("pci-interfaces"):
950 db_vim = get_vim_account(ns_params["vimAccountId"])
951 sdnc_id = db_vim["config"].get("sdn-controller")
952 if sdnc_id:
953 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
954 target_sdn = "sdn:{}".format(sdnc_id)
955 target_vld["vim_info"][target_sdn] = {
956 "sdn": True,
957 "target_vim": target_vim,
958 "vlds": [sdn_vld],
959 "type": vld.get("type"),
960 }
961
962 nsd_vnf_profiles = get_vnf_profiles(nsd)
963 for nsd_vnf_profile in nsd_vnf_profiles:
964 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
965 if cp["virtual-link-profile-id"] == vld["id"]:
966 cp2target[
967 "member_vnf:{}.{}".format(
968 cp["constituent-cpd-id"][0][
969 "constituent-base-element-id"
970 ],
971 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
972 )
973 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
974
975 # check at nsd descriptor, if there is an ip-profile
976 vld_params = {}
977 nsd_vlp = find_in_list(
978 get_virtual_link_profiles(nsd),
979 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
980 == vld["id"],
981 )
982 if (
983 nsd_vlp
984 and nsd_vlp.get("virtual-link-protocol-data")
985 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
986 ):
987 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
988 "l3-protocol-data"
989 ]
990 ip_profile_dest_data = {}
991 if "ip-version" in ip_profile_source_data:
992 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
993 "ip-version"
994 ]
995 if "cidr" in ip_profile_source_data:
996 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
997 "cidr"
998 ]
999 if "gateway-ip" in ip_profile_source_data:
1000 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1001 "gateway-ip"
1002 ]
1003 if "dhcp-enabled" in ip_profile_source_data:
1004 ip_profile_dest_data["dhcp-params"] = {
1005 "enabled": ip_profile_source_data["dhcp-enabled"]
1006 }
1007 vld_params["ip-profile"] = ip_profile_dest_data
1008
1009 # update vld_params with instantiation params
1010 vld_instantiation_params = find_in_list(
1011 get_iterable(ns_params, "vld"),
1012 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1013 )
1014 if vld_instantiation_params:
1015 vld_params.update(vld_instantiation_params)
1016 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1017 target["ns"]["vld"].append(target_vld)
1018 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1019 update_ns_vld_target(target, ns_params)
1020
1021 for vnfr in db_vnfrs.values():
1022 vnfd = find_in_list(
1023 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1024 )
1025 vnf_params = find_in_list(
1026 get_iterable(ns_params, "vnf"),
1027 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1028 )
1029 target_vnf = deepcopy(vnfr)
1030 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1031 for vld in target_vnf.get("vld", ()):
1032 # check if connected to a ns.vld, to fill target'
1033 vnf_cp = find_in_list(
1034 vnfd.get("int-virtual-link-desc", ()),
1035 lambda cpd: cpd.get("id") == vld["id"],
1036 )
1037 if vnf_cp:
1038 ns_cp = "member_vnf:{}.{}".format(
1039 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1040 )
1041 if cp2target.get(ns_cp):
1042 vld["target"] = cp2target[ns_cp]
1043
1044 vld["vim_info"] = {
1045 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1046 }
1047 # check if this network needs SDN assist
1048 target_sdn = None
1049 if vld.get("pci-interfaces"):
1050 db_vim = get_vim_account(vnfr["vim-account-id"])
1051 sdnc_id = db_vim["config"].get("sdn-controller")
1052 if sdnc_id:
1053 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1054 target_sdn = "sdn:{}".format(sdnc_id)
1055 vld["vim_info"][target_sdn] = {
1056 "sdn": True,
1057 "target_vim": target_vim,
1058 "vlds": [sdn_vld],
1059 "type": vld.get("type"),
1060 }
1061
1062 # check at vnfd descriptor, if there is an ip-profile
1063 vld_params = {}
1064 vnfd_vlp = find_in_list(
1065 get_virtual_link_profiles(vnfd),
1066 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1067 )
1068 if (
1069 vnfd_vlp
1070 and vnfd_vlp.get("virtual-link-protocol-data")
1071 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1072 ):
1073 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1074 "l3-protocol-data"
1075 ]
1076 ip_profile_dest_data = {}
1077 if "ip-version" in ip_profile_source_data:
1078 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1079 "ip-version"
1080 ]
1081 if "cidr" in ip_profile_source_data:
1082 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1083 "cidr"
1084 ]
1085 if "gateway-ip" in ip_profile_source_data:
1086 ip_profile_dest_data[
1087 "gateway-address"
1088 ] = ip_profile_source_data["gateway-ip"]
1089 if "dhcp-enabled" in ip_profile_source_data:
1090 ip_profile_dest_data["dhcp-params"] = {
1091 "enabled": ip_profile_source_data["dhcp-enabled"]
1092 }
1093
1094 vld_params["ip-profile"] = ip_profile_dest_data
1095 # update vld_params with instantiation params
1096 if vnf_params:
1097 vld_instantiation_params = find_in_list(
1098 get_iterable(vnf_params, "internal-vld"),
1099 lambda i_vld: i_vld["name"] == vld["id"],
1100 )
1101 if vld_instantiation_params:
1102 vld_params.update(vld_instantiation_params)
1103 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1104
1105 vdur_list = []
1106 for vdur in target_vnf.get("vdur", ()):
1107 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1108 continue # This vdu must not be created
1109 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1110
1111 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1112
1113 if ssh_keys_all:
1114 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1115 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1116 if (
1117 vdu_configuration
1118 and vdu_configuration.get("config-access")
1119 and vdu_configuration.get("config-access").get("ssh-access")
1120 ):
1121 vdur["ssh-keys"] = ssh_keys_all
1122 vdur["ssh-access-required"] = vdu_configuration[
1123 "config-access"
1124 ]["ssh-access"]["required"]
1125 elif (
1126 vnf_configuration
1127 and vnf_configuration.get("config-access")
1128 and vnf_configuration.get("config-access").get("ssh-access")
1129 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1130 ):
1131 vdur["ssh-keys"] = ssh_keys_all
1132 vdur["ssh-access-required"] = vnf_configuration[
1133 "config-access"
1134 ]["ssh-access"]["required"]
1135 elif ssh_keys_instantiation and find_in_list(
1136 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1137 ):
1138 vdur["ssh-keys"] = ssh_keys_instantiation
1139
1140 self.logger.debug("NS > vdur > {}".format(vdur))
1141
1142 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1143 # cloud-init
1144 if vdud.get("cloud-init-file"):
1145 vdur["cloud-init"] = "{}:file:{}".format(
1146 vnfd["_id"], vdud.get("cloud-init-file")
1147 )
1148 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1149 if vdur["cloud-init"] not in target["cloud_init_content"]:
1150 base_folder = vnfd["_admin"]["storage"]
1151 if base_folder["pkg-dir"]:
1152 cloud_init_file = "{}/{}/cloud_init/{}".format(
1153 base_folder["folder"],
1154 base_folder["pkg-dir"],
1155 vdud.get("cloud-init-file"),
1156 )
1157 else:
1158 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1159 base_folder["folder"],
1160 vdud.get("cloud-init-file"),
1161 )
1162 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1163 target["cloud_init_content"][
1164 vdur["cloud-init"]
1165 ] = ci_file.read()
1166 elif vdud.get("cloud-init"):
1167 vdur["cloud-init"] = "{}:vdu:{}".format(
1168 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1169 )
1170 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1171 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1172 "cloud-init"
1173 ]
1174 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1175 deploy_params_vdu = self._format_additional_params(
1176 vdur.get("additionalParams") or {}
1177 )
1178 deploy_params_vdu["OSM"] = get_osm_params(
1179 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1180 )
1181 vdur["additionalParams"] = deploy_params_vdu
1182
1183 # flavor
1184 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1185 if target_vim not in ns_flavor["vim_info"]:
1186 ns_flavor["vim_info"][target_vim] = {}
1187
1188 # deal with images
1189 # in case alternative images are provided we must check if they should be applied
1190 # for the vim_type, modify the vim_type taking into account
1191 ns_image_id = int(vdur["ns-image-id"])
1192 if vdur.get("alt-image-ids"):
1193 db_vim = get_vim_account(vnfr["vim-account-id"])
1194 vim_type = db_vim["vim_type"]
1195 for alt_image_id in vdur.get("alt-image-ids"):
1196 ns_alt_image = target["image"][int(alt_image_id)]
1197 if vim_type == ns_alt_image.get("vim-type"):
1198 # must use alternative image
1199 self.logger.debug(
1200 "use alternative image id: {}".format(alt_image_id)
1201 )
1202 ns_image_id = alt_image_id
1203 vdur["ns-image-id"] = ns_image_id
1204 break
1205 ns_image = target["image"][int(ns_image_id)]
1206 if target_vim not in ns_image["vim_info"]:
1207 ns_image["vim_info"][target_vim] = {}
1208
1209 # Affinity groups
1210 if vdur.get("affinity-or-anti-affinity-group-id"):
1211 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1212 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1213 if target_vim not in ns_ags["vim_info"]:
1214 ns_ags["vim_info"][target_vim] = {}
1215
1216 vdur["vim_info"] = {target_vim: {}}
1217 # instantiation parameters
1218 # if vnf_params:
1219 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1220 # vdud["id"]), None)
1221 vdur_list.append(vdur)
1222 target_vnf["vdur"] = vdur_list
1223 target["vnf"].append(target_vnf)
1224
1225 desc = await self.RO.deploy(nsr_id, target)
1226 self.logger.debug("RO return > {}".format(desc))
1227 action_id = desc["action_id"]
1228 await self._wait_ng_ro(
1229 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage
1230 )
1231
1232 # Updating NSR
1233 db_nsr_update = {
1234 "_admin.deployed.RO.operational-status": "running",
1235 "detailed-status": " ".join(stage),
1236 }
1237 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1238 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1239 self._write_op_status(nslcmop_id, stage)
1240 self.logger.debug(
1241 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1242 )
1243 return
1244
1245 async def _wait_ng_ro(
1246 self,
1247 nsr_id,
1248 action_id,
1249 nslcmop_id=None,
1250 start_time=None,
1251 timeout=600,
1252 stage=None,
1253 ):
1254 detailed_status_old = None
1255 db_nsr_update = {}
1256 start_time = start_time or time()
1257 while time() <= start_time + timeout:
1258 desc_status = await self.RO.status(nsr_id, action_id)
1259 self.logger.debug("Wait NG RO > {}".format(desc_status))
1260 if desc_status["status"] == "FAILED":
1261 raise NgRoException(desc_status["details"])
1262 elif desc_status["status"] == "BUILD":
1263 if stage:
1264 stage[2] = "VIM: ({})".format(desc_status["details"])
1265 elif desc_status["status"] == "DONE":
1266 if stage:
1267 stage[2] = "Deployed at VIM"
1268 break
1269 else:
1270 assert False, "ROclient.check_ns_status returns unknown {}".format(
1271 desc_status["status"]
1272 )
1273 if stage and nslcmop_id and stage[2] != detailed_status_old:
1274 detailed_status_old = stage[2]
1275 db_nsr_update["detailed-status"] = " ".join(stage)
1276 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1277 self._write_op_status(nslcmop_id, stage)
1278 await asyncio.sleep(15, loop=self.loop)
1279 else: # timeout_ns_deploy
1280 raise NgRoException("Timeout waiting ns to deploy")
1281
1282 async def _terminate_ng_ro(
1283 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1284 ):
1285 db_nsr_update = {}
1286 failed_detail = []
1287 action_id = None
1288 start_deploy = time()
1289 try:
1290 target = {
1291 "ns": {"vld": []},
1292 "vnf": [],
1293 "image": [],
1294 "flavor": [],
1295 "action_id": nslcmop_id,
1296 }
1297 desc = await self.RO.deploy(nsr_id, target)
1298 action_id = desc["action_id"]
1299 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1300 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1301 self.logger.debug(
1302 logging_text
1303 + "ns terminate action at RO. action_id={}".format(action_id)
1304 )
1305
1306 # wait until done
1307 delete_timeout = 20 * 60 # 20 minutes
1308 await self._wait_ng_ro(
1309 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage
1310 )
1311
1312 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1313 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1314 # delete all nsr
1315 await self.RO.delete(nsr_id)
1316 except Exception as e:
1317 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1318 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1319 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1320 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1321 self.logger.debug(
1322 logging_text + "RO_action_id={} already deleted".format(action_id)
1323 )
1324 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1325 failed_detail.append("delete conflict: {}".format(e))
1326 self.logger.debug(
1327 logging_text
1328 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1329 )
1330 else:
1331 failed_detail.append("delete error: {}".format(e))
1332 self.logger.error(
1333 logging_text
1334 + "RO_action_id={} delete error: {}".format(action_id, e)
1335 )
1336
1337 if failed_detail:
1338 stage[2] = "Error deleting from VIM"
1339 else:
1340 stage[2] = "Deleted from VIM"
1341 db_nsr_update["detailed-status"] = " ".join(stage)
1342 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1343 self._write_op_status(nslcmop_id, stage)
1344
1345 if failed_detail:
1346 raise LcmException("; ".join(failed_detail))
1347 return
1348
1349 async def instantiate_RO(
1350 self,
1351 logging_text,
1352 nsr_id,
1353 nsd,
1354 db_nsr,
1355 db_nslcmop,
1356 db_vnfrs,
1357 db_vnfds,
1358 n2vc_key_list,
1359 stage,
1360 ):
1361 """
1362 Instantiate at RO
1363 :param logging_text: preffix text to use at logging
1364 :param nsr_id: nsr identity
1365 :param nsd: database content of ns descriptor
1366 :param db_nsr: database content of ns record
1367 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1368 :param db_vnfrs:
1369 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1370 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1371 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1372 :return: None or exception
1373 """
1374 try:
1375 start_deploy = time()
1376 ns_params = db_nslcmop.get("operationParams")
1377 if ns_params and ns_params.get("timeout_ns_deploy"):
1378 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1379 else:
1380 timeout_ns_deploy = self.timeout.get(
1381 "ns_deploy", self.timeout_ns_deploy
1382 )
1383
1384 # Check for and optionally request placement optimization. Database will be updated if placement activated
1385 stage[2] = "Waiting for Placement."
1386 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1387 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1388 for vnfr in db_vnfrs.values():
1389 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1390 break
1391 else:
1392 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1393
1394 return await self._instantiate_ng_ro(
1395 logging_text,
1396 nsr_id,
1397 nsd,
1398 db_nsr,
1399 db_nslcmop,
1400 db_vnfrs,
1401 db_vnfds,
1402 n2vc_key_list,
1403 stage,
1404 start_deploy,
1405 timeout_ns_deploy,
1406 )
1407 except Exception as e:
1408 stage[2] = "ERROR deploying at VIM"
1409 self.set_vnfr_at_error(db_vnfrs, str(e))
1410 self.logger.error(
1411 "Error deploying at VIM {}".format(e),
1412 exc_info=not isinstance(
1413 e,
1414 (
1415 ROclient.ROClientException,
1416 LcmException,
1417 DbException,
1418 NgRoException,
1419 ),
1420 ),
1421 )
1422 raise
1423
1424 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1425 """
1426 Wait for kdu to be up, get ip address
1427 :param logging_text: prefix use for logging
1428 :param nsr_id:
1429 :param vnfr_id:
1430 :param kdu_name:
1431 :return: IP address, K8s services
1432 """
1433
1434 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1435 nb_tries = 0
1436
1437 while nb_tries < 360:
1438 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1439 kdur = next(
1440 (
1441 x
1442 for x in get_iterable(db_vnfr, "kdur")
1443 if x.get("kdu-name") == kdu_name
1444 ),
1445 None,
1446 )
1447 if not kdur:
1448 raise LcmException(
1449 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1450 )
1451 if kdur.get("status"):
1452 if kdur["status"] in ("READY", "ENABLED"):
1453 return kdur.get("ip-address"), kdur.get("services")
1454 else:
1455 raise LcmException(
1456 "target KDU={} is in error state".format(kdu_name)
1457 )
1458
1459 await asyncio.sleep(10, loop=self.loop)
1460 nb_tries += 1
1461 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1462
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for IP address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target VDU id; when falsy the VNF mgmt IP is used instead
        :param vdu_index: count-index of the target VDU
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address

        Two independent retry counters are used:
        - ro_retries caps the whole loop at 360 iterations of 10 s (1 hour)
        - nb_tries counts only key-injection retries on ROClientException (max 20)
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0
        # target_vdu_id stays None until the VM is ACTIVE and has an IP address
        target_vdu_id = None
        ro_retries = 0

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour (360 iterations * 10 s sleep)
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                # re-read the VNF record each iteration to pick up RO updates
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are pre-provisioned, so they are treated as already up
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # keys cannot be injected into physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # new-generation RO: injection is a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        # succeed if any VM reports vim_result 200; otherwise
                        # raise ROClientException, which is retried below
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # log only on the first failed attempt to avoid log spam
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the IP address is all that was needed
                break

        return ip_address
1639
1640 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1641 """
1642 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1643 """
1644 my_vca = vca_deployed_list[vca_index]
1645 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1646 # vdu or kdu: no dependencies
1647 return
1648 timeout = 300
1649 while timeout >= 0:
1650 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1651 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1652 configuration_status_list = db_nsr["configurationStatus"]
1653 for index, vca_deployed in enumerate(configuration_status_list):
1654 if index == vca_index:
1655 # myself
1656 continue
1657 if not my_vca.get("member-vnf-index") or (
1658 vca_deployed.get("member-vnf-index")
1659 == my_vca.get("member-vnf-index")
1660 ):
1661 internal_status = configuration_status_list[index].get("status")
1662 if internal_status == "READY":
1663 continue
1664 elif internal_status == "BROKEN":
1665 raise LcmException(
1666 "Configuration aborted because dependent charm/s has failed"
1667 )
1668 else:
1669 break
1670 else:
1671 # no dependencies, return
1672 return
1673 await asyncio.sleep(10)
1674 timeout -= 1
1675
1676 raise LcmException("Configuration aborted because dependent charm/s timeout")
1677
1678 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1679 vca_id = None
1680 if db_vnfr:
1681 vca_id = deep_get(db_vnfr, ("vca-id",))
1682 elif db_nsr:
1683 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1684 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1685 return vca_id
1686
1687 async def instantiate_N2VC(
1688 self,
1689 logging_text,
1690 vca_index,
1691 nsi_id,
1692 db_nsr,
1693 db_vnfr,
1694 vdu_id,
1695 kdu_name,
1696 vdu_index,
1697 config_descriptor,
1698 deploy_params,
1699 base_folder,
1700 nslcmop_id,
1701 stage,
1702 vca_type,
1703 vca_name,
1704 ee_config_descriptor,
1705 ):
1706 nsr_id = db_nsr["_id"]
1707 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1708 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1709 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1710 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1711 db_dict = {
1712 "collection": "nsrs",
1713 "filter": {"_id": nsr_id},
1714 "path": db_update_entry,
1715 }
1716 step = ""
1717 try:
1718
1719 element_type = "NS"
1720 element_under_configuration = nsr_id
1721
1722 vnfr_id = None
1723 if db_vnfr:
1724 vnfr_id = db_vnfr["_id"]
1725 osm_config["osm"]["vnf_id"] = vnfr_id
1726
1727 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1728
1729 if vca_type == "native_charm":
1730 index_number = 0
1731 else:
1732 index_number = vdu_index or 0
1733
1734 if vnfr_id:
1735 element_type = "VNF"
1736 element_under_configuration = vnfr_id
1737 namespace += ".{}-{}".format(vnfr_id, index_number)
1738 if vdu_id:
1739 namespace += ".{}-{}".format(vdu_id, index_number)
1740 element_type = "VDU"
1741 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1742 osm_config["osm"]["vdu_id"] = vdu_id
1743 elif kdu_name:
1744 namespace += ".{}".format(kdu_name)
1745 element_type = "KDU"
1746 element_under_configuration = kdu_name
1747 osm_config["osm"]["kdu_name"] = kdu_name
1748
1749 # Get artifact path
1750 if base_folder["pkg-dir"]:
1751 artifact_path = "{}/{}/{}/{}".format(
1752 base_folder["folder"],
1753 base_folder["pkg-dir"],
1754 "charms"
1755 if vca_type
1756 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1757 else "helm-charts",
1758 vca_name,
1759 )
1760 else:
1761 artifact_path = "{}/Scripts/{}/{}/".format(
1762 base_folder["folder"],
1763 "charms"
1764 if vca_type
1765 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1766 else "helm-charts",
1767 vca_name,
1768 )
1769
1770 self.logger.debug("Artifact path > {}".format(artifact_path))
1771
1772 # get initial_config_primitive_list that applies to this element
1773 initial_config_primitive_list = config_descriptor.get(
1774 "initial-config-primitive"
1775 )
1776
1777 self.logger.debug(
1778 "Initial config primitive list > {}".format(
1779 initial_config_primitive_list
1780 )
1781 )
1782
1783 # add config if not present for NS charm
1784 ee_descriptor_id = ee_config_descriptor.get("id")
1785 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1786 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1787 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1788 )
1789
1790 self.logger.debug(
1791 "Initial config primitive list #2 > {}".format(
1792 initial_config_primitive_list
1793 )
1794 )
1795 # n2vc_redesign STEP 3.1
1796 # find old ee_id if exists
1797 ee_id = vca_deployed.get("ee_id")
1798
1799 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1800 # create or register execution environment in VCA
1801 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1802
1803 self._write_configuration_status(
1804 nsr_id=nsr_id,
1805 vca_index=vca_index,
1806 status="CREATING",
1807 element_under_configuration=element_under_configuration,
1808 element_type=element_type,
1809 )
1810
1811 step = "create execution environment"
1812 self.logger.debug(logging_text + step)
1813
1814 ee_id = None
1815 credentials = None
1816 if vca_type == "k8s_proxy_charm":
1817 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1818 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1819 namespace=namespace,
1820 artifact_path=artifact_path,
1821 db_dict=db_dict,
1822 vca_id=vca_id,
1823 )
1824 elif vca_type == "helm" or vca_type == "helm-v3":
1825 ee_id, credentials = await self.vca_map[
1826 vca_type
1827 ].create_execution_environment(
1828 namespace=namespace,
1829 reuse_ee_id=ee_id,
1830 db_dict=db_dict,
1831 config=osm_config,
1832 artifact_path=artifact_path,
1833 vca_type=vca_type,
1834 )
1835 else:
1836 ee_id, credentials = await self.vca_map[
1837 vca_type
1838 ].create_execution_environment(
1839 namespace=namespace,
1840 reuse_ee_id=ee_id,
1841 db_dict=db_dict,
1842 vca_id=vca_id,
1843 )
1844
1845 elif vca_type == "native_charm":
1846 step = "Waiting to VM being up and getting IP address"
1847 self.logger.debug(logging_text + step)
1848 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1849 logging_text,
1850 nsr_id,
1851 vnfr_id,
1852 vdu_id,
1853 vdu_index,
1854 user=None,
1855 pub_key=None,
1856 )
1857 credentials = {"hostname": rw_mgmt_ip}
1858 # get username
1859 username = deep_get(
1860 config_descriptor, ("config-access", "ssh-access", "default-user")
1861 )
1862 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1863 # merged. Meanwhile let's get username from initial-config-primitive
1864 if not username and initial_config_primitive_list:
1865 for config_primitive in initial_config_primitive_list:
1866 for param in config_primitive.get("parameter", ()):
1867 if param["name"] == "ssh-username":
1868 username = param["value"]
1869 break
1870 if not username:
1871 raise LcmException(
1872 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1873 "'config-access.ssh-access.default-user'"
1874 )
1875 credentials["username"] = username
1876 # n2vc_redesign STEP 3.2
1877
1878 self._write_configuration_status(
1879 nsr_id=nsr_id,
1880 vca_index=vca_index,
1881 status="REGISTERING",
1882 element_under_configuration=element_under_configuration,
1883 element_type=element_type,
1884 )
1885
1886 step = "register execution environment {}".format(credentials)
1887 self.logger.debug(logging_text + step)
1888 ee_id = await self.vca_map[vca_type].register_execution_environment(
1889 credentials=credentials,
1890 namespace=namespace,
1891 db_dict=db_dict,
1892 vca_id=vca_id,
1893 )
1894
1895 # for compatibility with MON/POL modules, the need model and application name at database
1896 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1897 ee_id_parts = ee_id.split(".")
1898 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1899 if len(ee_id_parts) >= 2:
1900 model_name = ee_id_parts[0]
1901 application_name = ee_id_parts[1]
1902 db_nsr_update[db_update_entry + "model"] = model_name
1903 db_nsr_update[db_update_entry + "application"] = application_name
1904
1905 # n2vc_redesign STEP 3.3
1906 step = "Install configuration Software"
1907
1908 self._write_configuration_status(
1909 nsr_id=nsr_id,
1910 vca_index=vca_index,
1911 status="INSTALLING SW",
1912 element_under_configuration=element_under_configuration,
1913 element_type=element_type,
1914 other_update=db_nsr_update,
1915 )
1916
1917 # TODO check if already done
1918 self.logger.debug(logging_text + step)
1919 config = None
1920 if vca_type == "native_charm":
1921 config_primitive = next(
1922 (p for p in initial_config_primitive_list if p["name"] == "config"),
1923 None,
1924 )
1925 if config_primitive:
1926 config = self._map_primitive_params(
1927 config_primitive, {}, deploy_params
1928 )
1929 num_units = 1
1930 if vca_type == "lxc_proxy_charm":
1931 if element_type == "NS":
1932 num_units = db_nsr.get("config-units") or 1
1933 elif element_type == "VNF":
1934 num_units = db_vnfr.get("config-units") or 1
1935 elif element_type == "VDU":
1936 for v in db_vnfr["vdur"]:
1937 if vdu_id == v["vdu-id-ref"]:
1938 num_units = v.get("config-units") or 1
1939 break
1940 if vca_type != "k8s_proxy_charm":
1941 await self.vca_map[vca_type].install_configuration_sw(
1942 ee_id=ee_id,
1943 artifact_path=artifact_path,
1944 db_dict=db_dict,
1945 config=config,
1946 num_units=num_units,
1947 vca_id=vca_id,
1948 vca_type=vca_type,
1949 )
1950
1951 # write in db flag of configuration_sw already installed
1952 self.update_db_2(
1953 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1954 )
1955
1956 # add relations for this VCA (wait for other peers related with this VCA)
1957 await self._add_vca_relations(
1958 logging_text=logging_text,
1959 nsr_id=nsr_id,
1960 vca_type=vca_type,
1961 vca_index=vca_index,
1962 )
1963
1964 # if SSH access is required, then get execution environment SSH public
1965 # if native charm we have waited already to VM be UP
1966 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1967 pub_key = None
1968 user = None
1969 # self.logger.debug("get ssh key block")
1970 if deep_get(
1971 config_descriptor, ("config-access", "ssh-access", "required")
1972 ):
1973 # self.logger.debug("ssh key needed")
1974 # Needed to inject a ssh key
1975 user = deep_get(
1976 config_descriptor,
1977 ("config-access", "ssh-access", "default-user"),
1978 )
1979 step = "Install configuration Software, getting public ssh key"
1980 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1981 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1982 )
1983
1984 step = "Insert public key into VM user={} ssh_key={}".format(
1985 user, pub_key
1986 )
1987 else:
1988 # self.logger.debug("no need to get ssh key")
1989 step = "Waiting to VM being up and getting IP address"
1990 self.logger.debug(logging_text + step)
1991
1992 # n2vc_redesign STEP 5.1
1993 # wait for RO (ip-address) Insert pub_key into VM
1994 if vnfr_id:
1995 if kdu_name:
1996 rw_mgmt_ip, services = await self.wait_kdu_up(
1997 logging_text, nsr_id, vnfr_id, kdu_name
1998 )
1999 vnfd = self.db.get_one(
2000 "vnfds_revisions",
2001 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2002 )
2003 kdu = get_kdu(vnfd, kdu_name)
2004 kdu_services = [
2005 service["name"] for service in get_kdu_services(kdu)
2006 ]
2007 exposed_services = []
2008 for service in services:
2009 if any(s in service["name"] for s in kdu_services):
2010 exposed_services.append(service)
2011 await self.vca_map[vca_type].exec_primitive(
2012 ee_id=ee_id,
2013 primitive_name="config",
2014 params_dict={
2015 "osm-config": json.dumps(
2016 OsmConfigBuilder(
2017 k8s={"services": exposed_services}
2018 ).build()
2019 )
2020 },
2021 vca_id=vca_id,
2022 )
2023 else:
2024 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2025 logging_text,
2026 nsr_id,
2027 vnfr_id,
2028 vdu_id,
2029 vdu_index,
2030 user=user,
2031 pub_key=pub_key,
2032 )
2033
2034 else:
2035 rw_mgmt_ip = None # This is for a NS configuration
2036
2037 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2038
2039 # store rw_mgmt_ip in deploy params for later replacement
2040 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2041
2042 # n2vc_redesign STEP 6 Execute initial config primitive
2043 step = "execute initial config primitive"
2044
2045 # wait for dependent primitives execution (NS -> VNF -> VDU)
2046 if initial_config_primitive_list:
2047 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2048
2049 # stage, in function of element type: vdu, kdu, vnf or ns
2050 my_vca = vca_deployed_list[vca_index]
2051 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2052 # VDU or KDU
2053 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2054 elif my_vca.get("member-vnf-index"):
2055 # VNF
2056 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2057 else:
2058 # NS
2059 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2060
2061 self._write_configuration_status(
2062 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2063 )
2064
2065 self._write_op_status(op_id=nslcmop_id, stage=stage)
2066
2067 check_if_terminated_needed = True
2068 for initial_config_primitive in initial_config_primitive_list:
2069 # adding information on the vca_deployed if it is a NS execution environment
2070 if not vca_deployed["member-vnf-index"]:
2071 deploy_params["ns_config_info"] = json.dumps(
2072 self._get_ns_config_info(nsr_id)
2073 )
2074 # TODO check if already done
2075 primitive_params_ = self._map_primitive_params(
2076 initial_config_primitive, {}, deploy_params
2077 )
2078
2079 step = "execute primitive '{}' params '{}'".format(
2080 initial_config_primitive["name"], primitive_params_
2081 )
2082 self.logger.debug(logging_text + step)
2083 await self.vca_map[vca_type].exec_primitive(
2084 ee_id=ee_id,
2085 primitive_name=initial_config_primitive["name"],
2086 params_dict=primitive_params_,
2087 db_dict=db_dict,
2088 vca_id=vca_id,
2089 vca_type=vca_type,
2090 )
2091 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2092 if check_if_terminated_needed:
2093 if config_descriptor.get("terminate-config-primitive"):
2094 self.update_db_2(
2095 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2096 )
2097 check_if_terminated_needed = False
2098
2099 # TODO register in database that primitive is done
2100
2101 # STEP 7 Configure metrics
2102 if vca_type == "helm" or vca_type == "helm-v3":
2103 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2104 ee_id=ee_id,
2105 artifact_path=artifact_path,
2106 ee_config_descriptor=ee_config_descriptor,
2107 vnfr_id=vnfr_id,
2108 nsr_id=nsr_id,
2109 target_ip=rw_mgmt_ip,
2110 )
2111 if prometheus_jobs:
2112 self.update_db_2(
2113 "nsrs",
2114 nsr_id,
2115 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2116 )
2117
2118 for job in prometheus_jobs:
2119 self.db.set_one(
2120 "prometheus_jobs",
2121 {"job_name": job["job_name"]},
2122 job,
2123 upsert=True,
2124 fail_on_empty=False,
2125 )
2126
2127 step = "instantiated at VCA"
2128 self.logger.debug(logging_text + step)
2129
2130 self._write_configuration_status(
2131 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2132 )
2133
2134 except Exception as e: # TODO not use Exception but N2VC exception
2135 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2136 if not isinstance(
2137 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2138 ):
2139 self.logger.error(
2140 "Exception while {} : {}".format(step, e), exc_info=True
2141 )
2142 self._write_configuration_status(
2143 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2144 )
2145 raise LcmException("{} {}".format(step, e)) from e
2146
2147 def _write_ns_status(
2148 self,
2149 nsr_id: str,
2150 ns_state: str,
2151 current_operation: str,
2152 current_operation_id: str,
2153 error_description: str = None,
2154 error_detail: str = None,
2155 other_update: dict = None,
2156 ):
2157 """
2158 Update db_nsr fields.
2159 :param nsr_id:
2160 :param ns_state:
2161 :param current_operation:
2162 :param current_operation_id:
2163 :param error_description:
2164 :param error_detail:
2165 :param other_update: Other required changes at database if provided, will be cleared
2166 :return:
2167 """
2168 try:
2169 db_dict = other_update or {}
2170 db_dict[
2171 "_admin.nslcmop"
2172 ] = current_operation_id # for backward compatibility
2173 db_dict["_admin.current-operation"] = current_operation_id
2174 db_dict["_admin.operation-type"] = (
2175 current_operation if current_operation != "IDLE" else None
2176 )
2177 db_dict["currentOperation"] = current_operation
2178 db_dict["currentOperationID"] = current_operation_id
2179 db_dict["errorDescription"] = error_description
2180 db_dict["errorDetail"] = error_detail
2181
2182 if ns_state:
2183 db_dict["nsState"] = ns_state
2184 self.update_db_2("nsrs", nsr_id, db_dict)
2185 except DbException as e:
2186 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2187
2188 def _write_op_status(
2189 self,
2190 op_id: str,
2191 stage: list = None,
2192 error_message: str = None,
2193 queuePosition: int = 0,
2194 operation_state: str = None,
2195 other_update: dict = None,
2196 ):
2197 try:
2198 db_dict = other_update or {}
2199 db_dict["queuePosition"] = queuePosition
2200 if isinstance(stage, list):
2201 db_dict["stage"] = stage[0]
2202 db_dict["detailed-status"] = " ".join(stage)
2203 elif stage is not None:
2204 db_dict["stage"] = str(stage)
2205
2206 if error_message is not None:
2207 db_dict["errorMessage"] = error_message
2208 if operation_state is not None:
2209 db_dict["operationState"] = operation_state
2210 db_dict["statusEnteredTime"] = time()
2211 self.update_db_2("nslcmops", op_id, db_dict)
2212 except DbException as e:
2213 self.logger.warn(
2214 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2215 )
2216
2217 def _write_all_config_status(self, db_nsr: dict, status: str):
2218 try:
2219 nsr_id = db_nsr["_id"]
2220 # configurationStatus
2221 config_status = db_nsr.get("configurationStatus")
2222 if config_status:
2223 db_nsr_update = {
2224 "configurationStatus.{}.status".format(index): status
2225 for index, v in enumerate(config_status)
2226 if v
2227 }
2228 # update status
2229 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2230
2231 except DbException as e:
2232 self.logger.warn(
2233 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2234 )
2235
2236 def _write_configuration_status(
2237 self,
2238 nsr_id: str,
2239 vca_index: int,
2240 status: str = None,
2241 element_under_configuration: str = None,
2242 element_type: str = None,
2243 other_update: dict = None,
2244 ):
2245
2246 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2247 # .format(vca_index, status))
2248
2249 try:
2250 db_path = "configurationStatus.{}.".format(vca_index)
2251 db_dict = other_update or {}
2252 if status:
2253 db_dict[db_path + "status"] = status
2254 if element_under_configuration:
2255 db_dict[
2256 db_path + "elementUnderConfiguration"
2257 ] = element_under_configuration
2258 if element_type:
2259 db_dict[db_path + "elementType"] = element_type
2260 self.update_db_2("nsrs", nsr_id, db_dict)
2261 except DbException as e:
2262 self.logger.warn(
2263 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2264 status, nsr_id, vca_index, e
2265 )
2266 )
2267
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and wait until the result is written at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # ask the placement module (PLA) through kafka; the answer arrives at the database
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database every db_poll_interval seconds, up to 10 polls total
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                # re-read the operation record: PLA (possibly another worker) writes _admin.pla
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a placement decision or without a matching vnfr
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs in place so the caller sees the chosen vim-account-id
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2316
2317 def update_nsrs_with_pla_result(self, params):
2318 try:
2319 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2320 self.update_db_2(
2321 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2322 )
2323 except Exception as e:
2324 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2325
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: deploys KDUs, the VIM scenario (through RO) and the
        execution environments (N2VC), then waits for all spawned tasks and writes the final
        NS/operation status at database and through a kafka "instantiated" message.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Result is reported at database (nsrs/nslcmops) and via kafka
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams are stored JSON-encoded; decode in place
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so testing a string id for
                # membership never matches and the vnfd is re-read for every vnfr that
                # shares it — confirm whether deduplication was intended here
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with the N2VC deployments below
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if any
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one deployment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms, if any
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of the stuff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE: this 'exc' intentionally shadows the outer one; the outer
                # value was already appended to error_list above
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify the result through kafka so NBI/subscribers can react
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2811
2812 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2813 if vnfd_id not in cached_vnfds:
2814 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2815 return cached_vnfds[vnfd_id]
2816
2817 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2818 if vnf_profile_id not in cached_vnfrs:
2819 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2820 "vnfrs",
2821 {
2822 "member-vnf-index-ref": vnf_profile_id,
2823 "nsr-id-ref": nsr_id,
2824 },
2825 )
2826 return cached_vnfrs[vnf_profile_id]
2827
2828 def _is_deployed_vca_in_relation(
2829 self, vca: DeployedVCA, relation: Relation
2830 ) -> bool:
2831 found = False
2832 for endpoint in (relation.provider, relation.requirer):
2833 if endpoint["kdu-resource-profile-id"]:
2834 continue
2835 found = (
2836 vca.vnf_profile_id == endpoint.vnf_profile_id
2837 and vca.vdu_profile_id == endpoint.vdu_profile_id
2838 and vca.execution_environment_ref == endpoint.execution_environment_ref
2839 )
2840 if found:
2841 break
2842 return found
2843
2844 def _update_ee_relation_data_with_implicit_data(
2845 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2846 ):
2847 ee_relation_data = safe_get_ee_relation(
2848 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2849 )
2850 ee_relation_level = EELevel.get_level(ee_relation_data)
2851 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2852 "execution-environment-ref"
2853 ]:
2854 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2855 vnfd_id = vnf_profile["vnfd-id"]
2856 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2857 entity_id = (
2858 vnfd_id
2859 if ee_relation_level == EELevel.VNF
2860 else ee_relation_data["vdu-profile-id"]
2861 )
2862 ee = get_juju_ee_ref(db_vnfd, entity_id)
2863 if not ee:
2864 raise Exception(
2865 f"not execution environments found for ee_relation {ee_relation_data}"
2866 )
2867 ee_relation_data["execution-environment-ref"] = ee["id"]
2868 return ee_relation_data
2869
2870 def _get_ns_relations(
2871 self,
2872 nsr_id: str,
2873 nsd: Dict[str, Any],
2874 vca: DeployedVCA,
2875 cached_vnfds: Dict[str, Any],
2876 ) -> List[Relation]:
2877 relations = []
2878 db_ns_relations = get_ns_configuration_relation_list(nsd)
2879 for r in db_ns_relations:
2880 provider_dict = None
2881 requirer_dict = None
2882 if all(key in r for key in ("provider", "requirer")):
2883 provider_dict = r["provider"]
2884 requirer_dict = r["requirer"]
2885 elif "entities" in r:
2886 provider_id = r["entities"][0]["id"]
2887 provider_dict = {
2888 "nsr-id": nsr_id,
2889 "endpoint": r["entities"][0]["endpoint"],
2890 }
2891 if provider_id != nsd["id"]:
2892 provider_dict["vnf-profile-id"] = provider_id
2893 requirer_id = r["entities"][1]["id"]
2894 requirer_dict = {
2895 "nsr-id": nsr_id,
2896 "endpoint": r["entities"][1]["endpoint"],
2897 }
2898 if requirer_id != nsd["id"]:
2899 requirer_dict["vnf-profile-id"] = requirer_id
2900 else:
2901 raise Exception(
2902 "provider/requirer or entities must be included in the relation."
2903 )
2904 relation_provider = self._update_ee_relation_data_with_implicit_data(
2905 nsr_id, nsd, provider_dict, cached_vnfds
2906 )
2907 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2908 nsr_id, nsd, requirer_dict, cached_vnfds
2909 )
2910 provider = EERelation(relation_provider)
2911 requirer = EERelation(relation_requirer)
2912 relation = Relation(r["name"], provider, requirer)
2913 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2914 if vca_in_relation:
2915 relations.append(relation)
2916 return relations
2917
2918 def _get_vnf_relations(
2919 self,
2920 nsr_id: str,
2921 nsd: Dict[str, Any],
2922 vca: DeployedVCA,
2923 cached_vnfds: Dict[str, Any],
2924 ) -> List[Relation]:
2925 relations = []
2926 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2927 vnf_profile_id = vnf_profile["id"]
2928 vnfd_id = vnf_profile["vnfd-id"]
2929 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2930 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2931 for r in db_vnf_relations:
2932 provider_dict = None
2933 requirer_dict = None
2934 if all(key in r for key in ("provider", "requirer")):
2935 provider_dict = r["provider"]
2936 requirer_dict = r["requirer"]
2937 elif "entities" in r:
2938 provider_id = r["entities"][0]["id"]
2939 provider_dict = {
2940 "nsr-id": nsr_id,
2941 "vnf-profile-id": vnf_profile_id,
2942 "endpoint": r["entities"][0]["endpoint"],
2943 }
2944 if provider_id != vnfd_id:
2945 provider_dict["vdu-profile-id"] = provider_id
2946 requirer_id = r["entities"][1]["id"]
2947 requirer_dict = {
2948 "nsr-id": nsr_id,
2949 "vnf-profile-id": vnf_profile_id,
2950 "endpoint": r["entities"][1]["endpoint"],
2951 }
2952 if requirer_id != vnfd_id:
2953 requirer_dict["vdu-profile-id"] = requirer_id
2954 else:
2955 raise Exception(
2956 "provider/requirer or entities must be included in the relation."
2957 )
2958 relation_provider = self._update_ee_relation_data_with_implicit_data(
2959 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2960 )
2961 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2962 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2963 )
2964 provider = EERelation(relation_provider)
2965 requirer = EERelation(relation_requirer)
2966 relation = Relation(r["name"], provider, requirer)
2967 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2968 if vca_in_relation:
2969 relations.append(relation)
2970 return relations
2971
2972 def _get_kdu_resource_data(
2973 self,
2974 ee_relation: EERelation,
2975 db_nsr: Dict[str, Any],
2976 cached_vnfds: Dict[str, Any],
2977 ) -> DeployedK8sResource:
2978 nsd = get_nsd(db_nsr)
2979 vnf_profiles = get_vnf_profiles(nsd)
2980 vnfd_id = find_in_list(
2981 vnf_profiles,
2982 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
2983 )["vnfd-id"]
2984 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2985 kdu_resource_profile = get_kdu_resource_profile(
2986 db_vnfd, ee_relation.kdu_resource_profile_id
2987 )
2988 kdu_name = kdu_resource_profile["kdu-name"]
2989 deployed_kdu, _ = get_deployed_kdu(
2990 db_nsr.get("_admin", ()).get("deployed", ()),
2991 kdu_name,
2992 ee_relation.vnf_profile_id,
2993 )
2994 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
2995 return deployed_kdu
2996
2997 def _get_deployed_component(
2998 self,
2999 ee_relation: EERelation,
3000 db_nsr: Dict[str, Any],
3001 cached_vnfds: Dict[str, Any],
3002 ) -> DeployedComponent:
3003 nsr_id = db_nsr["_id"]
3004 deployed_component = None
3005 ee_level = EELevel.get_level(ee_relation)
3006 if ee_level == EELevel.NS:
3007 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3008 if vca:
3009 deployed_component = DeployedVCA(nsr_id, vca)
3010 elif ee_level == EELevel.VNF:
3011 vca = get_deployed_vca(
3012 db_nsr,
3013 {
3014 "vdu_id": None,
3015 "member-vnf-index": ee_relation.vnf_profile_id,
3016 "ee_descriptor_id": ee_relation.execution_environment_ref,
3017 },
3018 )
3019 if vca:
3020 deployed_component = DeployedVCA(nsr_id, vca)
3021 elif ee_level == EELevel.VDU:
3022 vca = get_deployed_vca(
3023 db_nsr,
3024 {
3025 "vdu_id": ee_relation.vdu_profile_id,
3026 "member-vnf-index": ee_relation.vnf_profile_id,
3027 "ee_descriptor_id": ee_relation.execution_environment_ref,
3028 },
3029 )
3030 if vca:
3031 deployed_component = DeployedVCA(nsr_id, vca)
3032 elif ee_level == EELevel.KDU:
3033 kdu_resource_data = self._get_kdu_resource_data(
3034 ee_relation, db_nsr, cached_vnfds
3035 )
3036 if kdu_resource_data:
3037 deployed_component = DeployedK8sResource(kdu_resource_data)
3038 return deployed_component
3039
3040 async def _add_relation(
3041 self,
3042 relation: Relation,
3043 vca_type: str,
3044 db_nsr: Dict[str, Any],
3045 cached_vnfds: Dict[str, Any],
3046 cached_vnfrs: Dict[str, Any],
3047 ) -> bool:
3048 deployed_provider = self._get_deployed_component(
3049 relation.provider, db_nsr, cached_vnfds
3050 )
3051 deployed_requirer = self._get_deployed_component(
3052 relation.requirer, db_nsr, cached_vnfds
3053 )
3054 if (
3055 deployed_provider
3056 and deployed_requirer
3057 and deployed_provider.config_sw_installed
3058 and deployed_requirer.config_sw_installed
3059 ):
3060 provider_db_vnfr = (
3061 self._get_vnfr(
3062 relation.provider.nsr_id,
3063 relation.provider.vnf_profile_id,
3064 cached_vnfrs,
3065 )
3066 if relation.provider.vnf_profile_id
3067 else None
3068 )
3069 requirer_db_vnfr = (
3070 self._get_vnfr(
3071 relation.requirer.nsr_id,
3072 relation.requirer.vnf_profile_id,
3073 cached_vnfrs,
3074 )
3075 if relation.requirer.vnf_profile_id
3076 else None
3077 )
3078 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3079 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3080 provider_relation_endpoint = RelationEndpoint(
3081 deployed_provider.ee_id,
3082 provider_vca_id,
3083 relation.provider.endpoint,
3084 )
3085 requirer_relation_endpoint = RelationEndpoint(
3086 deployed_requirer.ee_id,
3087 requirer_vca_id,
3088 relation.requirer.endpoint,
3089 )
3090 await self.vca_map[vca_type].add_relation(
3091 provider=provider_relation_endpoint,
3092 requirer=requirer_relation_endpoint,
3093 )
3094 # remove entry from relations list
3095 return True
3096 return False
3097
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Add all relations declared for the VCA at ``vca_index``.

        Polls every 5 seconds until every relation could be established (both
        peers deployed and configured) or ``timeout`` expires.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param vca_index: position of this VCA in _admin.deployed.VCA
        :param timeout: maximum seconds to spend adding relations
        :return: True on success (or when there is nothing to do), False on
            timeout or error
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            # caches shared by all relation lookups in this call
            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # iterate over a copy: successfully added relations are removed
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3170
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install a single KDU and update the nsrs/vnfrs records accordingly.

        :param nsr_id: NS record id
        :param nsr_db_path: dot-path of this KDU entry inside the nsr record
        :param vnfr_data: VNF record owning the KDU
        :param kdu_index: position of the KDU in the vnfr "kdur" list
        :param kdud: KDU descriptor taken from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster type/uuid, kdu model/name, namespace...
        :param k8params: KDU instantiation parameters
        :param timeout: seconds allowed for the install and each initial primitive
        :param vca_id: id of the VCA used to run primitives, if any
        :return: the kdu_instance name used for this deployment
        :raises: re-raises any error after recording it in nsrs/vnfrs
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Use the provided deployment name when given; otherwise let the
            # cluster connector generate an instance name.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            # Initial config primitives are executed here only when the KDU has
            # no juju execution environment reference to run them.
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives run in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3368
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one _install_kdu task per KDU present in the VNF records.

        Initializes "_admin.deployed.K8s" in the nsr record, writes one entry
        per KDU, and registers each install task in self.lcm_tasks and in
        task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id (used for task registration)
        :param db_vnfrs: dict of the VNF records of this NS
        :param db_vnfds: list of VNF descriptors
        :param task_instantiation_info: dict task -> description, updated here
        :raises LcmException: on descriptor or k8s-cluster errors
        """
        # Launch kdus if present in the descriptor

        # cache: cluster-type -> {external cluster id -> internal cluster id}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal id of a k8s cluster for cluster_type."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    # each helm cluster is synchronized at most once per call
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3640
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Register one instantiate_N2VC asyncio task per execution environment.

        For each juju/helm execution environment in descriptor_config, an
        existing VCA entry in db_nsr "_admin.deployed.VCA" is reused when its
        target matches; otherwise a new entry is created in the database.
        Execution environments of other types (e.g. scripts) are skipped.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                # proxy charm by default; native when no charm or proxy=False
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # look for an already existing VCA record matching this target
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                # vca_index holds the last index of the loop; the new record
                # goes right after it
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3793
3794 @staticmethod
3795 def _create_nslcmop(nsr_id, operation, params):
3796 """
3797 Creates a ns-lcm-opp content to be stored at database.
3798 :param nsr_id: internal id of the instance
3799 :param operation: instantiate, terminate, scale, action, ...
3800 :param params: user parameters for the operation
3801 :return: dictionary following SOL005 format
3802 """
3803 # Raise exception if invalid arguments
3804 if not (nsr_id and operation and params):
3805 raise LcmException(
3806 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3807 )
3808 now = time()
3809 _id = str(uuid4())
3810 nslcmop = {
3811 "id": _id,
3812 "_id": _id,
3813 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3814 "operationState": "PROCESSING",
3815 "statusEnteredTime": now,
3816 "nsInstanceId": nsr_id,
3817 "lcmOperationType": operation,
3818 "startTime": now,
3819 "isAutomaticInvocation": False,
3820 "operationParams": params,
3821 "isCancelPending": False,
3822 "links": {
3823 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3824 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3825 },
3826 }
3827 return nslcmop
3828
3829 def _format_additional_params(self, params):
3830 params = params or {}
3831 for key, value in params.items():
3832 if str(value).startswith("!!yaml "):
3833 params[key] = yaml.safe_load(value[7:])
3834 return params
3835
3836 def _get_terminate_primitive_params(self, seq, vnf_index):
3837 primitive = seq.get("name")
3838 primitive_params = {}
3839 params = {
3840 "member_vnf_index": vnf_index,
3841 "primitive": primitive,
3842 "primitive_params": primitive_params,
3843 }
3844 desc_params = {}
3845 return self._map_primitive_params(seq, params, desc_params)
3846
3847 # sub-operations
3848
3849 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3850 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3851 if op.get("operationState") == "COMPLETED":
3852 # b. Skip sub-operation
3853 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3854 return self.SUBOPERATION_STATUS_SKIP
3855 else:
3856 # c. retry executing sub-operation
3857 # The sub-operation exists, and operationState != 'COMPLETED'
3858 # Update operationState = 'PROCESSING' to indicate a retry.
3859 operationState = "PROCESSING"
3860 detailed_status = "In progress"
3861 self._update_suboperation_status(
3862 db_nslcmop, op_index, operationState, detailed_status
3863 )
3864 # Return the sub-operation index
3865 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3866 # with arguments extracted from the sub-operation
3867 return op_index
3868
3869 # Find a sub-operation where all keys in a matching dictionary must match
3870 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3871 def _find_suboperation(self, db_nslcmop, match):
3872 if db_nslcmop and match:
3873 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3874 for i, op in enumerate(op_list):
3875 if all(op.get(k) == match[k] for k in match):
3876 return i
3877 return self.SUBOPERATION_STATUS_NOT_FOUND
3878
3879 # Update status for a sub-operation given its index
3880 def _update_suboperation_status(
3881 self, db_nslcmop, op_index, operationState, detailed_status
3882 ):
3883 # Update DB for HA tasks
3884 q_filter = {"_id": db_nslcmop["_id"]}
3885 update_dict = {
3886 "_admin.operations.{}.operationState".format(op_index): operationState,
3887 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3888 }
3889 self.db.set_one(
3890 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3891 )
3892
3893 # Add sub-operation, return the index of the added sub-operation
3894 # Optionally, set operationState, detailed-status, and operationType
3895 # Status and type are currently set for 'scale' sub-operations:
3896 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3897 # 'detailed-status' : status message
3898 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3899 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3900 def _add_suboperation(
3901 self,
3902 db_nslcmop,
3903 vnf_index,
3904 vdu_id,
3905 vdu_count_index,
3906 vdu_name,
3907 primitive,
3908 mapped_primitive_params,
3909 operationState=None,
3910 detailed_status=None,
3911 operationType=None,
3912 RO_nsr_id=None,
3913 RO_scaling_info=None,
3914 ):
3915 if not db_nslcmop:
3916 return self.SUBOPERATION_STATUS_NOT_FOUND
3917 # Get the "_admin.operations" list, if it exists
3918 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3919 op_list = db_nslcmop_admin.get("operations")
3920 # Create or append to the "_admin.operations" list
3921 new_op = {
3922 "member_vnf_index": vnf_index,
3923 "vdu_id": vdu_id,
3924 "vdu_count_index": vdu_count_index,
3925 "primitive": primitive,
3926 "primitive_params": mapped_primitive_params,
3927 }
3928 if operationState:
3929 new_op["operationState"] = operationState
3930 if detailed_status:
3931 new_op["detailed-status"] = detailed_status
3932 if operationType:
3933 new_op["lcmOperationType"] = operationType
3934 if RO_nsr_id:
3935 new_op["RO_nsr_id"] = RO_nsr_id
3936 if RO_scaling_info:
3937 new_op["RO_scaling_info"] = RO_scaling_info
3938 if not op_list:
3939 # No existing operations, create key 'operations' with current operation as first list element
3940 db_nslcmop_admin.update({"operations": [new_op]})
3941 op_list = db_nslcmop_admin.get("operations")
3942 else:
3943 # Existing operations, append operation to list
3944 op_list.append(new_op)
3945
3946 db_nslcmop_update = {"_admin.operations": op_list}
3947 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3948 op_index = len(op_list) - 1
3949 return op_index
3950
3951 # Helper methods for scale() sub-operations
3952
3953 # pre-scale/post-scale:
3954 # Check for 3 different cases:
3955 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3956 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3957 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3958 def _check_or_add_scale_suboperation(
3959 self,
3960 db_nslcmop,
3961 vnf_index,
3962 vnf_config_primitive,
3963 primitive_params,
3964 operationType,
3965 RO_nsr_id=None,
3966 RO_scaling_info=None,
3967 ):
3968 # Find this sub-operation
3969 if RO_nsr_id and RO_scaling_info:
3970 operationType = "SCALE-RO"
3971 match = {
3972 "member_vnf_index": vnf_index,
3973 "RO_nsr_id": RO_nsr_id,
3974 "RO_scaling_info": RO_scaling_info,
3975 }
3976 else:
3977 match = {
3978 "member_vnf_index": vnf_index,
3979 "primitive": vnf_config_primitive,
3980 "primitive_params": primitive_params,
3981 "lcmOperationType": operationType,
3982 }
3983 op_index = self._find_suboperation(db_nslcmop, match)
3984 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3985 # a. New sub-operation
3986 # The sub-operation does not exist, add it.
3987 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3988 # The following parameters are set to None for all kind of scaling:
3989 vdu_id = None
3990 vdu_count_index = None
3991 vdu_name = None
3992 if RO_nsr_id and RO_scaling_info:
3993 vnf_config_primitive = None
3994 primitive_params = None
3995 else:
3996 RO_nsr_id = None
3997 RO_scaling_info = None
3998 # Initial status for sub-operation
3999 operationState = "PROCESSING"
4000 detailed_status = "In progress"
4001 # Add sub-operation for pre/post-scaling (zero or more operations)
4002 self._add_suboperation(
4003 db_nslcmop,
4004 vnf_index,
4005 vdu_id,
4006 vdu_count_index,
4007 vdu_name,
4008 vnf_config_primitive,
4009 primitive_params,
4010 operationState,
4011 detailed_status,
4012 operationType,
4013 RO_nsr_id,
4014 RO_scaling_info,
4015 )
4016 return self.SUBOPERATION_STATUS_NEW
4017 else:
4018 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4019 # or op_index (operationState != 'COMPLETED')
4020 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4021
4022 # Function to return execution_environment id
4023
4024 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4025 # TODO vdu_index_count
4026 for vca in vca_deployed_list:
4027 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4028 return vca["ee_id"]
4029
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True).

        :param logging_text: prefix prepended to every log line written here
        :param db_nslcmop: database record of the current nslcmop
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (controller) identifier to use; None selects the default one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value kept for backward compatibility with pre-"type" records
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so the terminate primitive is tracked in the nslcmop
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4135
4136 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4137 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4138 namespace = "." + db_nsr["_id"]
4139 try:
4140 await self.n2vc.delete_namespace(
4141 namespace=namespace,
4142 total_timeout=self.timeout_charm_delete,
4143 vca_id=vca_id,
4144 )
4145 except N2VCNotFound: # already deleted. Skip
4146 pass
4147 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4148
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (classic, non-NG flavor): deletes the
        ns from the VIM, waits for the deletion to finish, then removes the
        nsd and vnfd descriptors from RO.

        :param logging_text: prefix prepended to every log line written here
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id:
        :param nslcmop_id:
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None. Raises LcmException when any deletion step failed.
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates failure descriptions; raises at the end
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # may be set from a previous (interrupted) terminate attempt
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                # the delete is asynchronous at RO; keep its action id to poll it
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                # poll RO every 5 seconds until the delete action finishes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # deletion still in progress at the VIM
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # delete action completed
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write status to db only when it changed, to avoid churn
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                # already deleted at RO: mark as such, not a failure
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns deletion above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete each vnfd pushed to RO for this ns (only when nothing failed so far)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4348
    async def terminate(self, nsr_id, nslcmop_id):
        """Task entry point: terminate a NS instance.

        Runs in three stages (tracked in ``stage``): 1) prepare, 2) execute
        per-VCA terminate primitives, 3) delete all execution environments,
        KDUs and the RO/VIM deployment. Final status is always written to the
        database in the ``finally`` block, and a kafka "terminated" message is
        emitted so that autoremove can act on it.

        :param nsr_id: id of the nsr to terminate
        :param nslcmop_id: id of the nslcmop that triggered this task
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human-readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # the operation may override the default terminate timeout
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy: the deployed info is consulted while db_nsr may change
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; final status written at finally
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}  # cache of vnfd by _id, to read each one only once
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching this VCA level:
                # ns-level, vdu-level, kdu-level or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # terminate primitives failed: final status written at finally
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                # NOTE(review): this rebinds the outer `exc`; only its string is used below
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                # propagate the terminated state to every VNFR of this NS
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                # notify via kafka so NBI/other modules can react (e.g. autoremove)
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4677
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, collecting errors and reporting progress.

        :param logging_text: prefix prepended to every log line written here
        :param created_tasks_info: dict mapping each asyncio task to a human-readable description
        :param timeout: overall timeout (seconds) for the whole set of tasks
        :param stage: 3-element status list; index 1 is updated with "done/total" progress
        :param nslcmop_id: nslcmop whose operation status is refreshed on each progress change
        :param nsr_id: when provided, errors are also written to this nsr record
        :return: list of error detail strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []  # descriptive "task: error" strings, returned
        error_list = []  # shorter task descriptions, used for errorDescription
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # global timeout expired: every still-pending task is an error
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types are logged without traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        # unexpected exception: log the full traceback
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4754
4755 @staticmethod
4756 def _map_primitive_params(primitive_desc, params, instantiation_params):
4757 """
4758 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4759 The default-value is used. If it is between < > it look for a value at instantiation_params
4760 :param primitive_desc: portion of VNFD/NSD that describes primitive
4761 :param params: Params provided by user
4762 :param instantiation_params: Instantiation params provided by user
4763 :return: a dictionary with the calculated params
4764 """
4765 calculated_params = {}
4766 for parameter in primitive_desc.get("parameter", ()):
4767 param_name = parameter["name"]
4768 if param_name in params:
4769 calculated_params[param_name] = params[param_name]
4770 elif "default-value" in parameter or "value" in parameter:
4771 if "value" in parameter:
4772 calculated_params[param_name] = parameter["value"]
4773 else:
4774 calculated_params[param_name] = parameter["default-value"]
4775 if (
4776 isinstance(calculated_params[param_name], str)
4777 and calculated_params[param_name].startswith("<")
4778 and calculated_params[param_name].endswith(">")
4779 ):
4780 if calculated_params[param_name][1:-1] in instantiation_params:
4781 calculated_params[param_name] = instantiation_params[
4782 calculated_params[param_name][1:-1]
4783 ]
4784 else:
4785 raise LcmException(
4786 "Parameter {} needed to execute primitive {} not provided".format(
4787 calculated_params[param_name], primitive_desc["name"]
4788 )
4789 )
4790 else:
4791 raise LcmException(
4792 "Parameter {} needed to execute primitive {} not provided".format(
4793 param_name, primitive_desc["name"]
4794 )
4795 )
4796
4797 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4798 calculated_params[param_name] = yaml.safe_dump(
4799 calculated_params[param_name], default_flow_style=True, width=256
4800 )
4801 elif isinstance(calculated_params[param_name], str) and calculated_params[
4802 param_name
4803 ].startswith("!!yaml "):
4804 calculated_params[param_name] = calculated_params[param_name][7:]
4805 if parameter.get("data-type") == "INTEGER":
4806 try:
4807 calculated_params[param_name] = int(calculated_params[param_name])
4808 except ValueError: # error converting string to int
4809 raise LcmException(
4810 "Parameter {} of primitive {} must be integer".format(
4811 param_name, primitive_desc["name"]
4812 )
4813 )
4814 elif parameter.get("data-type") == "BOOLEAN":
4815 calculated_params[param_name] = not (
4816 (str(calculated_params[param_name])).lower() == "false"
4817 )
4818
4819 # add always ns_config_info if primitive name is config
4820 if primitive_desc["name"] == "config":
4821 if "ns_config_info" in instantiation_params:
4822 calculated_params["ns_config_info"] = instantiation_params[
4823 "ns_config_info"
4824 ]
4825 return calculated_params
4826
4827 def _look_for_deployed_vca(
4828 self,
4829 deployed_vca,
4830 member_vnf_index,
4831 vdu_id,
4832 vdu_count_index,
4833 kdu_name=None,
4834 ee_descriptor_id=None,
4835 ):
4836 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4837 for vca in deployed_vca:
4838 if not vca:
4839 continue
4840 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4841 continue
4842 if (
4843 vdu_count_index is not None
4844 and vdu_count_index != vca["vdu_count_index"]
4845 ):
4846 continue
4847 if kdu_name and kdu_name != vca["kdu_name"]:
4848 continue
4849 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4850 continue
4851 break
4852 else:
4853 # vca_deployed not found
4854 raise LcmException(
4855 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4856 " is not deployed".format(
4857 member_vnf_index,
4858 vdu_id,
4859 vdu_count_index,
4860 kdu_name,
4861 ee_descriptor_id,
4862 )
4863 )
4864 # get ee_id
4865 ee_id = vca.get("ee_id")
4866 vca_type = vca.get(
4867 "type", "lxc_proxy_charm"
4868 ) # default value for backward compatibility - proxy charm
4869 if not ee_id:
4870 raise LcmException(
4871 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4872 "execution environment".format(
4873 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4874 )
4875 )
4876 return ee_id, vca_type
4877
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on an execution environment, with optional retries.

        :param ee_id: execution environment identifier
        :param primitive: primitive name; "config" gets its params wrapped in {"params": ...}
        :param primitive_params: already-mapped params for the primitive
        :param retries: number of extra attempts after a failure (0 = single attempt)
        :param retries_interval: seconds to sleep between attempts
        :param timeout: overall timeout per attempt; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: database info passed through to the connector for status updates
        :param vca_id: VCA (controller) identifier to use; None selects the default one
        :return: ("COMPLETED", output) on success; ("FAILED", error) when retries are
            exhausted; ("FAIL", error) on unexpected errors. LcmException and
            cancellation are re-raised instead of being mapped to a status tuple.
        """
        try:
            if primitive == "config":
                # the "config" primitive expects its params under a "params" key
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # attempts exhausted: report failure to the caller
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4936
4937 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4938 """
4939 Updating the vca_status with latest juju information in nsrs record
4940 :param: nsr_id: Id of the nsr
4941 :param: nslcmop_id: Id of the nslcmop
4942 :return: None
4943 """
4944
4945 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4946 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4947 vca_id = self.get_vca_id({}, db_nsr)
4948 if db_nsr["_admin"]["deployed"]["K8s"]:
4949 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4950 cluster_uuid, kdu_instance, cluster_type = (
4951 k8s["k8scluster-uuid"],
4952 k8s["kdu-instance"],
4953 k8s["k8scluster-type"],
4954 )
4955 await self._on_update_k8s_db(
4956 cluster_uuid=cluster_uuid,
4957 kdu_instance=kdu_instance,
4958 filter={"_id": nsr_id},
4959 vca_id=vca_id,
4960 cluster_type=cluster_type,
4961 )
4962 else:
4963 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4964 table, filter = "nsrs", {"_id": nsr_id}
4965 path = "_admin.deployed.VCA.{}.".format(vca_index)
4966 await self._on_update_n2vc_db(table, filter, path, {})
4967
4968 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4969 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4970
    async def action(self, nsr_id, nslcmop_id):
        """Execute a user-requested primitive (ns-action) on a NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmops record, locates the
        matching config-primitive in the descriptor, and runs it either through
        the k8s clients (KDU upgrade/rollback/status or helm/juju primitives)
        or through the VCA execution environment (charms). Operation state and
        status are persisted to the database inside the finally block.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmops record describing the action
        :return: (nslcmop_operation_state, detailed_status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params are stored JSON-encoded; decode them once here
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored JSON-encoded as well
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound in the vnf_index branch above;
            # for a pure NS-level action this line would raise UnboundLocalError
            # (caught by the generic handler below) — confirm intended inputs.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU operations upgrade/rollback/status need no descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # collect every primitive name declared for the KDU; note the loop
                # variable shadows the "primitive" string read earlier — only
                # primitive_name is used from this point on
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # helm-chart primitives are executed as generic k8s primitives
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                # for/else: the else clause runs only when no deployed KDU matched
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        # "name:version" -> keep only the name part
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        # small margin so the inner call times out first
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # any other KDU primitive declared in the descriptor
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm-based primitive: run it through the VCA
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            # NOTE: returning inside "finally" suppresses any in-flight exception
            return nslcmop_operation_state, detailed_status
5334
5335 async def terminate_vdus(
5336 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5337 ):
5338 """This method terminates VDUs
5339
5340 Args:
5341 db_vnfr: VNF instance record
5342 member_vnf_index: VNF index to identify the VDUs to be removed
5343 db_nsr: NS instance record
5344 update_db_nslcmops: Nslcmop update record
5345 """
5346 vca_scaling_info = []
5347 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5348 scaling_info["scaling_direction"] = "IN"
5349 scaling_info["vdu-delete"] = {}
5350 scaling_info["kdu-delete"] = {}
5351 db_vdur = db_vnfr.get("vdur")
5352 vdur_list = copy(db_vdur)
5353 count_index = 0
5354 for index, vdu in enumerate(vdur_list):
5355 vca_scaling_info.append(
5356 {
5357 "osm_vdu_id": vdu["vdu-id-ref"],
5358 "member-vnf-index": member_vnf_index,
5359 "type": "delete",
5360 "vdu_index": count_index,
5361 })
5362 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5363 scaling_info["vdu"].append(
5364 {
5365 "name": vdu.get("name") or vdu.get("vdu-name"),
5366 "vdu_id": vdu["vdu-id-ref"],
5367 "interface": [],
5368 })
5369 for interface in vdu["interfaces"]:
5370 scaling_info["vdu"][index]["interface"].append(
5371 {
5372 "name": interface["name"],
5373 "ip_address": interface["ip-address"],
5374 "mac_address": interface.get("mac-address"),
5375 })
5376 self.logger.info("NS update scaling info{}".format(scaling_info))
5377 stage[2] = "Terminating VDUs"
5378 if scaling_info.get("vdu-delete"):
5379 # scale_process = "RO"
5380 if self.ro_config.get("ng"):
5381 await self._scale_ng_ro(
5382 logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
5383 )
5384
5385 async def remove_vnf(
5386 self, nsr_id, nslcmop_id, vnf_instance_id
5387 ):
5388 """This method is to Remove VNF instances from NS.
5389
5390 Args:
5391 nsr_id: NS instance id
5392 nslcmop_id: nslcmop id of update
5393 vnf_instance_id: id of the VNF instance to be removed
5394
5395 Returns:
5396 result: (str, str) COMPLETED/FAILED, details
5397 """
5398 try:
5399 db_nsr_update = {}
5400 logging_text = "Task ns={} update ".format(nsr_id)
5401 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5402 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5403 if check_vnfr_count > 1:
5404 stage = ["", "", ""]
5405 step = "Getting nslcmop from database"
5406 self.logger.debug(step + " after having waited for previous tasks to be completed")
5407 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5408 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5409 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5410 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5411 """ db_vnfr = self.db.get_one(
5412 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5413
5414 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5415 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5416
5417 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5418 constituent_vnfr.remove(db_vnfr.get("_id"))
5419 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
5420 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5421 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5422 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5423 return "COMPLETED", "Done"
5424 else:
5425 step = "Terminate VNF Failed with"
5426 raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
5427 vnf_instance_id))
5428 except (LcmException, asyncio.CancelledError):
5429 raise
5430 except Exception as e:
5431 self.logger.debug("Error removing VNF {}".format(e))
5432 return "FAILED", "Error removing VNF {}".format(e)
5433
    async def _ns_redeploy_vnf(
        self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the existing VDUs of the VNF, rewrites the vnfrs record
        (connection points, vdur, revision) from the new descriptor, and asks
        NG-RO to instantiate the new resources.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the VNF connection points from the descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(
                    vdud, db_vnfd
                )
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is populated below but not used
                # further in this method — confirm whether it is still needed.
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index is never incremented, so every VDU is
                # registered with index 0 — confirm against _scale_ng_ro semantics.
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info))
                await self._scale_ng_ro(
                    logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5537
5538 async def _ns_charm_upgrade(
5539 self,
5540 ee_id,
5541 charm_id,
5542 charm_type,
5543 path,
5544 timeout: float = None,
5545 ) -> (str, str):
5546 """This method upgrade charms in VNF instances
5547
5548 Args:
5549 ee_id: Execution environment id
5550 path: Local path to the charm
5551 charm_id: charm-id
5552 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5553 timeout: (Float) Timeout for the ns update operation
5554
5555 Returns:
5556 result: (str, str) COMPLETED/FAILED, details
5557 """
5558 try:
5559 charm_type = charm_type or "lxc_proxy_charm"
5560 output = await self.vca_map[charm_type].upgrade_charm(
5561 ee_id=ee_id,
5562 path=path,
5563 charm_id=charm_id,
5564 charm_type=charm_type,
5565 timeout=timeout or self.timeout_ns_update,
5566 )
5567
5568 if output:
5569 return "COMPLETED", output
5570
5571 except (LcmException, asyncio.CancelledError):
5572 raise
5573
5574 except Exception as e:
5575
5576 self.logger.debug("Error upgrading charm {}".format(path))
5577
5578 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5579
    async def update(self, nsr_id, nslcmop_id):
        """Update NS according to different update types

        This method performs upgrade of VNF instances then updates the revision
        number in VNF record

        Args:
            nsr_id: Network service will be updated
            nslcmop_id: ns lcm operation id

        Returns:
            (nslcmop_operation_state, detailed_status) tuple.
            It may raise DbException, LcmException, N2VCException, K8sException

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # Set the required variables to be filled up later
        db_nsr = None
        db_nslcmop_update = {}
        vnfr_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        error_description_nslcmop = ""
        exc = None
        change_type = "updated"
        detailed_status = ""

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="UPDATING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            db_nslcmop = self.db.get_one(
                "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
            )
            update_type = db_nslcmop["operationParams"]["updateType"]

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            db_nsr_update["operational-status"] = "updating"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            if update_type == "CHANGE_VNFPKG":

                # Get the input parameters given through update request
                vnf_instance_id = db_nslcmop["operationParams"][
                    "changeVnfPackageData"
                ].get("vnfInstanceId")

                vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
                    "vnfdId"
                )
                timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")

                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
                )

                step = "Getting vnfds from database"
                # Latest VNFD
                latest_vnfd = self.db.get_one(
                    "vnfds", {"_id": vnfd_id}, fail_on_empty=False
                )
                latest_vnfd_revision = latest_vnfd["_admin"].get("revision")

                # Current VNFD
                current_vnf_revision = db_vnfr.get("revision", 1)
                current_vnfd = self.db.get_one(
                    "vnfds_revisions",
                    {"_id": vnfd_id + ":" + str(current_vnf_revision)},
                    fail_on_empty=False,
                )
                # Charm artifact paths will be filled up later
                (
                    current_charm_artifact_path,
                    target_charm_artifact_path,
                    charm_artifact_paths,
                ) = ([], [], [])

                step = "Checking if revision has changed in VNFD"
                if current_vnf_revision != latest_vnfd_revision:

                    change_type = "policy_updated"

                    # There is new revision of VNFD, update operation is required
                    current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
                    latest_vnfd_path = vnfd_id

                    step = "Removing the VNFD packages if they exist in the local path"
                    shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
                    shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)

                    step = "Get the VNFD packages from FSMongo"
                    self.fs.sync(from_path=latest_vnfd_path)
                    self.fs.sync(from_path=current_vnfd_path)

                    step = (
                        "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                    )
                    base_folder = latest_vnfd["_admin"]["storage"]

                    for charm_index, charm_deployed in enumerate(
                        get_iterable(nsr_deployed, "VCA")
                    ):
                        vnf_index = db_vnfr.get("member-vnf-index-ref")

                        # Getting charm-id and charm-type
                        if charm_deployed.get("member-vnf-index") == vnf_index:
                            charm_id = self.get_vca_id(db_vnfr, db_nsr)
                            charm_type = charm_deployed.get("type")

                            # Getting ee-id
                            ee_id = charm_deployed.get("ee_id")

                            step = "Getting descriptor config"
                            descriptor_config = get_configuration(
                                current_vnfd, current_vnfd["id"]
                            )

                            if "execution-environment-list" in descriptor_config:
                                ee_list = descriptor_config.get(
                                    "execution-environment-list", []
                                )
                            else:
                                ee_list = []

                            # There could be several charm used in the same VNF
                            for ee_item in ee_list:
                                if ee_item.get("juju"):

                                    step = "Getting charm name"
                                    charm_name = ee_item["juju"].get("charm")

                                    step = "Setting Charm artifact paths"
                                    current_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            base_folder,
                                            charm_name,
                                            charm_type,
                                            current_vnf_revision,
                                        )
                                    )
                                    target_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            base_folder,
                                            charm_name,
                                            charm_type,
                                        )
                                    )

                            # zip() yields a single-use iterator; it is
                            # materialized once via list() in the branches below
                            charm_artifact_paths = zip(
                                current_charm_artifact_path, target_charm_artifact_path
                            )

                    step = "Checking if software version has changed in VNFD"
                    if find_software_version(current_vnfd) != find_software_version(
                        latest_vnfd
                    ):

                        step = "Checking if existing VNF has charm"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if current_charm_path:
                                raise LcmException(
                                    "Software version change is not supported as VNF instance {} has charm.".format(
                                        vnf_instance_id
                                    )
                                )

                        # There is no change in the charm package, then redeploy the VNF
                        # based on new descriptor
                        step = "Redeploying VNF"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        (
                            result,
                            detailed_status
                        ) = await self._ns_redeploy_vnf(
                            nsr_id,
                            nslcmop_id,
                            latest_vnfd,
                            db_vnfr,
                            db_nsr
                        )
                        if result == "FAILED":
                            nslcmop_operation_state = result
                            error_description_nslcmop = detailed_status
                        db_nslcmop_update["detailed-status"] = detailed_status
                        self.logger.debug(
                            logging_text
                            + " step {} Done with result {} {}".format(
                                step, nslcmop_operation_state, detailed_status
                            )
                        )

                    else:
                        step = "Checking if any charm package has changed or not"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if (
                                current_charm_path
                                and target_charm_path
                                and self.check_charm_hash_changed(
                                    current_charm_path, target_charm_path
                                )
                            ):

                                step = "Checking whether VNF uses juju bundle"
                                if check_juju_bundle_existence(current_vnfd):

                                    raise LcmException(
                                        "Charm upgrade is not supported for the instance which"
                                        " uses juju-bundle: {}".format(
                                            check_juju_bundle_existence(current_vnfd)
                                        )
                                    )

                                step = "Upgrading Charm"
                                (
                                    result,
                                    detailed_status,
                                ) = await self._ns_charm_upgrade(
                                    ee_id=ee_id,
                                    charm_id=charm_id,
                                    charm_type=charm_type,
                                    path=self.fs.path + target_charm_path,
                                    timeout=timeout_seconds,
                                )

                                if result == "FAILED":
                                    nslcmop_operation_state = result
                                    error_description_nslcmop = detailed_status

                                db_nslcmop_update["detailed-status"] = detailed_status
                                self.logger.debug(
                                    logging_text
                                    + " step {} Done with result {} {}".format(
                                        step, nslcmop_operation_state, detailed_status
                                    )
                                )

                        step = "Updating policies"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        result = "COMPLETED"
                        detailed_status = "Done"
                        db_nslcmop_update["detailed-status"] = "Done"

                # If nslcmop_operation_state is None, so any operation is not failed.
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"

                    # If update CHANGE_VNFPKG nslcmop_operation is successful
                    # vnf revision need to be updated
                    vnfr_update["revision"] = latest_vnfd_revision
                    self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)

                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )
            elif update_type == "REMOVE_VNF":
                # This part is included in https://osm.etsi.org/gerrit/11876
                vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
                member_vnf_index = db_vnfr["member-vnf-index-ref"]
                step = "Removing VNF"
                (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                change_type = "vnf_terminated"
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            #  If nslcmop_operation_state is None, so any operation is not failed.
            #  All operations are executed in overall.
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"
            db_nsr_update["operational-status"] = old_operational_status

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                # NOTE(review): old_operational_status is only bound after the
                # nsrs read in the try block; a failure before that point would
                # raise NameError here — confirm ordering is always safe.
                db_nsr_update["operational-status"] = old_operational_status
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # NOTE(review): member_vnf_index is bound late in the try
                    # block; if change_type changed before the binding and an
                    # exception followed, this raises NameError (caught and
                    # logged by the except below) — confirm acceptable.
                    if change_type in ("vnf_terminated", "policy_updated"):
                        msg.update({"vnf_member_index": member_vnf_index})
                    await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
        return nslcmop_operation_state, detailed_status
5946
5947 async def scale(self, nsr_id, nslcmop_id):
5948 # Try to lock HA task here
5949 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5950 if not task_is_locked_by_me:
5951 return
5952
5953 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
5954 stage = ["", "", ""]
5955 tasks_dict_info = {}
5956 # ^ stage, step, VIM progress
5957 self.logger.debug(logging_text + "Enter")
5958 # get all needed from database
5959 db_nsr = None
5960 db_nslcmop_update = {}
5961 db_nsr_update = {}
5962 exc = None
5963 # in case of error, indicates what part of scale was failed to put nsr at error status
5964 scale_process = None
5965 old_operational_status = ""
5966 old_config_status = ""
5967 nsi_id = None
5968 try:
5969 # wait for any previous tasks in process
5970 step = "Waiting for previous operations to terminate"
5971 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5972 self._write_ns_status(
5973 nsr_id=nsr_id,
5974 ns_state=None,
5975 current_operation="SCALING",
5976 current_operation_id=nslcmop_id,
5977 )
5978
5979 step = "Getting nslcmop from database"
5980 self.logger.debug(
5981 step + " after having waited for previous tasks to be completed"
5982 )
5983 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5984
5985 step = "Getting nsr from database"
5986 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5987 old_operational_status = db_nsr["operational-status"]
5988 old_config_status = db_nsr["config-status"]
5989
5990 step = "Parsing scaling parameters"
5991 db_nsr_update["operational-status"] = "scaling"
5992 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5993 nsr_deployed = db_nsr["_admin"].get("deployed")
5994
5995 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
5996 "scaleByStepData"
5997 ]["member-vnf-index"]
5998 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
5999 "scaleByStepData"
6000 ]["scaling-group-descriptor"]
6001 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6002 # for backward compatibility
6003 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6004 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6005 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6006 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6007
6008 step = "Getting vnfr from database"
6009 db_vnfr = self.db.get_one(
6010 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6011 )
6012
6013 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6014
6015 step = "Getting vnfd from database"
6016 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6017
6018 base_folder = db_vnfd["_admin"]["storage"]
6019
6020 step = "Getting scaling-group-descriptor"
6021 scaling_descriptor = find_in_list(
6022 get_scaling_aspect(db_vnfd),
6023 lambda scale_desc: scale_desc["name"] == scaling_group,
6024 )
6025 if not scaling_descriptor:
6026 raise LcmException(
6027 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6028 "at vnfd:scaling-group-descriptor".format(scaling_group)
6029 )
6030
6031 step = "Sending scale order to VIM"
6032 # TODO check if ns is in a proper status
6033 nb_scale_op = 0
6034 if not db_nsr["_admin"].get("scaling-group"):
6035 self.update_db_2(
6036 "nsrs",
6037 nsr_id,
6038 {
6039 "_admin.scaling-group": [
6040 {"name": scaling_group, "nb-scale-op": 0}
6041 ]
6042 },
6043 )
6044 admin_scale_index = 0
6045 else:
6046 for admin_scale_index, admin_scale_info in enumerate(
6047 db_nsr["_admin"]["scaling-group"]
6048 ):
6049 if admin_scale_info["name"] == scaling_group:
6050 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6051 break
6052 else: # not found, set index one plus last element and add new entry with the name
6053 admin_scale_index += 1
6054 db_nsr_update[
6055 "_admin.scaling-group.{}.name".format(admin_scale_index)
6056 ] = scaling_group
6057
6058 vca_scaling_info = []
6059 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6060 if scaling_type == "SCALE_OUT":
6061 if "aspect-delta-details" not in scaling_descriptor:
6062 raise LcmException(
6063 "Aspect delta details not fount in scaling descriptor {}".format(
6064 scaling_descriptor["name"]
6065 )
6066 )
6067 # count if max-instance-count is reached
6068 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6069
6070 scaling_info["scaling_direction"] = "OUT"
6071 scaling_info["vdu-create"] = {}
6072 scaling_info["kdu-create"] = {}
6073 for delta in deltas:
6074 for vdu_delta in delta.get("vdu-delta", {}):
6075 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6076 # vdu_index also provides the number of instance of the targeted vdu
6077 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6078 cloud_init_text = self._get_vdu_cloud_init_content(
6079 vdud, db_vnfd
6080 )
6081 if cloud_init_text:
6082 additional_params = (
6083 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6084 or {}
6085 )
6086 cloud_init_list = []
6087
6088 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6089 max_instance_count = 10
6090 if vdu_profile and "max-number-of-instances" in vdu_profile:
6091 max_instance_count = vdu_profile.get(
6092 "max-number-of-instances", 10
6093 )
6094
6095 default_instance_num = get_number_of_instances(
6096 db_vnfd, vdud["id"]
6097 )
6098 instances_number = vdu_delta.get("number-of-instances", 1)
6099 nb_scale_op += instances_number
6100
6101 new_instance_count = nb_scale_op + default_instance_num
6102 # Control if new count is over max and vdu count is less than max.
6103 # Then assign new instance count
6104 if new_instance_count > max_instance_count > vdu_count:
6105 instances_number = new_instance_count - max_instance_count
6106 else:
6107 instances_number = instances_number
6108
6109 if new_instance_count > max_instance_count:
6110 raise LcmException(
6111 "reached the limit of {} (max-instance-count) "
6112 "scaling-out operations for the "
6113 "scaling-group-descriptor '{}'".format(
6114 nb_scale_op, scaling_group
6115 )
6116 )
6117 for x in range(vdu_delta.get("number-of-instances", 1)):
6118 if cloud_init_text:
6119 # TODO Information of its own ip is not available because db_vnfr is not updated.
6120 additional_params["OSM"] = get_osm_params(
6121 db_vnfr, vdu_delta["id"], vdu_index + x
6122 )
6123 cloud_init_list.append(
6124 self._parse_cloud_init(
6125 cloud_init_text,
6126 additional_params,
6127 db_vnfd["id"],
6128 vdud["id"],
6129 )
6130 )
6131 vca_scaling_info.append(
6132 {
6133 "osm_vdu_id": vdu_delta["id"],
6134 "member-vnf-index": vnf_index,
6135 "type": "create",
6136 "vdu_index": vdu_index + x,
6137 }
6138 )
6139 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6140 for kdu_delta in delta.get("kdu-resource-delta", {}):
6141 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6142 kdu_name = kdu_profile["kdu-name"]
6143 resource_name = kdu_profile.get("resource-name", "")
6144
6145 # Might have different kdus in the same delta
6146 # Should have list for each kdu
6147 if not scaling_info["kdu-create"].get(kdu_name, None):
6148 scaling_info["kdu-create"][kdu_name] = []
6149
6150 kdur = get_kdur(db_vnfr, kdu_name)
6151 if kdur.get("helm-chart"):
6152 k8s_cluster_type = "helm-chart-v3"
6153 self.logger.debug("kdur: {}".format(kdur))
6154 if (
6155 kdur.get("helm-version")
6156 and kdur.get("helm-version") == "v2"
6157 ):
6158 k8s_cluster_type = "helm-chart"
6159 elif kdur.get("juju-bundle"):
6160 k8s_cluster_type = "juju-bundle"
6161 else:
6162 raise LcmException(
6163 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6164 "juju-bundle. Maybe an old NBI version is running".format(
6165 db_vnfr["member-vnf-index-ref"], kdu_name
6166 )
6167 )
6168
6169 max_instance_count = 10
6170 if kdu_profile and "max-number-of-instances" in kdu_profile:
6171 max_instance_count = kdu_profile.get(
6172 "max-number-of-instances", 10
6173 )
6174
6175 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6176 deployed_kdu, _ = get_deployed_kdu(
6177 nsr_deployed, kdu_name, vnf_index
6178 )
6179 if deployed_kdu is None:
6180 raise LcmException(
6181 "KDU '{}' for vnf '{}' not deployed".format(
6182 kdu_name, vnf_index
6183 )
6184 )
6185 kdu_instance = deployed_kdu.get("kdu-instance")
6186 instance_num = await self.k8scluster_map[
6187 k8s_cluster_type
6188 ].get_scale_count(
6189 resource_name,
6190 kdu_instance,
6191 vca_id=vca_id,
6192 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6193 kdu_model=deployed_kdu.get("kdu-model"),
6194 )
6195 kdu_replica_count = instance_num + kdu_delta.get(
6196 "number-of-instances", 1
6197 )
6198
6199 # Control if new count is over max and instance_num is less than max.
6200 # Then assign max instance number to kdu replica count
6201 if kdu_replica_count > max_instance_count > instance_num:
6202 kdu_replica_count = max_instance_count
6203 if kdu_replica_count > max_instance_count:
6204 raise LcmException(
6205 "reached the limit of {} (max-instance-count) "
6206 "scaling-out operations for the "
6207 "scaling-group-descriptor '{}'".format(
6208 instance_num, scaling_group
6209 )
6210 )
6211
6212 for x in range(kdu_delta.get("number-of-instances", 1)):
6213 vca_scaling_info.append(
6214 {
6215 "osm_kdu_id": kdu_name,
6216 "member-vnf-index": vnf_index,
6217 "type": "create",
6218 "kdu_index": instance_num + x - 1,
6219 }
6220 )
6221 scaling_info["kdu-create"][kdu_name].append(
6222 {
6223 "member-vnf-index": vnf_index,
6224 "type": "create",
6225 "k8s-cluster-type": k8s_cluster_type,
6226 "resource-name": resource_name,
6227 "scale": kdu_replica_count,
6228 }
6229 )
6230 elif scaling_type == "SCALE_IN":
6231 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6232
6233 scaling_info["scaling_direction"] = "IN"
6234 scaling_info["vdu-delete"] = {}
6235 scaling_info["kdu-delete"] = {}
6236
6237 for delta in deltas:
6238 for vdu_delta in delta.get("vdu-delta", {}):
6239 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6240 min_instance_count = 0
6241 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6242 if vdu_profile and "min-number-of-instances" in vdu_profile:
6243 min_instance_count = vdu_profile["min-number-of-instances"]
6244
6245 default_instance_num = get_number_of_instances(
6246 db_vnfd, vdu_delta["id"]
6247 )
6248 instance_num = vdu_delta.get("number-of-instances", 1)
6249 nb_scale_op -= instance_num
6250
6251 new_instance_count = nb_scale_op + default_instance_num
6252
6253 if new_instance_count < min_instance_count < vdu_count:
6254 instances_number = min_instance_count - new_instance_count
6255 else:
6256 instances_number = instance_num
6257
6258 if new_instance_count < min_instance_count:
6259 raise LcmException(
6260 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6261 "scaling-group-descriptor '{}'".format(
6262 nb_scale_op, scaling_group
6263 )
6264 )
6265 for x in range(vdu_delta.get("number-of-instances", 1)):
6266 vca_scaling_info.append(
6267 {
6268 "osm_vdu_id": vdu_delta["id"],
6269 "member-vnf-index": vnf_index,
6270 "type": "delete",
6271 "vdu_index": vdu_index - 1 - x,
6272 }
6273 )
6274 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6275 for kdu_delta in delta.get("kdu-resource-delta", {}):
6276 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6277 kdu_name = kdu_profile["kdu-name"]
6278 resource_name = kdu_profile.get("resource-name", "")
6279
6280 if not scaling_info["kdu-delete"].get(kdu_name, None):
6281 scaling_info["kdu-delete"][kdu_name] = []
6282
6283 kdur = get_kdur(db_vnfr, kdu_name)
6284 if kdur.get("helm-chart"):
6285 k8s_cluster_type = "helm-chart-v3"
6286 self.logger.debug("kdur: {}".format(kdur))
6287 if (
6288 kdur.get("helm-version")
6289 and kdur.get("helm-version") == "v2"
6290 ):
6291 k8s_cluster_type = "helm-chart"
6292 elif kdur.get("juju-bundle"):
6293 k8s_cluster_type = "juju-bundle"
6294 else:
6295 raise LcmException(
6296 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6297 "juju-bundle. Maybe an old NBI version is running".format(
6298 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6299 )
6300 )
6301
6302 min_instance_count = 0
6303 if kdu_profile and "min-number-of-instances" in kdu_profile:
6304 min_instance_count = kdu_profile["min-number-of-instances"]
6305
6306 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6307 deployed_kdu, _ = get_deployed_kdu(
6308 nsr_deployed, kdu_name, vnf_index
6309 )
6310 if deployed_kdu is None:
6311 raise LcmException(
6312 "KDU '{}' for vnf '{}' not deployed".format(
6313 kdu_name, vnf_index
6314 )
6315 )
6316 kdu_instance = deployed_kdu.get("kdu-instance")
6317 instance_num = await self.k8scluster_map[
6318 k8s_cluster_type
6319 ].get_scale_count(
6320 resource_name,
6321 kdu_instance,
6322 vca_id=vca_id,
6323 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6324 kdu_model=deployed_kdu.get("kdu-model"),
6325 )
6326 kdu_replica_count = instance_num - kdu_delta.get(
6327 "number-of-instances", 1
6328 )
6329
6330 if kdu_replica_count < min_instance_count < instance_num:
6331 kdu_replica_count = min_instance_count
6332 if kdu_replica_count < min_instance_count:
6333 raise LcmException(
6334 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6335 "scaling-group-descriptor '{}'".format(
6336 instance_num, scaling_group
6337 )
6338 )
6339
6340 for x in range(kdu_delta.get("number-of-instances", 1)):
6341 vca_scaling_info.append(
6342 {
6343 "osm_kdu_id": kdu_name,
6344 "member-vnf-index": vnf_index,
6345 "type": "delete",
6346 "kdu_index": instance_num - x - 1,
6347 }
6348 )
6349 scaling_info["kdu-delete"][kdu_name].append(
6350 {
6351 "member-vnf-index": vnf_index,
6352 "type": "delete",
6353 "k8s-cluster-type": k8s_cluster_type,
6354 "resource-name": resource_name,
6355 "scale": kdu_replica_count,
6356 }
6357 )
6358
6359 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6360 vdu_delete = copy(scaling_info.get("vdu-delete"))
6361 if scaling_info["scaling_direction"] == "IN":
6362 for vdur in reversed(db_vnfr["vdur"]):
6363 if vdu_delete.get(vdur["vdu-id-ref"]):
6364 vdu_delete[vdur["vdu-id-ref"]] -= 1
6365 scaling_info["vdu"].append(
6366 {
6367 "name": vdur.get("name") or vdur.get("vdu-name"),
6368 "vdu_id": vdur["vdu-id-ref"],
6369 "interface": [],
6370 }
6371 )
6372 for interface in vdur["interfaces"]:
6373 scaling_info["vdu"][-1]["interface"].append(
6374 {
6375 "name": interface["name"],
6376 "ip_address": interface["ip-address"],
6377 "mac_address": interface.get("mac-address"),
6378 }
6379 )
6380 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6381
6382 # PRE-SCALE BEGIN
6383 step = "Executing pre-scale vnf-config-primitive"
6384 if scaling_descriptor.get("scaling-config-action"):
6385 for scaling_config_action in scaling_descriptor[
6386 "scaling-config-action"
6387 ]:
6388 if (
6389 scaling_config_action.get("trigger") == "pre-scale-in"
6390 and scaling_type == "SCALE_IN"
6391 ) or (
6392 scaling_config_action.get("trigger") == "pre-scale-out"
6393 and scaling_type == "SCALE_OUT"
6394 ):
6395 vnf_config_primitive = scaling_config_action[
6396 "vnf-config-primitive-name-ref"
6397 ]
6398 step = db_nslcmop_update[
6399 "detailed-status"
6400 ] = "executing pre-scale scaling-config-action '{}'".format(
6401 vnf_config_primitive
6402 )
6403
6404 # look for primitive
6405 for config_primitive in (
6406 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6407 ).get("config-primitive", ()):
6408 if config_primitive["name"] == vnf_config_primitive:
6409 break
6410 else:
6411 raise LcmException(
6412 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6413 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6414 "primitive".format(scaling_group, vnf_config_primitive)
6415 )
6416
6417 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6418 if db_vnfr.get("additionalParamsForVnf"):
6419 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6420
6421 scale_process = "VCA"
6422 db_nsr_update["config-status"] = "configuring pre-scaling"
6423 primitive_params = self._map_primitive_params(
6424 config_primitive, {}, vnfr_params
6425 )
6426
6427 # Pre-scale retry check: Check if this sub-operation has been executed before
6428 op_index = self._check_or_add_scale_suboperation(
6429 db_nslcmop,
6430 vnf_index,
6431 vnf_config_primitive,
6432 primitive_params,
6433 "PRE-SCALE",
6434 )
6435 if op_index == self.SUBOPERATION_STATUS_SKIP:
6436 # Skip sub-operation
6437 result = "COMPLETED"
6438 result_detail = "Done"
6439 self.logger.debug(
6440 logging_text
6441 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6442 vnf_config_primitive, result, result_detail
6443 )
6444 )
6445 else:
6446 if op_index == self.SUBOPERATION_STATUS_NEW:
6447 # New sub-operation: Get index of this sub-operation
6448 op_index = (
6449 len(db_nslcmop.get("_admin", {}).get("operations"))
6450 - 1
6451 )
6452 self.logger.debug(
6453 logging_text
6454 + "vnf_config_primitive={} New sub-operation".format(
6455 vnf_config_primitive
6456 )
6457 )
6458 else:
6459 # retry: Get registered params for this existing sub-operation
6460 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6461 op_index
6462 ]
6463 vnf_index = op.get("member_vnf_index")
6464 vnf_config_primitive = op.get("primitive")
6465 primitive_params = op.get("primitive_params")
6466 self.logger.debug(
6467 logging_text
6468 + "vnf_config_primitive={} Sub-operation retry".format(
6469 vnf_config_primitive
6470 )
6471 )
6472 # Execute the primitive, either with new (first-time) or registered (reintent) args
6473 ee_descriptor_id = config_primitive.get(
6474 "execution-environment-ref"
6475 )
6476 primitive_name = config_primitive.get(
6477 "execution-environment-primitive", vnf_config_primitive
6478 )
6479 ee_id, vca_type = self._look_for_deployed_vca(
6480 nsr_deployed["VCA"],
6481 member_vnf_index=vnf_index,
6482 vdu_id=None,
6483 vdu_count_index=None,
6484 ee_descriptor_id=ee_descriptor_id,
6485 )
6486 result, result_detail = await self._ns_execute_primitive(
6487 ee_id,
6488 primitive_name,
6489 primitive_params,
6490 vca_type=vca_type,
6491 vca_id=vca_id,
6492 )
6493 self.logger.debug(
6494 logging_text
6495 + "vnf_config_primitive={} Done with result {} {}".format(
6496 vnf_config_primitive, result, result_detail
6497 )
6498 )
6499 # Update operationState = COMPLETED | FAILED
6500 self._update_suboperation_status(
6501 db_nslcmop, op_index, result, result_detail
6502 )
6503
6504 if result == "FAILED":
6505 raise LcmException(result_detail)
6506 db_nsr_update["config-status"] = old_config_status
6507 scale_process = None
6508 # PRE-SCALE END
6509
6510 db_nsr_update[
6511 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6512 ] = nb_scale_op
6513 db_nsr_update[
6514 "_admin.scaling-group.{}.time".format(admin_scale_index)
6515 ] = time()
6516
6517 # SCALE-IN VCA - BEGIN
6518 if vca_scaling_info:
6519 step = db_nslcmop_update[
6520 "detailed-status"
6521 ] = "Deleting the execution environments"
6522 scale_process = "VCA"
6523 for vca_info in vca_scaling_info:
6524 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6525 member_vnf_index = str(vca_info["member-vnf-index"])
6526 self.logger.debug(
6527 logging_text + "vdu info: {}".format(vca_info)
6528 )
6529 if vca_info.get("osm_vdu_id"):
6530 vdu_id = vca_info["osm_vdu_id"]
6531 vdu_index = int(vca_info["vdu_index"])
6532 stage[
6533 1
6534 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6535 member_vnf_index, vdu_id, vdu_index
6536 )
6537 stage[2] = step = "Scaling in VCA"
6538 self._write_op_status(op_id=nslcmop_id, stage=stage)
6539 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6540 config_update = db_nsr["configurationStatus"]
6541 for vca_index, vca in enumerate(vca_update):
6542 if (
6543 (vca or vca.get("ee_id"))
6544 and vca["member-vnf-index"] == member_vnf_index
6545 and vca["vdu_count_index"] == vdu_index
6546 ):
6547 if vca.get("vdu_id"):
6548 config_descriptor = get_configuration(
6549 db_vnfd, vca.get("vdu_id")
6550 )
6551 elif vca.get("kdu_name"):
6552 config_descriptor = get_configuration(
6553 db_vnfd, vca.get("kdu_name")
6554 )
6555 else:
6556 config_descriptor = get_configuration(
6557 db_vnfd, db_vnfd["id"]
6558 )
6559 operation_params = (
6560 db_nslcmop.get("operationParams") or {}
6561 )
6562 exec_terminate_primitives = not operation_params.get(
6563 "skip_terminate_primitives"
6564 ) and vca.get("needed_terminate")
6565 task = asyncio.ensure_future(
6566 asyncio.wait_for(
6567 self.destroy_N2VC(
6568 logging_text,
6569 db_nslcmop,
6570 vca,
6571 config_descriptor,
6572 vca_index,
6573 destroy_ee=True,
6574 exec_primitives=exec_terminate_primitives,
6575 scaling_in=True,
6576 vca_id=vca_id,
6577 ),
6578 timeout=self.timeout_charm_delete,
6579 )
6580 )
6581 tasks_dict_info[task] = "Terminating VCA {}".format(
6582 vca.get("ee_id")
6583 )
6584 del vca_update[vca_index]
6585 del config_update[vca_index]
6586 # wait for pending tasks of terminate primitives
6587 if tasks_dict_info:
6588 self.logger.debug(
6589 logging_text
6590 + "Waiting for tasks {}".format(
6591 list(tasks_dict_info.keys())
6592 )
6593 )
6594 error_list = await self._wait_for_tasks(
6595 logging_text,
6596 tasks_dict_info,
6597 min(
6598 self.timeout_charm_delete, self.timeout_ns_terminate
6599 ),
6600 stage,
6601 nslcmop_id,
6602 )
6603 tasks_dict_info.clear()
6604 if error_list:
6605 raise LcmException("; ".join(error_list))
6606
6607 db_vca_and_config_update = {
6608 "_admin.deployed.VCA": vca_update,
6609 "configurationStatus": config_update,
6610 }
6611 self.update_db_2(
6612 "nsrs", db_nsr["_id"], db_vca_and_config_update
6613 )
6614 scale_process = None
6615 # SCALE-IN VCA - END
6616
6617 # SCALE RO - BEGIN
6618 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6619 scale_process = "RO"
6620 if self.ro_config.get("ng"):
6621 await self._scale_ng_ro(
6622 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6623 )
6624 scaling_info.pop("vdu-create", None)
6625 scaling_info.pop("vdu-delete", None)
6626
6627 scale_process = None
6628 # SCALE RO - END
6629
6630 # SCALE KDU - BEGIN
6631 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6632 scale_process = "KDU"
6633 await self._scale_kdu(
6634 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6635 )
6636 scaling_info.pop("kdu-create", None)
6637 scaling_info.pop("kdu-delete", None)
6638
6639 scale_process = None
6640 # SCALE KDU - END
6641
6642 if db_nsr_update:
6643 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6644
6645 # SCALE-UP VCA - BEGIN
6646 if vca_scaling_info:
6647 step = db_nslcmop_update[
6648 "detailed-status"
6649 ] = "Creating new execution environments"
6650 scale_process = "VCA"
6651 for vca_info in vca_scaling_info:
6652 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6653 member_vnf_index = str(vca_info["member-vnf-index"])
6654 self.logger.debug(
6655 logging_text + "vdu info: {}".format(vca_info)
6656 )
6657 vnfd_id = db_vnfr["vnfd-ref"]
6658 if vca_info.get("osm_vdu_id"):
6659 vdu_index = int(vca_info["vdu_index"])
6660 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6661 if db_vnfr.get("additionalParamsForVnf"):
6662 deploy_params.update(
6663 parse_yaml_strings(
6664 db_vnfr["additionalParamsForVnf"].copy()
6665 )
6666 )
6667 descriptor_config = get_configuration(
6668 db_vnfd, db_vnfd["id"]
6669 )
6670 if descriptor_config:
6671 vdu_id = None
6672 vdu_name = None
6673 kdu_name = None
6674 self._deploy_n2vc(
6675 logging_text=logging_text
6676 + "member_vnf_index={} ".format(member_vnf_index),
6677 db_nsr=db_nsr,
6678 db_vnfr=db_vnfr,
6679 nslcmop_id=nslcmop_id,
6680 nsr_id=nsr_id,
6681 nsi_id=nsi_id,
6682 vnfd_id=vnfd_id,
6683 vdu_id=vdu_id,
6684 kdu_name=kdu_name,
6685 member_vnf_index=member_vnf_index,
6686 vdu_index=vdu_index,
6687 vdu_name=vdu_name,
6688 deploy_params=deploy_params,
6689 descriptor_config=descriptor_config,
6690 base_folder=base_folder,
6691 task_instantiation_info=tasks_dict_info,
6692 stage=stage,
6693 )
6694 vdu_id = vca_info["osm_vdu_id"]
6695 vdur = find_in_list(
6696 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6697 )
6698 descriptor_config = get_configuration(db_vnfd, vdu_id)
6699 if vdur.get("additionalParams"):
6700 deploy_params_vdu = parse_yaml_strings(
6701 vdur["additionalParams"]
6702 )
6703 else:
6704 deploy_params_vdu = deploy_params
6705 deploy_params_vdu["OSM"] = get_osm_params(
6706 db_vnfr, vdu_id, vdu_count_index=vdu_index
6707 )
6708 if descriptor_config:
6709 vdu_name = None
6710 kdu_name = None
6711 stage[
6712 1
6713 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6714 member_vnf_index, vdu_id, vdu_index
6715 )
6716 stage[2] = step = "Scaling out VCA"
6717 self._write_op_status(op_id=nslcmop_id, stage=stage)
6718 self._deploy_n2vc(
6719 logging_text=logging_text
6720 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6721 member_vnf_index, vdu_id, vdu_index
6722 ),
6723 db_nsr=db_nsr,
6724 db_vnfr=db_vnfr,
6725 nslcmop_id=nslcmop_id,
6726 nsr_id=nsr_id,
6727 nsi_id=nsi_id,
6728 vnfd_id=vnfd_id,
6729 vdu_id=vdu_id,
6730 kdu_name=kdu_name,
6731 member_vnf_index=member_vnf_index,
6732 vdu_index=vdu_index,
6733 vdu_name=vdu_name,
6734 deploy_params=deploy_params_vdu,
6735 descriptor_config=descriptor_config,
6736 base_folder=base_folder,
6737 task_instantiation_info=tasks_dict_info,
6738 stage=stage,
6739 )
6740 # SCALE-UP VCA - END
6741 scale_process = None
6742
6743 # POST-SCALE BEGIN
6744 # execute primitive service POST-SCALING
6745 step = "Executing post-scale vnf-config-primitive"
6746 if scaling_descriptor.get("scaling-config-action"):
6747 for scaling_config_action in scaling_descriptor[
6748 "scaling-config-action"
6749 ]:
6750 if (
6751 scaling_config_action.get("trigger") == "post-scale-in"
6752 and scaling_type == "SCALE_IN"
6753 ) or (
6754 scaling_config_action.get("trigger") == "post-scale-out"
6755 and scaling_type == "SCALE_OUT"
6756 ):
6757 vnf_config_primitive = scaling_config_action[
6758 "vnf-config-primitive-name-ref"
6759 ]
6760 step = db_nslcmop_update[
6761 "detailed-status"
6762 ] = "executing post-scale scaling-config-action '{}'".format(
6763 vnf_config_primitive
6764 )
6765
6766 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6767 if db_vnfr.get("additionalParamsForVnf"):
6768 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6769
6770 # look for primitive
6771 for config_primitive in (
6772 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6773 ).get("config-primitive", ()):
6774 if config_primitive["name"] == vnf_config_primitive:
6775 break
6776 else:
6777 raise LcmException(
6778 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6779 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6780 "config-primitive".format(
6781 scaling_group, vnf_config_primitive
6782 )
6783 )
6784 scale_process = "VCA"
6785 db_nsr_update["config-status"] = "configuring post-scaling"
6786 primitive_params = self._map_primitive_params(
6787 config_primitive, {}, vnfr_params
6788 )
6789
6790 # Post-scale retry check: Check if this sub-operation has been executed before
6791 op_index = self._check_or_add_scale_suboperation(
6792 db_nslcmop,
6793 vnf_index,
6794 vnf_config_primitive,
6795 primitive_params,
6796 "POST-SCALE",
6797 )
6798 if op_index == self.SUBOPERATION_STATUS_SKIP:
6799 # Skip sub-operation
6800 result = "COMPLETED"
6801 result_detail = "Done"
6802 self.logger.debug(
6803 logging_text
6804 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6805 vnf_config_primitive, result, result_detail
6806 )
6807 )
6808 else:
6809 if op_index == self.SUBOPERATION_STATUS_NEW:
6810 # New sub-operation: Get index of this sub-operation
6811 op_index = (
6812 len(db_nslcmop.get("_admin", {}).get("operations"))
6813 - 1
6814 )
6815 self.logger.debug(
6816 logging_text
6817 + "vnf_config_primitive={} New sub-operation".format(
6818 vnf_config_primitive
6819 )
6820 )
6821 else:
6822 # retry: Get registered params for this existing sub-operation
6823 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6824 op_index
6825 ]
6826 vnf_index = op.get("member_vnf_index")
6827 vnf_config_primitive = op.get("primitive")
6828 primitive_params = op.get("primitive_params")
6829 self.logger.debug(
6830 logging_text
6831 + "vnf_config_primitive={} Sub-operation retry".format(
6832 vnf_config_primitive
6833 )
6834 )
6835 # Execute the primitive, either with new (first-time) or registered (reintent) args
6836 ee_descriptor_id = config_primitive.get(
6837 "execution-environment-ref"
6838 )
6839 primitive_name = config_primitive.get(
6840 "execution-environment-primitive", vnf_config_primitive
6841 )
6842 ee_id, vca_type = self._look_for_deployed_vca(
6843 nsr_deployed["VCA"],
6844 member_vnf_index=vnf_index,
6845 vdu_id=None,
6846 vdu_count_index=None,
6847 ee_descriptor_id=ee_descriptor_id,
6848 )
6849 result, result_detail = await self._ns_execute_primitive(
6850 ee_id,
6851 primitive_name,
6852 primitive_params,
6853 vca_type=vca_type,
6854 vca_id=vca_id,
6855 )
6856 self.logger.debug(
6857 logging_text
6858 + "vnf_config_primitive={} Done with result {} {}".format(
6859 vnf_config_primitive, result, result_detail
6860 )
6861 )
6862 # Update operationState = COMPLETED | FAILED
6863 self._update_suboperation_status(
6864 db_nslcmop, op_index, result, result_detail
6865 )
6866
6867 if result == "FAILED":
6868 raise LcmException(result_detail)
6869 db_nsr_update["config-status"] = old_config_status
6870 scale_process = None
6871 # POST-SCALE END
6872
6873 db_nsr_update[
6874 "detailed-status"
6875 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6876 db_nsr_update["operational-status"] = (
6877 "running"
6878 if old_operational_status == "failed"
6879 else old_operational_status
6880 )
6881 db_nsr_update["config-status"] = old_config_status
6882 return
6883 except (
6884 ROclient.ROClientException,
6885 DbException,
6886 LcmException,
6887 NgRoException,
6888 ) as e:
6889 self.logger.error(logging_text + "Exit Exception {}".format(e))
6890 exc = e
6891 except asyncio.CancelledError:
6892 self.logger.error(
6893 logging_text + "Cancelled Exception while '{}'".format(step)
6894 )
6895 exc = "Operation was cancelled"
6896 except Exception as e:
6897 exc = traceback.format_exc()
6898 self.logger.critical(
6899 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6900 exc_info=True,
6901 )
6902 finally:
6903 self._write_ns_status(
6904 nsr_id=nsr_id,
6905 ns_state=None,
6906 current_operation="IDLE",
6907 current_operation_id=None,
6908 )
6909 if tasks_dict_info:
6910 stage[1] = "Waiting for instantiate pending tasks."
6911 self.logger.debug(logging_text + stage[1])
6912 exc = await self._wait_for_tasks(
6913 logging_text,
6914 tasks_dict_info,
6915 self.timeout_ns_deploy,
6916 stage,
6917 nslcmop_id,
6918 nsr_id=nsr_id,
6919 )
6920 if exc:
6921 db_nslcmop_update[
6922 "detailed-status"
6923 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6924 nslcmop_operation_state = "FAILED"
6925 if db_nsr:
6926 db_nsr_update["operational-status"] = old_operational_status
6927 db_nsr_update["config-status"] = old_config_status
6928 db_nsr_update["detailed-status"] = ""
6929 if scale_process:
6930 if "VCA" in scale_process:
6931 db_nsr_update["config-status"] = "failed"
6932 if "RO" in scale_process:
6933 db_nsr_update["operational-status"] = "failed"
6934 db_nsr_update[
6935 "detailed-status"
6936 ] = "FAILED scaling nslcmop={} {}: {}".format(
6937 nslcmop_id, step, exc
6938 )
6939 else:
6940 error_description_nslcmop = None
6941 nslcmop_operation_state = "COMPLETED"
6942 db_nslcmop_update["detailed-status"] = "Done"
6943
6944 self._write_op_status(
6945 op_id=nslcmop_id,
6946 stage="",
6947 error_message=error_description_nslcmop,
6948 operation_state=nslcmop_operation_state,
6949 other_update=db_nslcmop_update,
6950 )
6951 if db_nsr:
6952 self._write_ns_status(
6953 nsr_id=nsr_id,
6954 ns_state=None,
6955 current_operation="IDLE",
6956 current_operation_id=None,
6957 other_update=db_nsr_update,
6958 )
6959
6960 if nslcmop_operation_state:
6961 try:
6962 msg = {
6963 "nsr_id": nsr_id,
6964 "nslcmop_id": nslcmop_id,
6965 "operationState": nslcmop_operation_state,
6966 }
6967 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
6968 except Exception as e:
6969 self.logger.error(
6970 logging_text + "kafka_write notification Exception {}".format(e)
6971 )
6972 self.logger.debug(logging_text + "Exit")
6973 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
6974
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale the KDU applications of a NS on their K8s cluster.

        For every KDU item in scaling_info the K8s connector's ``scale``
        operation is invoked. When the KDU has configuration primitives in the
        VNFD and no Juju execution-environment ref, the
        terminate-config-primitives are executed *before* scaling (on
        "delete" items) and the initial-config-primitives *after* scaling
        (on "create" items).

        :param logging_text: prefix for every log message of this task
        :param nsr_id: NS record _id; used to address _admin.deployed.K8s in "nsrs"
        :param nsr_deployed: nsr "_admin.deployed" content, searched for the KDU entry
        :param db_vnfd: VNF descriptor providing the KDU configuration primitives
        :param vca_id: VCA id forwarded to every K8s connector call
        :param scaling_info: dict with "kdu-create" and/or "kdu-delete" keys, each
            mapping kdu_name -> list of scaling items with keys
            "member-vnf-index", "scale", "k8s-cluster-type", "resource-name",
            "type" ("create" | "delete")
        """
        # NOTE(review): "or" means only one branch is processed per call —
        # if both kdu-create and kdu-delete are present, kdu-delete is ignored.
        # Presumably callers never mix them in one operation; confirm upstream.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # Locate the deployed KDU entry and its index inside
                # nsr._admin.deployed.K8s (index needed for the db status path)
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # Where the K8s connector writes progress/status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # Scale-in: run terminate-config-primitives first, but only
                # when the KDU is not managed through a Juju execution env
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # hard 10-minute cap per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # The actual scale operation (runs for both create and delete)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # Scale-out: run initial-config-primitives after scaling,
                # again only for non-Juju-managed KDUs
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # hard 10-minute cap per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7080
7081 async def _scale_ng_ro(
7082 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7083 ):
7084 nsr_id = db_nslcmop["nsInstanceId"]
7085 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7086 db_vnfrs = {}
7087
7088 # read from db: vnfd's for every vnf
7089 db_vnfds = []
7090
7091 # for each vnf in ns, read vnfd
7092 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7093 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7094 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7095 # if we haven't this vnfd, read it from db
7096 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7097 # read from db
7098 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7099 db_vnfds.append(vnfd)
7100 n2vc_key = self.n2vc.get_public_key()
7101 n2vc_key_list = [n2vc_key]
7102 self.scale_vnfr(
7103 db_vnfr,
7104 vdu_scaling_info.get("vdu-create"),
7105 vdu_scaling_info.get("vdu-delete"),
7106 mark_delete=True,
7107 )
7108 # db_vnfr has been updated, update db_vnfrs to use it
7109 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7110 await self._instantiate_ng_ro(
7111 logging_text,
7112 nsr_id,
7113 db_nsd,
7114 db_nsr,
7115 db_nslcmop,
7116 db_vnfrs,
7117 db_vnfds,
7118 n2vc_key_list,
7119 stage=stage,
7120 start_deploy=time(),
7121 timeout_ns_deploy=self.timeout_ns_deploy,
7122 )
7123 if vdu_scaling_info.get("vdu-delete"):
7124 self.scale_vnfr(
7125 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7126 )
7127
7128 async def extract_prometheus_scrape_jobs(
7129 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7130 ):
7131 # look if exist a file called 'prometheus*.j2' and
7132 artifact_content = self.fs.dir_ls(artifact_path)
7133 job_file = next(
7134 (
7135 f
7136 for f in artifact_content
7137 if f.startswith("prometheus") and f.endswith(".j2")
7138 ),
7139 None,
7140 )
7141 if not job_file:
7142 return
7143 with self.fs.file_open((artifact_path, job_file), "r") as f:
7144 job_data = f.read()
7145
7146 # TODO get_service
7147 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7148 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7149 host_port = "80"
7150 vnfr_id = vnfr_id.replace("-", "")
7151 variables = {
7152 "JOB_NAME": vnfr_id,
7153 "TARGET_IP": target_ip,
7154 "EXPORTER_POD_IP": host_name,
7155 "EXPORTER_POD_PORT": host_port,
7156 }
7157 job_list = parse_job(job_data, variables)
7158 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7159 for job in job_list:
7160 if (
7161 not isinstance(job.get("job_name"), str)
7162 or vnfr_id not in job["job_name"]
7163 ):
7164 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7165 job["nsr_id"] = nsr_id
7166 job["vnfr_id"] = vnfr_id
7167 return job_list
7168
7169 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7170 """
7171 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7172
7173 :param: vim_account_id: VIM Account ID
7174
7175 :return: (cloud_name, cloud_credential)
7176 """
7177 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7178 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7179
7180 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7181 """
7182 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7183
7184 :param: vim_account_id: VIM Account ID
7185
7186 :return: (cloud_name, cloud_credential)
7187 """
7188 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7189 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7190
7191 async def migrate(self, nsr_id, nslcmop_id):
7192 """
7193 Migrate VNFs and VDUs instances in a NS
7194
7195 :param: nsr_id: NS Instance ID
7196 :param: nslcmop_id: nslcmop ID of migrate
7197
7198 """
7199 # Try to lock HA task here
7200 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7201 if not task_is_locked_by_me:
7202 return
7203 logging_text = "Task ns={} migrate ".format(nsr_id)
7204 self.logger.debug(logging_text + "Enter")
7205 # get all needed from database
7206 db_nslcmop = None
7207 db_nslcmop_update = {}
7208 nslcmop_operation_state = None
7209 db_nsr_update = {}
7210 target = {}
7211 exc = None
7212 # in case of error, indicates what part of scale was failed to put nsr at error status
7213 start_deploy = time()
7214
7215 try:
7216 # wait for any previous tasks in process
7217 step = "Waiting for previous operations to terminate"
7218 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
7219
7220 self._write_ns_status(
7221 nsr_id=nsr_id,
7222 ns_state=None,
7223 current_operation="MIGRATING",
7224 current_operation_id=nslcmop_id
7225 )
7226 step = "Getting nslcmop from database"
7227 self.logger.debug(step + " after having waited for previous tasks to be completed")
7228 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7229 migrate_params = db_nslcmop.get("operationParams")
7230
7231 target = {}
7232 target.update(migrate_params)
7233 desc = await self.RO.migrate(nsr_id, target)
7234 self.logger.debug("RO return > {}".format(desc))
7235 action_id = desc["action_id"]
7236 await self._wait_ng_ro(
7237 nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate
7238 )
7239 except (ROclient.ROClientException, DbException, LcmException) as e:
7240 self.logger.error("Exit Exception {}".format(e))
7241 exc = e
7242 except asyncio.CancelledError:
7243 self.logger.error("Cancelled Exception while '{}'".format(step))
7244 exc = "Operation was cancelled"
7245 except Exception as e:
7246 exc = traceback.format_exc()
7247 self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
7248 finally:
7249 self._write_ns_status(
7250 nsr_id=nsr_id,
7251 ns_state=None,
7252 current_operation="IDLE",
7253 current_operation_id=None,
7254 )
7255 if exc:
7256 db_nslcmop_update[
7257 "detailed-status"
7258 ] = "FAILED {}: {}".format(step, exc)
7259 nslcmop_operation_state = "FAILED"
7260 else:
7261 nslcmop_operation_state = "COMPLETED"
7262 db_nslcmop_update["detailed-status"] = "Done"
7263 db_nsr_update["detailed-status"] = "Done"
7264
7265 self._write_op_status(
7266 op_id=nslcmop_id,
7267 stage="",
7268 error_message="",
7269 operation_state=nslcmop_operation_state,
7270 other_update=db_nslcmop_update,
7271 )
7272 if nslcmop_operation_state:
7273 try:
7274 msg = {
7275 "nsr_id": nsr_id,
7276 "nslcmop_id": nslcmop_id,
7277 "operationState": nslcmop_operation_state,
7278 }
7279 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7280 except Exception as e:
7281 self.logger.error(
7282 logging_text + "kafka_write notification Exception {}".format(e)
7283 )
7284 self.logger.debug(logging_text + "Exit")
7285 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")