Bug 2160 fixed: verify that a VDUR exists before using it in the method update_ns_vld_target
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 )
34
35 from osm_lcm import ROclient
36 from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41 )
42 from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50 )
51 from osm_lcm.ng_ro import NgRoClient, NgRoException
52 from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
59 check_juju_bundle_existence,
60 get_charm_artifact_path,
61 )
62 from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66 )
67 from osm_lcm.data_utils.vnfd import (
68 get_kdu,
69 get_kdu_services,
70 get_relation_list,
71 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
83 get_kdu_resource_profile,
84 find_software_version,
85 )
86 from osm_lcm.data_utils.list_utils import find_in_list
87 from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92 )
93 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94 from osm_lcm.data_utils.database.vim_account import VimAccountDB
95 from n2vc.definitions import RelationEndpoint
96 from n2vc.k8s_helm_conn import K8sHelmConnector
97 from n2vc.k8s_helm3_conn import K8sHelm3Connector
98 from n2vc.k8s_juju_conn import K8sJujuConnector
99
100 from osm_common.dbbase import DbException
101 from osm_common.fsbase import FsException
102
103 from osm_lcm.data_utils.database.database import Database
104 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105 from osm_lcm.data_utils.wim import (
106 get_sdn_ports,
107 get_target_wim_attrs,
108 select_feasible_wim_account,
109 )
110
111 from n2vc.n2vc_juju_conn import N2VCJujuConnector
112 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
113
114 from osm_lcm.lcm_helm_conn import LCMHelmConn
115 from osm_lcm.osm_config import OsmConfigBuilder
116 from osm_lcm.prometheus import parse_job
117
118 from copy import copy, deepcopy
119 from time import time
120 from uuid import uuid4
121
122 from random import randint
123
124 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
125
126
class NsLcm(LcmBase):
    """NS lifecycle manager.

    Drives network-service operations (instantiate, scale, update,
    terminate, heal...) through the RO, N2VC and K8s connectors created
    in __init__.
    """

    # Time for charm from first time at blocked,error status to mark as failed
    timeout_vca_on_error = (
        5 * 60
    )
    timeout_ns_deploy = 2 * 3600  # default global timeout for deploying a ns
    timeout_ns_terminate = 1800  # default global timeout for un-deploying a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    # timeout for some progress in a primitive execution
    timeout_progress_primitive = (
        10 * 60
    )
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate action on vnfs
    timeout_verticalscale = 1800  # default global timeout for vertical scaling
    # result codes for sub-operation lookups
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
147
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler, forwarded to LcmBase
        :param lcm_tasks: registry used to track the asyncio tasks spawned by LCM
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all the connectors created here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # shared database / filesystem singletons
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy, so local changes to the VCA config do not leak into the global config
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # execution-environment connector for helm-based charms
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connectors: helm v2, helm v3 and juju bundles
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: kdu deployment type -> k8s connector
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: charm/ee type -> VCA connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # dispatch table: operation type -> RO status-poll coroutine
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
236
237 @staticmethod
238 def increment_ip_mac(ip_mac, vm_index=1):
239 if not isinstance(ip_mac, str):
240 return ip_mac
241 try:
242 # try with ipv4 look for last dot
243 i = ip_mac.rfind(".")
244 if i > 0:
245 i += 1
246 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
247 # try with ipv6 or mac look for last colon. Operate in hex
248 i = ip_mac.rfind(":")
249 if i > 0:
250 i += 1
251 # format in hex, len can be 2 for mac or 4 for ipv6
252 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
253 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
254 )
255 except Exception:
256 pass
257 return None
258
259 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
260 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
261
262 try:
263 # TODO filter RO descriptor fields...
264
265 # write to database
266 db_dict = dict()
267 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
268 db_dict["deploymentStatus"] = ro_descriptor
269 self.update_db_2("nsrs", nsrs_id, db_dict)
270
271 except Exception as e:
272 self.logger.warn(
273 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
274 )
275
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """N2VC callback: refresh vcaStatus/configurationStatus/nsState in the nsr.

        :param table: source table of the change (unused here beyond the callback signature)
        :param filter: db filter; its '_id' is the nsr id to refresh
        :param path: dotted db path of the change; its last component is the VCA index
        :param updated_data: changed data (not used directly; status is re-read from n2vc)
        :param vca_id: optional VCA identifier forwarded to n2vc calls
        :return: None; errors are logged, never raised (except cancellation/timeout)
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted component of the changed path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # reconcile the stored config status with what the VCA reports
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # do not update configurationStatus when the index cannot be resolved
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
376
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param vca_id: optional VCA identifier forwarded to the k8s connector
        :param cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        nsr_id = filter.get("_id")
        try:
            # query the connector matching this cluster type for the kdu status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict["vcaStatus"],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
425
426 @staticmethod
427 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
428 try:
429 env = Environment(undefined=StrictUndefined, autoescape=True)
430 template = env.from_string(cloud_init_text)
431 return template.render(additional_params or {})
432 except UndefinedError as e:
433 raise LcmException(
434 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
435 "file, must be provided in the instantiation parameters inside the "
436 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
437 )
438 except (TemplateError, TemplateNotFound) as e:
439 raise LcmException(
440 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
441 vnfd_id, vdu_id, e
442 )
443 )
444
445 def _get_vdu_cloud_init_content(self, vdu, vnfd):
446 cloud_init_content = cloud_init_file = None
447 try:
448 if vdu.get("cloud-init-file"):
449 base_folder = vnfd["_admin"]["storage"]
450 if base_folder["pkg-dir"]:
451 cloud_init_file = "{}/{}/cloud_init/{}".format(
452 base_folder["folder"],
453 base_folder["pkg-dir"],
454 vdu["cloud-init-file"],
455 )
456 else:
457 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
458 base_folder["folder"],
459 vdu["cloud-init-file"],
460 )
461 with self.fs.file_open(cloud_init_file, "r") as ci_file:
462 cloud_init_content = ci_file.read()
463 elif vdu.get("cloud-init"):
464 cloud_init_content = vdu["cloud-init"]
465
466 return cloud_init_content
467 except FsException as e:
468 raise LcmException(
469 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
470 vnfd["id"], vdu["id"], cloud_init_file, e
471 )
472 )
473
474 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
475 vdur = next(
476 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
477 )
478 additional_params = vdur.get("additionalParams")
479 return parse_yaml_strings(additional_params)
480
481 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
482 """
483 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
484 :param vnfd: input vnfd
485 :param new_id: overrides vnf id if provided
486 :param additionalParams: Instantiation params for VNFs provided
487 :param nsrId: Id of the NSR
488 :return: copy of vnfd
489 """
490 vnfd_RO = deepcopy(vnfd)
491 # remove unused by RO configuration, monitoring, scaling and internal keys
492 vnfd_RO.pop("_id", None)
493 vnfd_RO.pop("_admin", None)
494 vnfd_RO.pop("monitoring-param", None)
495 vnfd_RO.pop("scaling-group-descriptor", None)
496 vnfd_RO.pop("kdu", None)
497 vnfd_RO.pop("k8s-cluster", None)
498 if new_id:
499 vnfd_RO["id"] = new_id
500
501 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
502 for vdu in get_iterable(vnfd_RO, "vdu"):
503 vdu.pop("cloud-init-file", None)
504 vdu.pop("cloud-init", None)
505 return vnfd_RO
506
507 @staticmethod
508 def ip_profile_2_RO(ip_profile):
509 RO_ip_profile = deepcopy(ip_profile)
510 if "dns-server" in RO_ip_profile:
511 if isinstance(RO_ip_profile["dns-server"], list):
512 RO_ip_profile["dns-address"] = []
513 for ds in RO_ip_profile.pop("dns-server"):
514 RO_ip_profile["dns-address"].append(ds["address"])
515 else:
516 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
517 if RO_ip_profile.get("ip-version") == "ipv4":
518 RO_ip_profile["ip-version"] = "IPv4"
519 if RO_ip_profile.get("ip-version") == "ipv6":
520 RO_ip_profile["ip-version"] = "IPv6"
521 if "dhcp-params" in RO_ip_profile:
522 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
523 return RO_ip_profile
524
525 def _get_ro_vim_id_for_vim_account(self, vim_account):
526 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
527 if db_vim["_admin"]["operationalState"] != "ENABLED":
528 raise LcmException(
529 "VIM={} is not available. operationalState={}".format(
530 vim_account, db_vim["_admin"]["operationalState"]
531 )
532 )
533 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
534 return RO_vim_id
535
536 def get_ro_wim_id_for_wim_account(self, wim_account):
537 if isinstance(wim_account, str):
538 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
539 if db_wim["_admin"]["operationalState"] != "ENABLED":
540 raise LcmException(
541 "WIM={} is not available. operationalState={}".format(
542 wim_account, db_wim["_admin"]["operationalState"]
543 )
544 )
545 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
546 return RO_wim_id
547 else:
548 return wim_account
549
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale in/out to the vnfr 'vdur' list in the database.

        :param db_vnfr: vnfr record; its 'vdur' (and 'vdur-template') are
            modified in place to mirror the database after the operation
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: when True, mark vdur entries as DELETING instead
            of pulling them from the database
        :raises LcmException: scaling out a vdu with no existing vdur and no
            saved vdur-template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the source vdur and reset per-instance fields
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM assigns fresh values
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark only the last vdu_count replicas as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
660
661 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
662 """
663 Updates database nsr with the RO info for the created vld
664 :param ns_update_nsr: dictionary to be filled with the updated info
665 :param db_nsr: content of db_nsr. This is also modified
666 :param nsr_desc_RO: nsr descriptor from RO
667 :return: Nothing, LcmException is raised on errors
668 """
669
670 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
671 for net_RO in get_iterable(nsr_desc_RO, "nets"):
672 if vld["id"] != net_RO.get("ns_net_osm_id"):
673 continue
674 vld["vim-id"] = net_RO.get("vim_net_id")
675 vld["name"] = net_RO.get("vim_name")
676 vld["status"] = net_RO.get("status")
677 vld["status-detailed"] = net_RO.get("error_msg")
678 ns_update_nsr["vld.{}".format(vld_index)] = vld
679 break
680 else:
681 raise LcmException(
682 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
683 )
684
685 def set_vnfr_at_error(self, db_vnfrs, error_text):
686 try:
687 for db_vnfr in db_vnfrs.values():
688 vnfr_update = {"status": "ERROR"}
689 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
690 if "status" not in vdur:
691 vdur["status"] = "ERROR"
692 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
693 if error_text:
694 vdur["status-detailed"] = str(error_text)
695 vnfr_update[
696 "vdur.{}.status-detailed".format(vdu_index)
697 ] = "ERROR"
698 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
699 except DbException as e:
700 self.logger.error("Cannot update vnf. {}".format(e))
701
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # locate the RO vnf matching this member-vnf-index; the trailing
            # for/else raises when no match is found
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';' -- keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM, nothing to update
                        continue
                    # find the RO vm with the same vdu id and replica index
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but earlier replica: keep counting
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy interface addresses reported by the VIM
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # copy vld (network) info reported by the VIM
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
798
799 def _get_ns_config_info(self, nsr_id):
800 """
801 Generates a mapping between vnf,vdu elements and the N2VC id
802 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
803 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
804 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
805 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
806 """
807 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
808 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
809 mapping = {}
810 ns_config_info = {"osm-config-mapping": mapping}
811 for vca in vca_deployed_list:
812 if not vca["member-vnf-index"]:
813 continue
814 if not vca["vdu_id"]:
815 mapping[vca["member-vnf-index"]] = vca["application"]
816 else:
817 mapping[
818 "{}.{}.{}".format(
819 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
820 )
821 ] = vca["application"]
822 return ns_config_info
823
824 async def _instantiate_ng_ro(
825 self,
826 logging_text,
827 nsr_id,
828 nsd,
829 db_nsr,
830 db_nslcmop,
831 db_vnfrs,
832 db_vnfds,
833 n2vc_key_list,
834 stage,
835 start_deploy,
836 timeout_ns_deploy,
837 ):
838 db_vims = {}
839
840 def get_vim_account(vim_account_id):
841 nonlocal db_vims
842 if vim_account_id in db_vims:
843 return db_vims[vim_account_id]
844 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
845 db_vims[vim_account_id] = db_vim
846 return db_vim
847
848 # modify target_vld info with instantiation parameters
849 def parse_vld_instantiation_params(
850 target_vim, target_vld, vld_params, target_sdn
851 ):
852 if vld_params.get("ip-profile"):
853 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
854 "ip-profile"
855 ]
856 if vld_params.get("provider-network"):
857 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
858 "provider-network"
859 ]
860 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
861 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
862 "provider-network"
863 ]["sdn-ports"]
864
865 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
866 # if wim_account_id is specified in vld_params, validate if it is feasible.
867 wim_account_id, db_wim = select_feasible_wim_account(
868 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
869 )
870
871 if wim_account_id:
872 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
873 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
874 # update vld_params with correct WIM account Id
875 vld_params["wimAccountId"] = wim_account_id
876
877 target_wim = "wim:{}".format(wim_account_id)
878 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
879 sdn_ports = get_sdn_ports(vld_params, db_wim)
880 if len(sdn_ports) > 0:
881 target_vld["vim_info"][target_wim] = target_wim_attrs
882 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
883
884 self.logger.debug(
885 "Target VLD with WIM data: {:s}".format(str(target_vld))
886 )
887
888 for param in ("vim-network-name", "vim-network-id"):
889 if vld_params.get(param):
890 if isinstance(vld_params[param], dict):
891 for vim, vim_net in vld_params[param].items():
892 other_target_vim = "vim:" + vim
893 populate_dict(
894 target_vld["vim_info"],
895 (other_target_vim, param.replace("-", "_")),
896 vim_net,
897 )
898 else: # isinstance str
899 target_vld["vim_info"][target_vim][
900 param.replace("-", "_")
901 ] = vld_params[param]
902 if vld_params.get("common_id"):
903 target_vld["common_id"] = vld_params.get("common_id")
904
905 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
906 def update_ns_vld_target(target, ns_params):
907 for vnf_params in ns_params.get("vnf", ()):
908 if vnf_params.get("vimAccountId"):
909 target_vnf = next(
910 (
911 vnfr
912 for vnfr in db_vnfrs.values()
913 if vnf_params["member-vnf-index"]
914 == vnfr["member-vnf-index-ref"]
915 ),
916 None,
917 )
918 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
919 if not vdur:
920 continue
921 for a_index, a_vld in enumerate(target["ns"]["vld"]):
922 target_vld = find_in_list(
923 get_iterable(vdur, "interfaces"),
924 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
925 )
926
927 vld_params = find_in_list(
928 get_iterable(ns_params, "vld"),
929 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
930 )
931 if target_vld:
932 if vnf_params.get("vimAccountId") not in a_vld.get(
933 "vim_info", {}
934 ):
935 target_vim_network_list = [
936 v for _, v in a_vld.get("vim_info").items()
937 ]
938 target_vim_network_name = next(
939 (
940 item.get("vim_network_name", "")
941 for item in target_vim_network_list
942 ),
943 "",
944 )
945
946 target["ns"]["vld"][a_index].get("vim_info").update(
947 {
948 "vim:{}".format(vnf_params["vimAccountId"]): {
949 "vim_network_name": target_vim_network_name,
950 }
951 }
952 )
953
954 if vld_params:
955 for param in ("vim-network-name", "vim-network-id"):
956 if vld_params.get(param) and isinstance(
957 vld_params[param], dict
958 ):
959 for vim, vim_net in vld_params[
960 param
961 ].items():
962 other_target_vim = "vim:" + vim
963 populate_dict(
964 target["ns"]["vld"][a_index].get(
965 "vim_info"
966 ),
967 (
968 other_target_vim,
969 param.replace("-", "_"),
970 ),
971 vim_net,
972 )
973
974 nslcmop_id = db_nslcmop["_id"]
975 target = {
976 "name": db_nsr["name"],
977 "ns": {"vld": []},
978 "vnf": [],
979 "image": deepcopy(db_nsr["image"]),
980 "flavor": deepcopy(db_nsr["flavor"]),
981 "action_id": nslcmop_id,
982 "cloud_init_content": {},
983 }
984 for image in target["image"]:
985 image["vim_info"] = {}
986 for flavor in target["flavor"]:
987 flavor["vim_info"] = {}
988 if db_nsr.get("affinity-or-anti-affinity-group"):
989 target["affinity-or-anti-affinity-group"] = deepcopy(
990 db_nsr["affinity-or-anti-affinity-group"]
991 )
992 for affinity_or_anti_affinity_group in target[
993 "affinity-or-anti-affinity-group"
994 ]:
995 affinity_or_anti_affinity_group["vim_info"] = {}
996
997 if db_nslcmop.get("lcmOperationType") != "instantiate":
998 # get parameters of instantiation:
999 db_nslcmop_instantiate = self.db.get_list(
1000 "nslcmops",
1001 {
1002 "nsInstanceId": db_nslcmop["nsInstanceId"],
1003 "lcmOperationType": "instantiate",
1004 },
1005 )[-1]
1006 ns_params = db_nslcmop_instantiate.get("operationParams")
1007 else:
1008 ns_params = db_nslcmop.get("operationParams")
1009 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
1010 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
1011
1012 cp2target = {}
1013 for vld_index, vld in enumerate(db_nsr.get("vld")):
1014 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1015 target_vld = {
1016 "id": vld["id"],
1017 "name": vld["name"],
1018 "mgmt-network": vld.get("mgmt-network", False),
1019 "type": vld.get("type"),
1020 "vim_info": {
1021 target_vim: {
1022 "vim_network_name": vld.get("vim-network-name"),
1023 "vim_account_id": ns_params["vimAccountId"],
1024 }
1025 },
1026 }
1027 # check if this network needs SDN assist
1028 if vld.get("pci-interfaces"):
1029 db_vim = get_vim_account(ns_params["vimAccountId"])
1030 if vim_config := db_vim.get("config"):
1031 if sdnc_id := vim_config.get("sdn-controller"):
1032 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1033 target_sdn = "sdn:{}".format(sdnc_id)
1034 target_vld["vim_info"][target_sdn] = {
1035 "sdn": True,
1036 "target_vim": target_vim,
1037 "vlds": [sdn_vld],
1038 "type": vld.get("type"),
1039 }
1040
1041 nsd_vnf_profiles = get_vnf_profiles(nsd)
1042 for nsd_vnf_profile in nsd_vnf_profiles:
1043 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1044 if cp["virtual-link-profile-id"] == vld["id"]:
1045 cp2target[
1046 "member_vnf:{}.{}".format(
1047 cp["constituent-cpd-id"][0][
1048 "constituent-base-element-id"
1049 ],
1050 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1051 )
1052 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1053
1054 # check at nsd descriptor, if there is an ip-profile
1055 vld_params = {}
1056 nsd_vlp = find_in_list(
1057 get_virtual_link_profiles(nsd),
1058 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1059 == vld["id"],
1060 )
1061 if (
1062 nsd_vlp
1063 and nsd_vlp.get("virtual-link-protocol-data")
1064 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1065 ):
1066 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1067 "l3-protocol-data"
1068 ]
1069 ip_profile_dest_data = {}
1070 if "ip-version" in ip_profile_source_data:
1071 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1072 "ip-version"
1073 ]
1074 if "cidr" in ip_profile_source_data:
1075 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1076 "cidr"
1077 ]
1078 if "gateway-ip" in ip_profile_source_data:
1079 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1080 "gateway-ip"
1081 ]
1082 if "dhcp-enabled" in ip_profile_source_data:
1083 ip_profile_dest_data["dhcp-params"] = {
1084 "enabled": ip_profile_source_data["dhcp-enabled"]
1085 }
1086 vld_params["ip-profile"] = ip_profile_dest_data
1087
1088 # update vld_params with instantiation params
1089 vld_instantiation_params = find_in_list(
1090 get_iterable(ns_params, "vld"),
1091 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1092 )
1093 if vld_instantiation_params:
1094 vld_params.update(vld_instantiation_params)
1095 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1096 target["ns"]["vld"].append(target_vld)
1097 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1098 update_ns_vld_target(target, ns_params)
1099
1100 for vnfr in db_vnfrs.values():
1101 vnfd = find_in_list(
1102 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1103 )
1104 vnf_params = find_in_list(
1105 get_iterable(ns_params, "vnf"),
1106 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1107 )
1108 target_vnf = deepcopy(vnfr)
1109 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1110 for vld in target_vnf.get("vld", ()):
1111 # check if connected to a ns.vld, to fill target'
1112 vnf_cp = find_in_list(
1113 vnfd.get("int-virtual-link-desc", ()),
1114 lambda cpd: cpd.get("id") == vld["id"],
1115 )
1116 if vnf_cp:
1117 ns_cp = "member_vnf:{}.{}".format(
1118 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1119 )
1120 if cp2target.get(ns_cp):
1121 vld["target"] = cp2target[ns_cp]
1122
1123 vld["vim_info"] = {
1124 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1125 }
1126 # check if this network needs SDN assist
1127 target_sdn = None
1128 if vld.get("pci-interfaces"):
1129 db_vim = get_vim_account(vnfr["vim-account-id"])
1130 sdnc_id = db_vim["config"].get("sdn-controller")
1131 if sdnc_id:
1132 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1133 target_sdn = "sdn:{}".format(sdnc_id)
1134 vld["vim_info"][target_sdn] = {
1135 "sdn": True,
1136 "target_vim": target_vim,
1137 "vlds": [sdn_vld],
1138 "type": vld.get("type"),
1139 }
1140
1141 # check at vnfd descriptor, if there is an ip-profile
1142 vld_params = {}
1143 vnfd_vlp = find_in_list(
1144 get_virtual_link_profiles(vnfd),
1145 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1146 )
1147 if (
1148 vnfd_vlp
1149 and vnfd_vlp.get("virtual-link-protocol-data")
1150 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1151 ):
1152 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1153 "l3-protocol-data"
1154 ]
1155 ip_profile_dest_data = {}
1156 if "ip-version" in ip_profile_source_data:
1157 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1158 "ip-version"
1159 ]
1160 if "cidr" in ip_profile_source_data:
1161 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1162 "cidr"
1163 ]
1164 if "gateway-ip" in ip_profile_source_data:
1165 ip_profile_dest_data[
1166 "gateway-address"
1167 ] = ip_profile_source_data["gateway-ip"]
1168 if "dhcp-enabled" in ip_profile_source_data:
1169 ip_profile_dest_data["dhcp-params"] = {
1170 "enabled": ip_profile_source_data["dhcp-enabled"]
1171 }
1172
1173 vld_params["ip-profile"] = ip_profile_dest_data
1174 # update vld_params with instantiation params
1175 if vnf_params:
1176 vld_instantiation_params = find_in_list(
1177 get_iterable(vnf_params, "internal-vld"),
1178 lambda i_vld: i_vld["name"] == vld["id"],
1179 )
1180 if vld_instantiation_params:
1181 vld_params.update(vld_instantiation_params)
1182 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1183
1184 vdur_list = []
1185 for vdur in target_vnf.get("vdur", ()):
1186 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1187 continue # This vdu must not be created
1188 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1189
1190 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1191
1192 if ssh_keys_all:
1193 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1194 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1195 if (
1196 vdu_configuration
1197 and vdu_configuration.get("config-access")
1198 and vdu_configuration.get("config-access").get("ssh-access")
1199 ):
1200 vdur["ssh-keys"] = ssh_keys_all
1201 vdur["ssh-access-required"] = vdu_configuration[
1202 "config-access"
1203 ]["ssh-access"]["required"]
1204 elif (
1205 vnf_configuration
1206 and vnf_configuration.get("config-access")
1207 and vnf_configuration.get("config-access").get("ssh-access")
1208 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1209 ):
1210 vdur["ssh-keys"] = ssh_keys_all
1211 vdur["ssh-access-required"] = vnf_configuration[
1212 "config-access"
1213 ]["ssh-access"]["required"]
1214 elif ssh_keys_instantiation and find_in_list(
1215 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1216 ):
1217 vdur["ssh-keys"] = ssh_keys_instantiation
1218
1219 self.logger.debug("NS > vdur > {}".format(vdur))
1220
1221 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1222 # cloud-init
1223 if vdud.get("cloud-init-file"):
1224 vdur["cloud-init"] = "{}:file:{}".format(
1225 vnfd["_id"], vdud.get("cloud-init-file")
1226 )
1227 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1228 if vdur["cloud-init"] not in target["cloud_init_content"]:
1229 base_folder = vnfd["_admin"]["storage"]
1230 if base_folder["pkg-dir"]:
1231 cloud_init_file = "{}/{}/cloud_init/{}".format(
1232 base_folder["folder"],
1233 base_folder["pkg-dir"],
1234 vdud.get("cloud-init-file"),
1235 )
1236 else:
1237 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1238 base_folder["folder"],
1239 vdud.get("cloud-init-file"),
1240 )
1241 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1242 target["cloud_init_content"][
1243 vdur["cloud-init"]
1244 ] = ci_file.read()
1245 elif vdud.get("cloud-init"):
1246 vdur["cloud-init"] = "{}:vdu:{}".format(
1247 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1248 )
1249 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1250 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1251 "cloud-init"
1252 ]
1253 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1254 deploy_params_vdu = self._format_additional_params(
1255 vdur.get("additionalParams") or {}
1256 )
1257 deploy_params_vdu["OSM"] = get_osm_params(
1258 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1259 )
1260 vdur["additionalParams"] = deploy_params_vdu
1261
1262 # flavor
1263 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1264 if target_vim not in ns_flavor["vim_info"]:
1265 ns_flavor["vim_info"][target_vim] = {}
1266
1267 # deal with images
1268 # in case alternative images are provided we must check if they should be applied
1269 # for the vim_type, modify the vim_type taking into account
1270 ns_image_id = int(vdur["ns-image-id"])
1271 if vdur.get("alt-image-ids"):
1272 db_vim = get_vim_account(vnfr["vim-account-id"])
1273 vim_type = db_vim["vim_type"]
1274 for alt_image_id in vdur.get("alt-image-ids"):
1275 ns_alt_image = target["image"][int(alt_image_id)]
1276 if vim_type == ns_alt_image.get("vim-type"):
1277 # must use alternative image
1278 self.logger.debug(
1279 "use alternative image id: {}".format(alt_image_id)
1280 )
1281 ns_image_id = alt_image_id
1282 vdur["ns-image-id"] = ns_image_id
1283 break
1284 ns_image = target["image"][int(ns_image_id)]
1285 if target_vim not in ns_image["vim_info"]:
1286 ns_image["vim_info"][target_vim] = {}
1287
1288 # Affinity groups
1289 if vdur.get("affinity-or-anti-affinity-group-id"):
1290 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1291 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1292 if target_vim not in ns_ags["vim_info"]:
1293 ns_ags["vim_info"][target_vim] = {}
1294
1295 vdur["vim_info"] = {target_vim: {}}
1296 # instantiation parameters
1297 if vnf_params:
1298 vdu_instantiation_params = find_in_list(
1299 get_iterable(vnf_params, "vdu"),
1300 lambda i_vdu: i_vdu["id"] == vdud["id"],
1301 )
1302 if vdu_instantiation_params:
1303 # Parse the vdu_volumes from the instantiation params
1304 vdu_volumes = get_volumes_from_instantiation_params(
1305 vdu_instantiation_params, vdud
1306 )
1307 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1308 vdur_list.append(vdur)
1309 target_vnf["vdur"] = vdur_list
1310 target["vnf"].append(target_vnf)
1311
1312 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1313 desc = await self.RO.deploy(nsr_id, target)
1314 self.logger.debug("RO return > {}".format(desc))
1315 action_id = desc["action_id"]
1316 await self._wait_ng_ro(
1317 nsr_id,
1318 action_id,
1319 nslcmop_id,
1320 start_deploy,
1321 timeout_ns_deploy,
1322 stage,
1323 operation="instantiation",
1324 )
1325
1326 # Updating NSR
1327 db_nsr_update = {
1328 "_admin.deployed.RO.operational-status": "running",
1329 "detailed-status": " ".join(stage),
1330 }
1331 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1332 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1333 self._write_op_status(nslcmop_id, stage)
1334 self.logger.debug(
1335 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1336 )
1337 return
1338
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        """
        Poll new-generation RO until the given action reaches a final state.

        :param nsr_id: NS record id the action belongs to
        :param action_id: RO action id (as returned by self.RO.deploy)
        :param nslcmop_id: if provided (together with stage), progress is persisted
            to the nsrs collection and the operation status on every change
        :param start_time: epoch seconds the timeout is counted from (defaults to now)
        :param timeout: maximum seconds to wait before raising NgRoException
        :param stage: 3-item status list; stage[2] is overwritten with VIM progress
        :param operation: key into self.op_status_map selecting the RO status query
        :raises NgRoException: on RO-reported FAILED status or on timeout
        """
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                # still in progress: surface RO details in the vim-specific stage slot
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # persist progress only when the detailed status actually changed
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15, loop=self.loop)
        else:  # timeout_ns_deploy
            raise NgRoException("Timeout waiting ns to deploy")
1376
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Undeploy the NS at new-generation RO and delete its RO record.

        :param logging_text: prefix text to use at logging
        :param nsr_deployed: NOTE(review): not referenced in this method body
        :param nsr_id: NS record id to terminate
        :param nslcmop_id: operation id, used as RO action_id and for status writes
        :param stage: 3-item status list; stage[2] is updated with the outcome
        :raises LcmException: when RO deletion fails with a non-404 error
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an all-empty target presumably makes RO remove every deployed item
            # for this NS — TODO confirm against NG-RO deploy semantics
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            # triage by RO HTTP code: 404 means already gone (success),
            # 409 is a conflict, anything else is a hard failure
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        # failures collected above are reported to the caller only after the
        # database status has been updated
        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1449
1450 async def instantiate_RO(
1451 self,
1452 logging_text,
1453 nsr_id,
1454 nsd,
1455 db_nsr,
1456 db_nslcmop,
1457 db_vnfrs,
1458 db_vnfds,
1459 n2vc_key_list,
1460 stage,
1461 ):
1462 """
1463 Instantiate at RO
1464 :param logging_text: preffix text to use at logging
1465 :param nsr_id: nsr identity
1466 :param nsd: database content of ns descriptor
1467 :param db_nsr: database content of ns record
1468 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1469 :param db_vnfrs:
1470 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1471 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1472 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1473 :return: None or exception
1474 """
1475 try:
1476 start_deploy = time()
1477 ns_params = db_nslcmop.get("operationParams")
1478 if ns_params and ns_params.get("timeout_ns_deploy"):
1479 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1480 else:
1481 timeout_ns_deploy = self.timeout.get(
1482 "ns_deploy", self.timeout_ns_deploy
1483 )
1484
1485 # Check for and optionally request placement optimization. Database will be updated if placement activated
1486 stage[2] = "Waiting for Placement."
1487 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1488 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1489 for vnfr in db_vnfrs.values():
1490 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1491 break
1492 else:
1493 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1494
1495 return await self._instantiate_ng_ro(
1496 logging_text,
1497 nsr_id,
1498 nsd,
1499 db_nsr,
1500 db_nslcmop,
1501 db_vnfrs,
1502 db_vnfds,
1503 n2vc_key_list,
1504 stage,
1505 start_deploy,
1506 timeout_ns_deploy,
1507 )
1508 except Exception as e:
1509 stage[2] = "ERROR deploying at VIM"
1510 self.set_vnfr_at_error(db_vnfrs, str(e))
1511 self.logger.error(
1512 "Error deploying at VIM {}".format(e),
1513 exc_info=not isinstance(
1514 e,
1515 (
1516 ROclient.ROClientException,
1517 LcmException,
1518 DbException,
1519 NgRoException,
1520 ),
1521 ),
1522 )
1523 raise
1524
1525 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1526 """
1527 Wait for kdu to be up, get ip address
1528 :param logging_text: prefix use for logging
1529 :param nsr_id:
1530 :param vnfr_id:
1531 :param kdu_name:
1532 :return: IP address, K8s services
1533 """
1534
1535 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1536 nb_tries = 0
1537
1538 while nb_tries < 360:
1539 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1540 kdur = next(
1541 (
1542 x
1543 for x in get_iterable(db_vnfr, "kdur")
1544 if x.get("kdu-name") == kdu_name
1545 ),
1546 None,
1547 )
1548 if not kdur:
1549 raise LcmException(
1550 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1551 )
1552 if kdur.get("status"):
1553 if kdur["status"] in ("READY", "ENABLED"):
1554 return kdur.get("ip-address"), kdur.get("services")
1555 else:
1556 raise LcmException(
1557 "target KDU={} is in error state".format(kdu_name)
1558 )
1559
1560 await asyncio.sleep(10, loop=self.loop)
1561 nb_tries += 1
1562 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1563
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: ns record id
        :param vnfr_id: vnf record id; when vdu_id is None the VNF mgmt ip is used
        :param vdu_id: target vdu id, or None to address the VNF itself
        :param vdu_index: count-index of the target vdu (VDU case only)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on error state, missing vdur, or retry exhaustion
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0          # retries of the key-injection call itself
        target_vdu_id = None  # set once the target VM is found ACTIVE with an ip
        ro_retries = 0        # overall polling iterations (10 s each)

        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur that owns the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up; otherwise require ACTIVE from either
                # the legacy status field or the NG-RO vim_status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: inject via an action deployment
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(
                            nsr_id, action_id, timeout=600, operation="instantiation"
                        )
                        break
                    else:
                        # legacy RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                            else:
                                raise ROclient.ROClientException(
                                    "error injecting key: {}".format(
                                        result.get("description")
                                    )
                                )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # legacy RO injection is retried up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the ip address is all the caller needs
                break

        return ip_address
1741
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: ns record id used to refresh configurationStatus from db
        :param vca_deployed_list: deployed VCA list; only entry vca_index is read
        :param vca_index: index of the VCA whose dependencies are awaited
        :raises LcmException: when a dependency is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # an NS-level charm (no member-vnf-index) depends on everything;
                # a VNF-level charm depends only on entries of the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # some dependency still pending: break out and retry
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1779
1780 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1781 vca_id = None
1782 if db_vnfr:
1783 vca_id = deep_get(db_vnfr, ("vca-id",))
1784 elif db_nsr:
1785 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1786 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1787 return vca_id
1788
1789 async def instantiate_N2VC(
1790 self,
1791 logging_text,
1792 vca_index,
1793 nsi_id,
1794 db_nsr,
1795 db_vnfr,
1796 vdu_id,
1797 kdu_name,
1798 vdu_index,
1799 config_descriptor,
1800 deploy_params,
1801 base_folder,
1802 nslcmop_id,
1803 stage,
1804 vca_type,
1805 vca_name,
1806 ee_config_descriptor,
1807 ):
1808 nsr_id = db_nsr["_id"]
1809 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1810 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1811 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1812 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1813 db_dict = {
1814 "collection": "nsrs",
1815 "filter": {"_id": nsr_id},
1816 "path": db_update_entry,
1817 }
1818 step = ""
1819 try:
1820 element_type = "NS"
1821 element_under_configuration = nsr_id
1822
1823 vnfr_id = None
1824 if db_vnfr:
1825 vnfr_id = db_vnfr["_id"]
1826 osm_config["osm"]["vnf_id"] = vnfr_id
1827
1828 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1829
1830 if vca_type == "native_charm":
1831 index_number = 0
1832 else:
1833 index_number = vdu_index or 0
1834
1835 if vnfr_id:
1836 element_type = "VNF"
1837 element_under_configuration = vnfr_id
1838 namespace += ".{}-{}".format(vnfr_id, index_number)
1839 if vdu_id:
1840 namespace += ".{}-{}".format(vdu_id, index_number)
1841 element_type = "VDU"
1842 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1843 osm_config["osm"]["vdu_id"] = vdu_id
1844 elif kdu_name:
1845 namespace += ".{}".format(kdu_name)
1846 element_type = "KDU"
1847 element_under_configuration = kdu_name
1848 osm_config["osm"]["kdu_name"] = kdu_name
1849
1850 # Get artifact path
1851 if base_folder["pkg-dir"]:
1852 artifact_path = "{}/{}/{}/{}".format(
1853 base_folder["folder"],
1854 base_folder["pkg-dir"],
1855 "charms"
1856 if vca_type
1857 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1858 else "helm-charts",
1859 vca_name,
1860 )
1861 else:
1862 artifact_path = "{}/Scripts/{}/{}/".format(
1863 base_folder["folder"],
1864 "charms"
1865 if vca_type
1866 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1867 else "helm-charts",
1868 vca_name,
1869 )
1870
1871 self.logger.debug("Artifact path > {}".format(artifact_path))
1872
1873 # get initial_config_primitive_list that applies to this element
1874 initial_config_primitive_list = config_descriptor.get(
1875 "initial-config-primitive"
1876 )
1877
1878 self.logger.debug(
1879 "Initial config primitive list > {}".format(
1880 initial_config_primitive_list
1881 )
1882 )
1883
1884 # add config if not present for NS charm
1885 ee_descriptor_id = ee_config_descriptor.get("id")
1886 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1887 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1888 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1889 )
1890
1891 self.logger.debug(
1892 "Initial config primitive list #2 > {}".format(
1893 initial_config_primitive_list
1894 )
1895 )
1896 # n2vc_redesign STEP 3.1
1897 # find old ee_id if exists
1898 ee_id = vca_deployed.get("ee_id")
1899
1900 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1901 # create or register execution environment in VCA
1902 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1903 self._write_configuration_status(
1904 nsr_id=nsr_id,
1905 vca_index=vca_index,
1906 status="CREATING",
1907 element_under_configuration=element_under_configuration,
1908 element_type=element_type,
1909 )
1910
1911 step = "create execution environment"
1912 self.logger.debug(logging_text + step)
1913
1914 ee_id = None
1915 credentials = None
1916 if vca_type == "k8s_proxy_charm":
1917 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1918 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1919 namespace=namespace,
1920 artifact_path=artifact_path,
1921 db_dict=db_dict,
1922 vca_id=vca_id,
1923 )
1924 elif vca_type == "helm" or vca_type == "helm-v3":
1925 ee_id, credentials = await self.vca_map[
1926 vca_type
1927 ].create_execution_environment(
1928 namespace=namespace,
1929 reuse_ee_id=ee_id,
1930 db_dict=db_dict,
1931 config=osm_config,
1932 artifact_path=artifact_path,
1933 vca_type=vca_type,
1934 )
1935 else:
1936 ee_id, credentials = await self.vca_map[
1937 vca_type
1938 ].create_execution_environment(
1939 namespace=namespace,
1940 reuse_ee_id=ee_id,
1941 db_dict=db_dict,
1942 vca_id=vca_id,
1943 )
1944
1945 elif vca_type == "native_charm":
1946 step = "Waiting to VM being up and getting IP address"
1947 self.logger.debug(logging_text + step)
1948 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1949 logging_text,
1950 nsr_id,
1951 vnfr_id,
1952 vdu_id,
1953 vdu_index,
1954 user=None,
1955 pub_key=None,
1956 )
1957 credentials = {"hostname": rw_mgmt_ip}
1958 # get username
1959 username = deep_get(
1960 config_descriptor, ("config-access", "ssh-access", "default-user")
1961 )
1962 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1963 # merged. Meanwhile let's get username from initial-config-primitive
1964 if not username and initial_config_primitive_list:
1965 for config_primitive in initial_config_primitive_list:
1966 for param in config_primitive.get("parameter", ()):
1967 if param["name"] == "ssh-username":
1968 username = param["value"]
1969 break
1970 if not username:
1971 raise LcmException(
1972 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1973 "'config-access.ssh-access.default-user'"
1974 )
1975 credentials["username"] = username
1976 # n2vc_redesign STEP 3.2
1977
1978 self._write_configuration_status(
1979 nsr_id=nsr_id,
1980 vca_index=vca_index,
1981 status="REGISTERING",
1982 element_under_configuration=element_under_configuration,
1983 element_type=element_type,
1984 )
1985
1986 step = "register execution environment {}".format(credentials)
1987 self.logger.debug(logging_text + step)
1988 ee_id = await self.vca_map[vca_type].register_execution_environment(
1989 credentials=credentials,
1990 namespace=namespace,
1991 db_dict=db_dict,
1992 vca_id=vca_id,
1993 )
1994
1995 # for compatibility with MON/POL modules, the need model and application name at database
1996 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1997 ee_id_parts = ee_id.split(".")
1998 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1999 if len(ee_id_parts) >= 2:
2000 model_name = ee_id_parts[0]
2001 application_name = ee_id_parts[1]
2002 db_nsr_update[db_update_entry + "model"] = model_name
2003 db_nsr_update[db_update_entry + "application"] = application_name
2004
2005 # n2vc_redesign STEP 3.3
2006 step = "Install configuration Software"
2007
2008 self._write_configuration_status(
2009 nsr_id=nsr_id,
2010 vca_index=vca_index,
2011 status="INSTALLING SW",
2012 element_under_configuration=element_under_configuration,
2013 element_type=element_type,
2014 other_update=db_nsr_update,
2015 )
2016
2017 # TODO check if already done
2018 self.logger.debug(logging_text + step)
2019 config = None
2020 if vca_type == "native_charm":
2021 config_primitive = next(
2022 (p for p in initial_config_primitive_list if p["name"] == "config"),
2023 None,
2024 )
2025 if config_primitive:
2026 config = self._map_primitive_params(
2027 config_primitive, {}, deploy_params
2028 )
2029 num_units = 1
2030 if vca_type == "lxc_proxy_charm":
2031 if element_type == "NS":
2032 num_units = db_nsr.get("config-units") or 1
2033 elif element_type == "VNF":
2034 num_units = db_vnfr.get("config-units") or 1
2035 elif element_type == "VDU":
2036 for v in db_vnfr["vdur"]:
2037 if vdu_id == v["vdu-id-ref"]:
2038 num_units = v.get("config-units") or 1
2039 break
2040 if vca_type != "k8s_proxy_charm":
2041 await self.vca_map[vca_type].install_configuration_sw(
2042 ee_id=ee_id,
2043 artifact_path=artifact_path,
2044 db_dict=db_dict,
2045 config=config,
2046 num_units=num_units,
2047 vca_id=vca_id,
2048 vca_type=vca_type,
2049 )
2050
2051 # write in db flag of configuration_sw already installed
2052 self.update_db_2(
2053 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2054 )
2055
2056 # add relations for this VCA (wait for other peers related with this VCA)
2057 await self._add_vca_relations(
2058 logging_text=logging_text,
2059 nsr_id=nsr_id,
2060 vca_type=vca_type,
2061 vca_index=vca_index,
2062 )
2063
2064 # if SSH access is required, then get execution environment SSH public
2065 # if native charm we have waited already to VM be UP
2066 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2067 pub_key = None
2068 user = None
2069 # self.logger.debug("get ssh key block")
2070 if deep_get(
2071 config_descriptor, ("config-access", "ssh-access", "required")
2072 ):
2073 # self.logger.debug("ssh key needed")
2074 # Needed to inject a ssh key
2075 user = deep_get(
2076 config_descriptor,
2077 ("config-access", "ssh-access", "default-user"),
2078 )
2079 step = "Install configuration Software, getting public ssh key"
2080 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2081 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2082 )
2083
2084 step = "Insert public key into VM user={} ssh_key={}".format(
2085 user, pub_key
2086 )
2087 else:
2088 # self.logger.debug("no need to get ssh key")
2089 step = "Waiting to VM being up and getting IP address"
2090 self.logger.debug(logging_text + step)
2091
2092 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2093 rw_mgmt_ip = None
2094
2095 # n2vc_redesign STEP 5.1
2096 # wait for RO (ip-address) Insert pub_key into VM
2097 if vnfr_id:
2098 if kdu_name:
2099 rw_mgmt_ip, services = await self.wait_kdu_up(
2100 logging_text, nsr_id, vnfr_id, kdu_name
2101 )
2102 vnfd = self.db.get_one(
2103 "vnfds_revisions",
2104 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2105 )
2106 kdu = get_kdu(vnfd, kdu_name)
2107 kdu_services = [
2108 service["name"] for service in get_kdu_services(kdu)
2109 ]
2110 exposed_services = []
2111 for service in services:
2112 if any(s in service["name"] for s in kdu_services):
2113 exposed_services.append(service)
2114 await self.vca_map[vca_type].exec_primitive(
2115 ee_id=ee_id,
2116 primitive_name="config",
2117 params_dict={
2118 "osm-config": json.dumps(
2119 OsmConfigBuilder(
2120 k8s={"services": exposed_services}
2121 ).build()
2122 )
2123 },
2124 vca_id=vca_id,
2125 )
2126
2127 # This verification is needed in order to avoid trying to add a public key
2128 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2129 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2130 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2131 # or it is a KNF)
2132 elif db_vnfr.get("vdur"):
2133 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2134 logging_text,
2135 nsr_id,
2136 vnfr_id,
2137 vdu_id,
2138 vdu_index,
2139 user=user,
2140 pub_key=pub_key,
2141 )
2142
2143 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2144
2145 # store rw_mgmt_ip in deploy params for later replacement
2146 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2147
2148 # n2vc_redesign STEP 6 Execute initial config primitive
2149 step = "execute initial config primitive"
2150
2151 # wait for dependent primitives execution (NS -> VNF -> VDU)
2152 if initial_config_primitive_list:
2153 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2154
2155 # stage, in function of element type: vdu, kdu, vnf or ns
2156 my_vca = vca_deployed_list[vca_index]
2157 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2158 # VDU or KDU
2159 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2160 elif my_vca.get("member-vnf-index"):
2161 # VNF
2162 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2163 else:
2164 # NS
2165 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2166
2167 self._write_configuration_status(
2168 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2169 )
2170
2171 self._write_op_status(op_id=nslcmop_id, stage=stage)
2172
2173 check_if_terminated_needed = True
2174 for initial_config_primitive in initial_config_primitive_list:
2175 # adding information on the vca_deployed if it is a NS execution environment
2176 if not vca_deployed["member-vnf-index"]:
2177 deploy_params["ns_config_info"] = json.dumps(
2178 self._get_ns_config_info(nsr_id)
2179 )
2180 # TODO check if already done
2181 primitive_params_ = self._map_primitive_params(
2182 initial_config_primitive, {}, deploy_params
2183 )
2184
2185 step = "execute primitive '{}' params '{}'".format(
2186 initial_config_primitive["name"], primitive_params_
2187 )
2188 self.logger.debug(logging_text + step)
2189 await self.vca_map[vca_type].exec_primitive(
2190 ee_id=ee_id,
2191 primitive_name=initial_config_primitive["name"],
2192 params_dict=primitive_params_,
2193 db_dict=db_dict,
2194 vca_id=vca_id,
2195 vca_type=vca_type,
2196 )
2197 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2198 if check_if_terminated_needed:
2199 if config_descriptor.get("terminate-config-primitive"):
2200 self.update_db_2(
2201 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2202 )
2203 check_if_terminated_needed = False
2204
2205 # TODO register in database that primitive is done
2206
2207 # STEP 7 Configure metrics
2208 if vca_type == "helm" or vca_type == "helm-v3":
2209 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2210 ee_id=ee_id,
2211 artifact_path=artifact_path,
2212 ee_config_descriptor=ee_config_descriptor,
2213 vnfr_id=vnfr_id,
2214 nsr_id=nsr_id,
2215 target_ip=rw_mgmt_ip,
2216 )
2217 if prometheus_jobs:
2218 self.update_db_2(
2219 "nsrs",
2220 nsr_id,
2221 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2222 )
2223
2224 for job in prometheus_jobs:
2225 self.db.set_one(
2226 "prometheus_jobs",
2227 {"job_name": job["job_name"]},
2228 job,
2229 upsert=True,
2230 fail_on_empty=False,
2231 )
2232
2233 step = "instantiated at VCA"
2234 self.logger.debug(logging_text + step)
2235
2236 self._write_configuration_status(
2237 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2238 )
2239
2240 except Exception as e: # TODO not use Exception but N2VC exception
2241 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2242 if not isinstance(
2243 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2244 ):
2245 self.logger.error(
2246 "Exception while {} : {}".format(step, e), exc_info=True
2247 )
2248 self._write_configuration_status(
2249 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2250 )
2251 raise LcmException("{} {}".format(step, e)) from e
2252
2253 def _write_ns_status(
2254 self,
2255 nsr_id: str,
2256 ns_state: str,
2257 current_operation: str,
2258 current_operation_id: str,
2259 error_description: str = None,
2260 error_detail: str = None,
2261 other_update: dict = None,
2262 ):
2263 """
2264 Update db_nsr fields.
2265 :param nsr_id:
2266 :param ns_state:
2267 :param current_operation:
2268 :param current_operation_id:
2269 :param error_description:
2270 :param error_detail:
2271 :param other_update: Other required changes at database if provided, will be cleared
2272 :return:
2273 """
2274 try:
2275 db_dict = other_update or {}
2276 db_dict[
2277 "_admin.nslcmop"
2278 ] = current_operation_id # for backward compatibility
2279 db_dict["_admin.current-operation"] = current_operation_id
2280 db_dict["_admin.operation-type"] = (
2281 current_operation if current_operation != "IDLE" else None
2282 )
2283 db_dict["currentOperation"] = current_operation
2284 db_dict["currentOperationID"] = current_operation_id
2285 db_dict["errorDescription"] = error_description
2286 db_dict["errorDetail"] = error_detail
2287
2288 if ns_state:
2289 db_dict["nsState"] = ns_state
2290 self.update_db_2("nsrs", nsr_id, db_dict)
2291 except DbException as e:
2292 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2293
2294 def _write_op_status(
2295 self,
2296 op_id: str,
2297 stage: list = None,
2298 error_message: str = None,
2299 queuePosition: int = 0,
2300 operation_state: str = None,
2301 other_update: dict = None,
2302 ):
2303 try:
2304 db_dict = other_update or {}
2305 db_dict["queuePosition"] = queuePosition
2306 if isinstance(stage, list):
2307 db_dict["stage"] = stage[0]
2308 db_dict["detailed-status"] = " ".join(stage)
2309 elif stage is not None:
2310 db_dict["stage"] = str(stage)
2311
2312 if error_message is not None:
2313 db_dict["errorMessage"] = error_message
2314 if operation_state is not None:
2315 db_dict["operationState"] = operation_state
2316 db_dict["statusEnteredTime"] = time()
2317 self.update_db_2("nslcmops", op_id, db_dict)
2318 except DbException as e:
2319 self.logger.warn(
2320 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2321 )
2322
2323 def _write_all_config_status(self, db_nsr: dict, status: str):
2324 try:
2325 nsr_id = db_nsr["_id"]
2326 # configurationStatus
2327 config_status = db_nsr.get("configurationStatus")
2328 if config_status:
2329 db_nsr_update = {
2330 "configurationStatus.{}.status".format(index): status
2331 for index, v in enumerate(config_status)
2332 if v
2333 }
2334 # update status
2335 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2336
2337 except DbException as e:
2338 self.logger.warn(
2339 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2340 )
2341
2342 def _write_configuration_status(
2343 self,
2344 nsr_id: str,
2345 vca_index: int,
2346 status: str = None,
2347 element_under_configuration: str = None,
2348 element_type: str = None,
2349 other_update: dict = None,
2350 ):
2351 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2352 # .format(vca_index, status))
2353
2354 try:
2355 db_path = "configurationStatus.{}.".format(vca_index)
2356 db_dict = other_update or {}
2357 if status:
2358 db_dict[db_path + "status"] = status
2359 if element_under_configuration:
2360 db_dict[
2361 db_path + "elementUnderConfiguration"
2362 ] = element_under_configuration
2363 if element_type:
2364 db_dict[db_path + "elementType"] = element_type
2365 self.update_db_2("nsrs", nsr_id, db_dict)
2366 except DbException as e:
2367 self.logger.warn(
2368 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2369 status, nsr_id, vca_index, e
2370 )
2371 )
2372
2373 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2374 """
2375 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2376 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2377 Database is used because the result can be obtained from a different LCM worker in case of HA.
2378 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2379 :param db_nslcmop: database content of nslcmop
2380 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2381 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2382 computed 'vim-account-id'
2383 """
2384 modified = False
2385 nslcmop_id = db_nslcmop["_id"]
2386 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2387 if placement_engine == "PLA":
2388 self.logger.debug(
2389 logging_text + "Invoke and wait for placement optimization"
2390 )
2391 await self.msg.aiowrite(
2392 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2393 )
2394 db_poll_interval = 5
2395 wait = db_poll_interval * 10
2396 pla_result = None
2397 while not pla_result and wait >= 0:
2398 await asyncio.sleep(db_poll_interval)
2399 wait -= db_poll_interval
2400 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2401 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2402
2403 if not pla_result:
2404 raise LcmException(
2405 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2406 )
2407
2408 for pla_vnf in pla_result["vnf"]:
2409 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2410 if not pla_vnf.get("vimAccountId") or not vnfr:
2411 continue
2412 modified = True
2413 self.db.set_one(
2414 "vnfrs",
2415 {"_id": vnfr["_id"]},
2416 {"vim-account-id": pla_vnf["vimAccountId"]},
2417 )
2418 # Modifies db_vnfrs
2419 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2420 return modified
2421
2422 def update_nsrs_with_pla_result(self, params):
2423 try:
2424 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2425 self.update_db_2(
2426 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2427 )
2428 except Exception as e:
2429 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2430
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: read the NS/VNF records, deploy KDUs,
        launch the RO (VIM) deployment task and the N2VC execution environments,
        wait for all spawned tasks and write the final status at database/kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Result is reported at database (nsrs/nslcmops) and kafka.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            # make sure the descriptor artifacts are present in the local filesystem
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams are stored JSON-encoded; decode in place
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds whole vnfd documents, not ids, so this
                # membership test is always True and a shared vnfd may be appended more
                # than once — verify whether duplicates are harmless downstream.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with the N2VC deployments started below;
            # the task is registered so it can be cancelled and awaited at finally.
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if the vnfd declares one
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance (scaling count)
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms, if any kdu declares a configuration
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify other modules (NBI, MON, ...) that the operation finished
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2916
2917 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2918 if vnfd_id not in cached_vnfds:
2919 cached_vnfds[vnfd_id] = self.db.get_one(
2920 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2921 )
2922 return cached_vnfds[vnfd_id]
2923
2924 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2925 if vnf_profile_id not in cached_vnfrs:
2926 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2927 "vnfrs",
2928 {
2929 "member-vnf-index-ref": vnf_profile_id,
2930 "nsr-id-ref": nsr_id,
2931 },
2932 )
2933 return cached_vnfrs[vnf_profile_id]
2934
2935 def _is_deployed_vca_in_relation(
2936 self, vca: DeployedVCA, relation: Relation
2937 ) -> bool:
2938 found = False
2939 for endpoint in (relation.provider, relation.requirer):
2940 if endpoint["kdu-resource-profile-id"]:
2941 continue
2942 found = (
2943 vca.vnf_profile_id == endpoint.vnf_profile_id
2944 and vca.vdu_profile_id == endpoint.vdu_profile_id
2945 and vca.execution_environment_ref == endpoint.execution_environment_ref
2946 )
2947 if found:
2948 break
2949 return found
2950
2951 def _update_ee_relation_data_with_implicit_data(
2952 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2953 ):
2954 ee_relation_data = safe_get_ee_relation(
2955 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2956 )
2957 ee_relation_level = EELevel.get_level(ee_relation_data)
2958 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2959 "execution-environment-ref"
2960 ]:
2961 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2962 vnfd_id = vnf_profile["vnfd-id"]
2963 project = nsd["_admin"]["projects_read"][0]
2964 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2965 entity_id = (
2966 vnfd_id
2967 if ee_relation_level == EELevel.VNF
2968 else ee_relation_data["vdu-profile-id"]
2969 )
2970 ee = get_juju_ee_ref(db_vnfd, entity_id)
2971 if not ee:
2972 raise Exception(
2973 f"not execution environments found for ee_relation {ee_relation_data}"
2974 )
2975 ee_relation_data["execution-environment-ref"] = ee["id"]
2976 return ee_relation_data
2977
2978 def _get_ns_relations(
2979 self,
2980 nsr_id: str,
2981 nsd: Dict[str, Any],
2982 vca: DeployedVCA,
2983 cached_vnfds: Dict[str, Any],
2984 ) -> List[Relation]:
2985 relations = []
2986 db_ns_relations = get_ns_configuration_relation_list(nsd)
2987 for r in db_ns_relations:
2988 provider_dict = None
2989 requirer_dict = None
2990 if all(key in r for key in ("provider", "requirer")):
2991 provider_dict = r["provider"]
2992 requirer_dict = r["requirer"]
2993 elif "entities" in r:
2994 provider_id = r["entities"][0]["id"]
2995 provider_dict = {
2996 "nsr-id": nsr_id,
2997 "endpoint": r["entities"][0]["endpoint"],
2998 }
2999 if provider_id != nsd["id"]:
3000 provider_dict["vnf-profile-id"] = provider_id
3001 requirer_id = r["entities"][1]["id"]
3002 requirer_dict = {
3003 "nsr-id": nsr_id,
3004 "endpoint": r["entities"][1]["endpoint"],
3005 }
3006 if requirer_id != nsd["id"]:
3007 requirer_dict["vnf-profile-id"] = requirer_id
3008 else:
3009 raise Exception(
3010 "provider/requirer or entities must be included in the relation."
3011 )
3012 relation_provider = self._update_ee_relation_data_with_implicit_data(
3013 nsr_id, nsd, provider_dict, cached_vnfds
3014 )
3015 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3016 nsr_id, nsd, requirer_dict, cached_vnfds
3017 )
3018 provider = EERelation(relation_provider)
3019 requirer = EERelation(relation_requirer)
3020 relation = Relation(r["name"], provider, requirer)
3021 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3022 if vca_in_relation:
3023 relations.append(relation)
3024 return relations
3025
3026 def _get_vnf_relations(
3027 self,
3028 nsr_id: str,
3029 nsd: Dict[str, Any],
3030 vca: DeployedVCA,
3031 cached_vnfds: Dict[str, Any],
3032 ) -> List[Relation]:
3033 relations = []
3034 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3035 vnf_profile_id = vnf_profile["id"]
3036 vnfd_id = vnf_profile["vnfd-id"]
3037 project = nsd["_admin"]["projects_read"][0]
3038 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3039 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3040 for r in db_vnf_relations:
3041 provider_dict = None
3042 requirer_dict = None
3043 if all(key in r for key in ("provider", "requirer")):
3044 provider_dict = r["provider"]
3045 requirer_dict = r["requirer"]
3046 elif "entities" in r:
3047 provider_id = r["entities"][0]["id"]
3048 provider_dict = {
3049 "nsr-id": nsr_id,
3050 "vnf-profile-id": vnf_profile_id,
3051 "endpoint": r["entities"][0]["endpoint"],
3052 }
3053 if provider_id != vnfd_id:
3054 provider_dict["vdu-profile-id"] = provider_id
3055 requirer_id = r["entities"][1]["id"]
3056 requirer_dict = {
3057 "nsr-id": nsr_id,
3058 "vnf-profile-id": vnf_profile_id,
3059 "endpoint": r["entities"][1]["endpoint"],
3060 }
3061 if requirer_id != vnfd_id:
3062 requirer_dict["vdu-profile-id"] = requirer_id
3063 else:
3064 raise Exception(
3065 "provider/requirer or entities must be included in the relation."
3066 )
3067 relation_provider = self._update_ee_relation_data_with_implicit_data(
3068 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3069 )
3070 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3071 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3072 )
3073 provider = EERelation(relation_provider)
3074 requirer = EERelation(relation_requirer)
3075 relation = Relation(r["name"], provider, requirer)
3076 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3077 if vca_in_relation:
3078 relations.append(relation)
3079 return relations
3080
3081 def _get_kdu_resource_data(
3082 self,
3083 ee_relation: EERelation,
3084 db_nsr: Dict[str, Any],
3085 cached_vnfds: Dict[str, Any],
3086 ) -> DeployedK8sResource:
3087 nsd = get_nsd(db_nsr)
3088 vnf_profiles = get_vnf_profiles(nsd)
3089 vnfd_id = find_in_list(
3090 vnf_profiles,
3091 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3092 )["vnfd-id"]
3093 project = nsd["_admin"]["projects_read"][0]
3094 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3095 kdu_resource_profile = get_kdu_resource_profile(
3096 db_vnfd, ee_relation.kdu_resource_profile_id
3097 )
3098 kdu_name = kdu_resource_profile["kdu-name"]
3099 deployed_kdu, _ = get_deployed_kdu(
3100 db_nsr.get("_admin", ()).get("deployed", ()),
3101 kdu_name,
3102 ee_relation.vnf_profile_id,
3103 )
3104 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3105 return deployed_kdu
3106
3107 def _get_deployed_component(
3108 self,
3109 ee_relation: EERelation,
3110 db_nsr: Dict[str, Any],
3111 cached_vnfds: Dict[str, Any],
3112 ) -> DeployedComponent:
3113 nsr_id = db_nsr["_id"]
3114 deployed_component = None
3115 ee_level = EELevel.get_level(ee_relation)
3116 if ee_level == EELevel.NS:
3117 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3118 if vca:
3119 deployed_component = DeployedVCA(nsr_id, vca)
3120 elif ee_level == EELevel.VNF:
3121 vca = get_deployed_vca(
3122 db_nsr,
3123 {
3124 "vdu_id": None,
3125 "member-vnf-index": ee_relation.vnf_profile_id,
3126 "ee_descriptor_id": ee_relation.execution_environment_ref,
3127 },
3128 )
3129 if vca:
3130 deployed_component = DeployedVCA(nsr_id, vca)
3131 elif ee_level == EELevel.VDU:
3132 vca = get_deployed_vca(
3133 db_nsr,
3134 {
3135 "vdu_id": ee_relation.vdu_profile_id,
3136 "member-vnf-index": ee_relation.vnf_profile_id,
3137 "ee_descriptor_id": ee_relation.execution_environment_ref,
3138 },
3139 )
3140 if vca:
3141 deployed_component = DeployedVCA(nsr_id, vca)
3142 elif ee_level == EELevel.KDU:
3143 kdu_resource_data = self._get_kdu_resource_data(
3144 ee_relation, db_nsr, cached_vnfds
3145 )
3146 if kdu_resource_data:
3147 deployed_component = DeployedK8sResource(kdu_resource_data)
3148 return deployed_component
3149
3150 async def _add_relation(
3151 self,
3152 relation: Relation,
3153 vca_type: str,
3154 db_nsr: Dict[str, Any],
3155 cached_vnfds: Dict[str, Any],
3156 cached_vnfrs: Dict[str, Any],
3157 ) -> bool:
3158 deployed_provider = self._get_deployed_component(
3159 relation.provider, db_nsr, cached_vnfds
3160 )
3161 deployed_requirer = self._get_deployed_component(
3162 relation.requirer, db_nsr, cached_vnfds
3163 )
3164 if (
3165 deployed_provider
3166 and deployed_requirer
3167 and deployed_provider.config_sw_installed
3168 and deployed_requirer.config_sw_installed
3169 ):
3170 provider_db_vnfr = (
3171 self._get_vnfr(
3172 relation.provider.nsr_id,
3173 relation.provider.vnf_profile_id,
3174 cached_vnfrs,
3175 )
3176 if relation.provider.vnf_profile_id
3177 else None
3178 )
3179 requirer_db_vnfr = (
3180 self._get_vnfr(
3181 relation.requirer.nsr_id,
3182 relation.requirer.vnf_profile_id,
3183 cached_vnfrs,
3184 )
3185 if relation.requirer.vnf_profile_id
3186 else None
3187 )
3188 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3189 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3190 provider_relation_endpoint = RelationEndpoint(
3191 deployed_provider.ee_id,
3192 provider_vca_id,
3193 relation.provider.endpoint,
3194 )
3195 requirer_relation_endpoint = RelationEndpoint(
3196 deployed_requirer.ee_id,
3197 requirer_vca_id,
3198 relation.requirer.endpoint,
3199 )
3200 await self.vca_map[vca_type].add_relation(
3201 provider=provider_relation_endpoint,
3202 requirer=requirer_relation_endpoint,
3203 )
3204 # remove entry from relations list
3205 return True
3206 return False
3207
3208 async def _add_vca_relations(
3209 self,
3210 logging_text,
3211 nsr_id,
3212 vca_type: str,
3213 vca_index: int,
3214 timeout: int = 3600,
3215 ) -> bool:
3216 # steps:
3217 # 1. find all relations for this VCA
3218 # 2. wait for other peers related
3219 # 3. add relations
3220
3221 try:
3222 # STEP 1: find all relations for this VCA
3223
3224 # read nsr record
3225 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3226 nsd = get_nsd(db_nsr)
3227
3228 # this VCA data
3229 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3230 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3231
3232 cached_vnfds = {}
3233 cached_vnfrs = {}
3234 relations = []
3235 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3236 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3237
3238 # if no relations, terminate
3239 if not relations:
3240 self.logger.debug(logging_text + " No relations")
3241 return True
3242
3243 self.logger.debug(logging_text + " adding relations {}".format(relations))
3244
3245 # add all relations
3246 start = time()
3247 while True:
3248 # check timeout
3249 now = time()
3250 if now - start >= timeout:
3251 self.logger.error(logging_text + " : timeout adding relations")
3252 return False
3253
3254 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3255 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3256
3257 # for each relation, find the VCA's related
3258 for relation in relations.copy():
3259 added = await self._add_relation(
3260 relation,
3261 vca_type,
3262 db_nsr,
3263 cached_vnfds,
3264 cached_vnfrs,
3265 )
3266 if added:
3267 relations.remove(relation)
3268
3269 if not relations:
3270 self.logger.debug("Relations added")
3271 break
3272 await asyncio.sleep(5.0)
3273
3274 return True
3275
3276 except Exception as e:
3277 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3278 return False
3279
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install a single KDU on its K8s cluster and record the result in the DB.

        :param nsr_id: id of the NS record that owns this KDU
        :param nsr_db_path: nsr path of this deployment ("_admin.deployed.K8s.<index>")
        :param vnfr_data: VNF record the kdur belongs to
        :param kdu_index: index of the kdur inside vnfr_data["kdur"]
        :param kdud: KDU descriptor taken from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace, ...
        :param k8params: instantiation parameters for the KDU
        :param timeout: seconds granted to the install and to each config primitive
        :param vca_id: VCA id used for juju-related operations
        :return: the kdu_instance name used for the deployment
        :raises Exception: re-raises any failure after writing the error status
            to the nsrs and vnfrs collections
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Honour a user-provided deployment name; otherwise let the
            # connector generate one.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    # for-else: no deployed service name matched this mgmt service
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives through the K8s connector only when
            # the KDU has no juju execution environment to run them instead.
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3476
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch a deployment task for every KDU present in the VNF records.

        One asyncio task per kdur is created (running self._install_kdu) and
        registered both in the LCM task registry and in task_instantiation_info;
        the method returns after launching the tasks, not when they finish.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id the tasks belong to
        :param db_vnfrs: VNF records of this NS, indexed by member-vnf-index
        :param db_vnfds: list of VNF descriptors of this NS
        :param task_instantiation_info: dict where each launched task is annotated
        :raises LcmException: on descriptor errors or any preparation failure
        """
        # Launch kdus if present in the descriptor

        # Cache of cluster-id -> connector uuid, per cluster type.
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and memoize) the connector-level uuid of a K8s cluster,
            # initializing helm-v3 on legacy clusters when needed.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3748
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch an instantiate_N2VC task for each execution environment of the target.

        For every ee in descriptor_config (or the single ns-level juju charm),
        an existing _admin.deployed.VCA entry is reused when one matches; otherwise
        a new entry is created in the DB before the task is launched and registered
        in task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the ee descriptor (juju charm vs helm chart).
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # for-else: reuse the matching deployed-VCA entry if the loop breaks,
            # otherwise (else) create a new entry at the next index.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3901
3902 @staticmethod
3903 def _create_nslcmop(nsr_id, operation, params):
3904 """
3905 Creates a ns-lcm-opp content to be stored at database.
3906 :param nsr_id: internal id of the instance
3907 :param operation: instantiate, terminate, scale, action, ...
3908 :param params: user parameters for the operation
3909 :return: dictionary following SOL005 format
3910 """
3911 # Raise exception if invalid arguments
3912 if not (nsr_id and operation and params):
3913 raise LcmException(
3914 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3915 )
3916 now = time()
3917 _id = str(uuid4())
3918 nslcmop = {
3919 "id": _id,
3920 "_id": _id,
3921 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3922 "operationState": "PROCESSING",
3923 "statusEnteredTime": now,
3924 "nsInstanceId": nsr_id,
3925 "lcmOperationType": operation,
3926 "startTime": now,
3927 "isAutomaticInvocation": False,
3928 "operationParams": params,
3929 "isCancelPending": False,
3930 "links": {
3931 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3932 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3933 },
3934 }
3935 return nslcmop
3936
3937 def _format_additional_params(self, params):
3938 params = params or {}
3939 for key, value in params.items():
3940 if str(value).startswith("!!yaml "):
3941 params[key] = yaml.safe_load(value[7:])
3942 return params
3943
3944 def _get_terminate_primitive_params(self, seq, vnf_index):
3945 primitive = seq.get("name")
3946 primitive_params = {}
3947 params = {
3948 "member_vnf_index": vnf_index,
3949 "primitive": primitive,
3950 "primitive_params": primitive_params,
3951 }
3952 desc_params = {}
3953 return self._map_primitive_params(seq, params, desc_params)
3954
3955 # sub-operations
3956
3957 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3958 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3959 if op.get("operationState") == "COMPLETED":
3960 # b. Skip sub-operation
3961 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3962 return self.SUBOPERATION_STATUS_SKIP
3963 else:
3964 # c. retry executing sub-operation
3965 # The sub-operation exists, and operationState != 'COMPLETED'
3966 # Update operationState = 'PROCESSING' to indicate a retry.
3967 operationState = "PROCESSING"
3968 detailed_status = "In progress"
3969 self._update_suboperation_status(
3970 db_nslcmop, op_index, operationState, detailed_status
3971 )
3972 # Return the sub-operation index
3973 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3974 # with arguments extracted from the sub-operation
3975 return op_index
3976
3977 # Find a sub-operation where all keys in a matching dictionary must match
3978 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3979 def _find_suboperation(self, db_nslcmop, match):
3980 if db_nslcmop and match:
3981 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3982 for i, op in enumerate(op_list):
3983 if all(op.get(k) == match[k] for k in match):
3984 return i
3985 return self.SUBOPERATION_STATUS_NOT_FOUND
3986
3987 # Update status for a sub-operation given its index
3988 def _update_suboperation_status(
3989 self, db_nslcmop, op_index, operationState, detailed_status
3990 ):
3991 # Update DB for HA tasks
3992 q_filter = {"_id": db_nslcmop["_id"]}
3993 update_dict = {
3994 "_admin.operations.{}.operationState".format(op_index): operationState,
3995 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3996 }
3997 self.db.set_one(
3998 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3999 )
4000
4001 # Add sub-operation, return the index of the added sub-operation
4002 # Optionally, set operationState, detailed-status, and operationType
4003 # Status and type are currently set for 'scale' sub-operations:
4004 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4005 # 'detailed-status' : status message
4006 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4007 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4008 def _add_suboperation(
4009 self,
4010 db_nslcmop,
4011 vnf_index,
4012 vdu_id,
4013 vdu_count_index,
4014 vdu_name,
4015 primitive,
4016 mapped_primitive_params,
4017 operationState=None,
4018 detailed_status=None,
4019 operationType=None,
4020 RO_nsr_id=None,
4021 RO_scaling_info=None,
4022 ):
4023 if not db_nslcmop:
4024 return self.SUBOPERATION_STATUS_NOT_FOUND
4025 # Get the "_admin.operations" list, if it exists
4026 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4027 op_list = db_nslcmop_admin.get("operations")
4028 # Create or append to the "_admin.operations" list
4029 new_op = {
4030 "member_vnf_index": vnf_index,
4031 "vdu_id": vdu_id,
4032 "vdu_count_index": vdu_count_index,
4033 "primitive": primitive,
4034 "primitive_params": mapped_primitive_params,
4035 }
4036 if operationState:
4037 new_op["operationState"] = operationState
4038 if detailed_status:
4039 new_op["detailed-status"] = detailed_status
4040 if operationType:
4041 new_op["lcmOperationType"] = operationType
4042 if RO_nsr_id:
4043 new_op["RO_nsr_id"] = RO_nsr_id
4044 if RO_scaling_info:
4045 new_op["RO_scaling_info"] = RO_scaling_info
4046 if not op_list:
4047 # No existing operations, create key 'operations' with current operation as first list element
4048 db_nslcmop_admin.update({"operations": [new_op]})
4049 op_list = db_nslcmop_admin.get("operations")
4050 else:
4051 # Existing operations, append operation to list
4052 op_list.append(new_op)
4053
4054 db_nslcmop_update = {"_admin.operations": op_list}
4055 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4056 op_index = len(op_list) - 1
4057 return op_index
4058
4059 # Helper methods for scale() sub-operations
4060
4061 # pre-scale/post-scale:
4062 # Check for 3 different cases:
4063 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4064 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4065 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4066 def _check_or_add_scale_suboperation(
4067 self,
4068 db_nslcmop,
4069 vnf_index,
4070 vnf_config_primitive,
4071 primitive_params,
4072 operationType,
4073 RO_nsr_id=None,
4074 RO_scaling_info=None,
4075 ):
4076 # Find this sub-operation
4077 if RO_nsr_id and RO_scaling_info:
4078 operationType = "SCALE-RO"
4079 match = {
4080 "member_vnf_index": vnf_index,
4081 "RO_nsr_id": RO_nsr_id,
4082 "RO_scaling_info": RO_scaling_info,
4083 }
4084 else:
4085 match = {
4086 "member_vnf_index": vnf_index,
4087 "primitive": vnf_config_primitive,
4088 "primitive_params": primitive_params,
4089 "lcmOperationType": operationType,
4090 }
4091 op_index = self._find_suboperation(db_nslcmop, match)
4092 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4093 # a. New sub-operation
4094 # The sub-operation does not exist, add it.
4095 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4096 # The following parameters are set to None for all kind of scaling:
4097 vdu_id = None
4098 vdu_count_index = None
4099 vdu_name = None
4100 if RO_nsr_id and RO_scaling_info:
4101 vnf_config_primitive = None
4102 primitive_params = None
4103 else:
4104 RO_nsr_id = None
4105 RO_scaling_info = None
4106 # Initial status for sub-operation
4107 operationState = "PROCESSING"
4108 detailed_status = "In progress"
4109 # Add sub-operation for pre/post-scaling (zero or more operations)
4110 self._add_suboperation(
4111 db_nslcmop,
4112 vnf_index,
4113 vdu_id,
4114 vdu_count_index,
4115 vdu_name,
4116 vnf_config_primitive,
4117 primitive_params,
4118 operationState,
4119 detailed_status,
4120 operationType,
4121 RO_nsr_id,
4122 RO_scaling_info,
4123 )
4124 return self.SUBOPERATION_STATUS_NEW
4125 else:
4126 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4127 # or op_index (operationState != 'COMPLETED')
4128 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4129
4130 # Function to return execution_environment id
4131
4132 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4133 # TODO vdu_index_count
4134 for vca in vca_deployed_list:
4135 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4136 return vca["ee_id"]
4137
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: database record of the current nslcmop operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy now, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) identifier, when a dedicated one is registered
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default kept for backward compatibility with pre-"type" records
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # only run when the VCA has been flagged as needing termination
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so an HA retry can resume from here
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4243
4244 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4245 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4246 namespace = "." + db_nsr["_id"]
4247 try:
4248 await self.n2vc.delete_namespace(
4249 namespace=namespace,
4250 total_timeout=self.timeout_charm_delete,
4251 vca_id=vca_id,
4252 )
4253 except N2VCNotFound: # already deleted. Skip
4254 pass
4255 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4256
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text: prefix prepended to every log message
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: NS instance id
        :param nslcmop_id: operation id
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None; raises LcmException when any of the deletions fails
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates every non-fatal error; raised at the end
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a previous (interrupted) terminate may have left a pending delete action
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                # poll the RO delete action every 5s until ACTIVE/ERROR/timeout
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus: propagate RO view of the NS to the DB
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # delete action finished successfully
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only write to the DB when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found: already deleted, treat as success
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict: something still uses the ns; recorded, raised later
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns itself was deleted cleanly)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found: already deleted, treat as success
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict: nsd still in use (e.g. by another ns)
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO for each deployed member vnf
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found: already deleted, treat as success
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict: vnfd still in use
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4456
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a NS instance.

        Runs as an HA-locked LCM task. Progress is reported in three stages:
        1/3 prepare, 2/3 execute terminate primitives, 3/3 delete execution
        environments, KDUs and the RO/VIM deployment. Final status (and a
        'terminated' kafka message) is always written in the finally clause.
        :param nsr_id: NS instance id (nsrs collection)
        :param nslcmop_id: operation id (nslcmops collection)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human description, consumed by _wait_for_tasks
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so partial mutations are never persisted by accident
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; finally clause still writes final status
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each distinct vnfd by id
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor that matches the VCA scope:
                # ns-level, vdu-level, kdu-level or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # abort; final status is written by the finally clause
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    # unknown cluster type: log and skip this KDU
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (new-generation RO client when available)
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of the steps will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    # propagate the final state to every VNFR of this NS
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) that the NS terminated
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4785
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a group of asyncio tasks, collecting errors and progress.

        Progress ("done/total") is written to stage[1] and persisted via
        _write_op_status after each completion; errors are also written to
        the nsr record when nsr_id is given.
        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict task -> short human description
        :param timeout: overall timeout in seconds for the whole group
        :param stage: 3-element status list; only index 1 is updated here
        :param nslcmop_id: operation id used to persist the stage
        :param nsr_id: when provided, errorDescription/errorDetail are also
            written to the nsr record
        :return: list of detailed error strings (empty when all tasks succeed)
        """
        time_start = time()
        error_detail_list = []  # full messages ("<description>: <error>")
        error_list = []  # short messages (task description only)
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global budget, shared by all tasks
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout: report every still-pending task and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/handled exception types get a plain error line;
                    # anything else is logged with its full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4862
4863 @staticmethod
4864 def _map_primitive_params(primitive_desc, params, instantiation_params):
4865 """
4866 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4867 The default-value is used. If it is between < > it look for a value at instantiation_params
4868 :param primitive_desc: portion of VNFD/NSD that describes primitive
4869 :param params: Params provided by user
4870 :param instantiation_params: Instantiation params provided by user
4871 :return: a dictionary with the calculated params
4872 """
4873 calculated_params = {}
4874 for parameter in primitive_desc.get("parameter", ()):
4875 param_name = parameter["name"]
4876 if param_name in params:
4877 calculated_params[param_name] = params[param_name]
4878 elif "default-value" in parameter or "value" in parameter:
4879 if "value" in parameter:
4880 calculated_params[param_name] = parameter["value"]
4881 else:
4882 calculated_params[param_name] = parameter["default-value"]
4883 if (
4884 isinstance(calculated_params[param_name], str)
4885 and calculated_params[param_name].startswith("<")
4886 and calculated_params[param_name].endswith(">")
4887 ):
4888 if calculated_params[param_name][1:-1] in instantiation_params:
4889 calculated_params[param_name] = instantiation_params[
4890 calculated_params[param_name][1:-1]
4891 ]
4892 else:
4893 raise LcmException(
4894 "Parameter {} needed to execute primitive {} not provided".format(
4895 calculated_params[param_name], primitive_desc["name"]
4896 )
4897 )
4898 else:
4899 raise LcmException(
4900 "Parameter {} needed to execute primitive {} not provided".format(
4901 param_name, primitive_desc["name"]
4902 )
4903 )
4904
4905 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4906 calculated_params[param_name] = yaml.safe_dump(
4907 calculated_params[param_name], default_flow_style=True, width=256
4908 )
4909 elif isinstance(calculated_params[param_name], str) and calculated_params[
4910 param_name
4911 ].startswith("!!yaml "):
4912 calculated_params[param_name] = calculated_params[param_name][7:]
4913 if parameter.get("data-type") == "INTEGER":
4914 try:
4915 calculated_params[param_name] = int(calculated_params[param_name])
4916 except ValueError: # error converting string to int
4917 raise LcmException(
4918 "Parameter {} of primitive {} must be integer".format(
4919 param_name, primitive_desc["name"]
4920 )
4921 )
4922 elif parameter.get("data-type") == "BOOLEAN":
4923 calculated_params[param_name] = not (
4924 (str(calculated_params[param_name])).lower() == "false"
4925 )
4926
4927 # add always ns_config_info if primitive name is config
4928 if primitive_desc["name"] == "config":
4929 if "ns_config_info" in instantiation_params:
4930 calculated_params["ns_config_info"] = instantiation_params[
4931 "ns_config_info"
4932 ]
4933 return calculated_params
4934
4935 def _look_for_deployed_vca(
4936 self,
4937 deployed_vca,
4938 member_vnf_index,
4939 vdu_id,
4940 vdu_count_index,
4941 kdu_name=None,
4942 ee_descriptor_id=None,
4943 ):
4944 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4945 for vca in deployed_vca:
4946 if not vca:
4947 continue
4948 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4949 continue
4950 if (
4951 vdu_count_index is not None
4952 and vdu_count_index != vca["vdu_count_index"]
4953 ):
4954 continue
4955 if kdu_name and kdu_name != vca["kdu_name"]:
4956 continue
4957 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4958 continue
4959 break
4960 else:
4961 # vca_deployed not found
4962 raise LcmException(
4963 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4964 " is not deployed".format(
4965 member_vnf_index,
4966 vdu_id,
4967 vdu_count_index,
4968 kdu_name,
4969 ee_descriptor_id,
4970 )
4971 )
4972 # get ee_id
4973 ee_id = vca.get("ee_id")
4974 vca_type = vca.get(
4975 "type", "lxc_proxy_charm"
4976 ) # default value for backward compatibility - proxy charm
4977 if not ee_id:
4978 raise LcmException(
4979 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4980 "execution environment".format(
4981 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4982 )
4983 )
4984 return ee_id, vca_type
4985
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on an execution environment, with optional retries.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; "config" params get a special wrapping
        :param primitive_params: dict of parameters passed to the primitive
        :param retries: number of retries on failure (0 = single attempt)
        :param retries_interval: seconds to wait between retries
        :param timeout: overall timeout per attempt; defaults to self.timeout_primitive
        :param vca_type: VCA connector key in self.vca_map; defaults to proxy charm
        :param db_dict: db location where execution status is periodically written
        :param vca_id: id of the VCA (cloud credential) to use
        :return: tuple (state, detail) where state is "COMPLETED", "FAILED"
            (retries exhausted) or "FAIL" (unexpected error outside the retry loop)
        """
        try:
            if primitive == "config":
                # the "config" primitive expects its parameters nested under "params"
                primitive_params = {"params": primitive_params}

            # default for backward compatibility - proxy charm
            vca_type = vca_type or "lxc_proxy_charm"

            # retries counts down; the loop runs retries+1 attempts in total
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        # NOTE(review): the loop= kwarg was removed from
                        # asyncio.sleep in Python 3.10 — confirm runtime version
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # unexpected failure outside the retry loop; note the distinct
            # "FAIL" (not "FAILED") state returned here
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5044
5045 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5046 """
5047 Updating the vca_status with latest juju information in nsrs record
5048 :param: nsr_id: Id of the nsr
5049 :param: nslcmop_id: Id of the nslcmop
5050 :return: None
5051 """
5052
5053 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5054 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5055 vca_id = self.get_vca_id({}, db_nsr)
5056 if db_nsr["_admin"]["deployed"]["K8s"]:
5057 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5058 cluster_uuid, kdu_instance, cluster_type = (
5059 k8s["k8scluster-uuid"],
5060 k8s["kdu-instance"],
5061 k8s["k8scluster-type"],
5062 )
5063 await self._on_update_k8s_db(
5064 cluster_uuid=cluster_uuid,
5065 kdu_instance=kdu_instance,
5066 filter={"_id": nsr_id},
5067 vca_id=vca_id,
5068 cluster_type=cluster_type,
5069 )
5070 else:
5071 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5072 table, filter = "nsrs", {"_id": nsr_id}
5073 path = "_admin.deployed.VCA.{}.".format(vca_index)
5074 await self._on_update_n2vc_db(table, filter, path, {})
5075
5076 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5077 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5078
    async def action(self, nsr_id, nslcmop_id):
        """Execute a primitive (action) on an NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmop record, locates the
        target (a K8s deployment unit or a deployed VCA/charm) and runs the
        requested primitive, writing the result back to the database in the
        finally block.

        :param nsr_id: id of the NS record ("nsrs" collection)
        :param nslcmop_id: id of the operation record ("nslcmops" collection)
        :return: (nslcmop_operation_state, detailed_status), or None when the
            HA lock is owned by another LCM instance
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params may be stored as a JSON string; decode in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored JSON-encoded; decode each entry
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): when vnf_index is falsy, db_vnfr was never bound and
            # this line would raise NameError — confirm NS-level actions reach here
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only KDU built-in actions may run without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve additional params (desc_params) at the narrowest scope available
            if vnf_index:
                if vdu_id:
                    # NOTE(review): vdur may be None if vdu_id is not found,
                    # making the next line raise AttributeError — confirm input
                    # validation happens upstream
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops rebind the local `primitive` (the
                # requested primitive name) — harmless only because it is not
                # read again afterwards; `primitive_name` is used instead
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound inside the previous block;
            # if kdu_name is set but has no configuration and primitive_name is
            # not upgrade/rollback/status, this raises NameError — confirm
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model from params wins over the deployed model
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        if kdu_model.count("/") < 2:
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    # atomic upgrade defaults to True unless explicitly disabled
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # custom KDU primitive; derive the instance name if missing
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm (VCA) path: locate the execution environment and run
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist operation outcome and notify; also runs on the success path
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5450
5451 async def terminate_vdus(
5452 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5453 ):
5454 """This method terminates VDUs
5455
5456 Args:
5457 db_vnfr: VNF instance record
5458 member_vnf_index: VNF index to identify the VDUs to be removed
5459 db_nsr: NS instance record
5460 update_db_nslcmops: Nslcmop update record
5461 """
5462 vca_scaling_info = []
5463 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5464 scaling_info["scaling_direction"] = "IN"
5465 scaling_info["vdu-delete"] = {}
5466 scaling_info["kdu-delete"] = {}
5467 db_vdur = db_vnfr.get("vdur")
5468 vdur_list = copy(db_vdur)
5469 count_index = 0
5470 for index, vdu in enumerate(vdur_list):
5471 vca_scaling_info.append(
5472 {
5473 "osm_vdu_id": vdu["vdu-id-ref"],
5474 "member-vnf-index": member_vnf_index,
5475 "type": "delete",
5476 "vdu_index": count_index,
5477 }
5478 )
5479 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5480 scaling_info["vdu"].append(
5481 {
5482 "name": vdu.get("name") or vdu.get("vdu-name"),
5483 "vdu_id": vdu["vdu-id-ref"],
5484 "interface": [],
5485 }
5486 )
5487 for interface in vdu["interfaces"]:
5488 scaling_info["vdu"][index]["interface"].append(
5489 {
5490 "name": interface["name"],
5491 "ip_address": interface["ip-address"],
5492 "mac_address": interface.get("mac-address"),
5493 }
5494 )
5495 self.logger.info("NS update scaling info{}".format(scaling_info))
5496 stage[2] = "Terminating VDUs"
5497 if scaling_info.get("vdu-delete"):
5498 # scale_process = "RO"
5499 if self.ro_config.get("ng"):
5500 await self._scale_ng_ro(
5501 logging_text,
5502 db_nsr,
5503 update_db_nslcmops,
5504 db_vnfr,
5505 scaling_info,
5506 stage,
5507 )
5508
5509 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5510 """This method is to Remove VNF instances from NS.
5511
5512 Args:
5513 nsr_id: NS instance id
5514 nslcmop_id: nslcmop id of update
5515 vnf_instance_id: id of the VNF instance to be removed
5516
5517 Returns:
5518 result: (str, str) COMPLETED/FAILED, details
5519 """
5520 try:
5521 db_nsr_update = {}
5522 logging_text = "Task ns={} update ".format(nsr_id)
5523 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5524 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5525 if check_vnfr_count > 1:
5526 stage = ["", "", ""]
5527 step = "Getting nslcmop from database"
5528 self.logger.debug(
5529 step + " after having waited for previous tasks to be completed"
5530 )
5531 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5532 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5533 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5534 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5535 """ db_vnfr = self.db.get_one(
5536 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5537
5538 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5539 await self.terminate_vdus(
5540 db_vnfr,
5541 member_vnf_index,
5542 db_nsr,
5543 update_db_nslcmops,
5544 stage,
5545 logging_text,
5546 )
5547
5548 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5549 constituent_vnfr.remove(db_vnfr.get("_id"))
5550 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5551 "constituent-vnfr-ref"
5552 )
5553 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5554 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5555 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5556 return "COMPLETED", "Done"
5557 else:
5558 step = "Terminate VNF Failed with"
5559 raise LcmException(
5560 "{} Cannot terminate the last VNF in this NS.".format(
5561 vnf_instance_id
5562 )
5563 )
5564 except (LcmException, asyncio.CancelledError):
5565 raise
5566 except Exception as e:
5567 self.logger.debug("Error removing VNF {}".format(e))
5568 return "FAILED", "Error removing VNF {}".format(e)
5569
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the old VDUs, rewrites the vnfr (connection points, vdur,
        revision) from the latest descriptor, then instantiates the new
        resources through RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is precomputed by the caller and carried in
            # the operation params
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the vnfr so later steps see the refreshed record
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index stays 0 for every vdu — confirm
                # a single instance per vdu is the intended redeploy behavior
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5695
5696 async def _ns_charm_upgrade(
5697 self,
5698 ee_id,
5699 charm_id,
5700 charm_type,
5701 path,
5702 timeout: float = None,
5703 ) -> (str, str):
5704 """This method upgrade charms in VNF instances
5705
5706 Args:
5707 ee_id: Execution environment id
5708 path: Local path to the charm
5709 charm_id: charm-id
5710 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5711 timeout: (Float) Timeout for the ns update operation
5712
5713 Returns:
5714 result: (str, str) COMPLETED/FAILED, details
5715 """
5716 try:
5717 charm_type = charm_type or "lxc_proxy_charm"
5718 output = await self.vca_map[charm_type].upgrade_charm(
5719 ee_id=ee_id,
5720 path=path,
5721 charm_id=charm_id,
5722 charm_type=charm_type,
5723 timeout=timeout or self.timeout_ns_update,
5724 )
5725
5726 if output:
5727 return "COMPLETED", output
5728
5729 except (LcmException, asyncio.CancelledError):
5730 raise
5731
5732 except Exception as e:
5733 self.logger.debug("Error upgrading charm {}".format(path))
5734
5735 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5736
5737 async def update(self, nsr_id, nslcmop_id):
5738 """Update NS according to different update types
5739
5740 This method performs upgrade of VNF instances then updates the revision
5741 number in VNF record
5742
5743 Args:
5744 nsr_id: Network service will be updated
5745 nslcmop_id: ns lcm operation id
5746
5747 Returns:
5748 It may raise DbException, LcmException, N2VCException, K8sException
5749
5750 """
5751 # Try to lock HA task here
5752 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5753 if not task_is_locked_by_me:
5754 return
5755
5756 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5757 self.logger.debug(logging_text + "Enter")
5758
5759 # Set the required variables to be filled up later
5760 db_nsr = None
5761 db_nslcmop_update = {}
5762 vnfr_update = {}
5763 nslcmop_operation_state = None
5764 db_nsr_update = {}
5765 error_description_nslcmop = ""
5766 exc = None
5767 change_type = "updated"
5768 detailed_status = ""
5769
5770 try:
5771 # wait for any previous tasks in process
5772 step = "Waiting for previous operations to terminate"
5773 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5774 self._write_ns_status(
5775 nsr_id=nsr_id,
5776 ns_state=None,
5777 current_operation="UPDATING",
5778 current_operation_id=nslcmop_id,
5779 )
5780
5781 step = "Getting nslcmop from database"
5782 db_nslcmop = self.db.get_one(
5783 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5784 )
5785 update_type = db_nslcmop["operationParams"]["updateType"]
5786
5787 step = "Getting nsr from database"
5788 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5789 old_operational_status = db_nsr["operational-status"]
5790 db_nsr_update["operational-status"] = "updating"
5791 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5792 nsr_deployed = db_nsr["_admin"].get("deployed")
5793
5794 if update_type == "CHANGE_VNFPKG":
5795 # Get the input parameters given through update request
5796 vnf_instance_id = db_nslcmop["operationParams"][
5797 "changeVnfPackageData"
5798 ].get("vnfInstanceId")
5799
5800 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5801 "vnfdId"
5802 )
5803 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5804
5805 step = "Getting vnfr from database"
5806 db_vnfr = self.db.get_one(
5807 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5808 )
5809
5810 step = "Getting vnfds from database"
5811 # Latest VNFD
5812 latest_vnfd = self.db.get_one(
5813 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5814 )
5815 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5816
5817 # Current VNFD
5818 current_vnf_revision = db_vnfr.get("revision", 1)
5819 current_vnfd = self.db.get_one(
5820 "vnfds_revisions",
5821 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5822 fail_on_empty=False,
5823 )
5824 # Charm artifact paths will be filled up later
5825 (
5826 current_charm_artifact_path,
5827 target_charm_artifact_path,
5828 charm_artifact_paths,
5829 ) = ([], [], [])
5830
5831 step = "Checking if revision has changed in VNFD"
5832 if current_vnf_revision != latest_vnfd_revision:
5833 change_type = "policy_updated"
5834
5835 # There is new revision of VNFD, update operation is required
5836 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5837 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5838
5839 step = "Removing the VNFD packages if they exist in the local path"
5840 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5841 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5842
5843 step = "Get the VNFD packages from FSMongo"
5844 self.fs.sync(from_path=latest_vnfd_path)
5845 self.fs.sync(from_path=current_vnfd_path)
5846
5847 step = (
5848 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5849 )
5850 base_folder = latest_vnfd["_admin"]["storage"]
5851
5852 for charm_index, charm_deployed in enumerate(
5853 get_iterable(nsr_deployed, "VCA")
5854 ):
5855 vnf_index = db_vnfr.get("member-vnf-index-ref")
5856
5857 # Getting charm-id and charm-type
5858 if charm_deployed.get("member-vnf-index") == vnf_index:
5859 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5860 charm_type = charm_deployed.get("type")
5861
5862 # Getting ee-id
5863 ee_id = charm_deployed.get("ee_id")
5864
5865 step = "Getting descriptor config"
5866 descriptor_config = get_configuration(
5867 current_vnfd, current_vnfd["id"]
5868 )
5869
5870 if "execution-environment-list" in descriptor_config:
5871 ee_list = descriptor_config.get(
5872 "execution-environment-list", []
5873 )
5874 else:
5875 ee_list = []
5876
5877 # There could be several charm used in the same VNF
5878 for ee_item in ee_list:
5879 if ee_item.get("juju"):
5880 step = "Getting charm name"
5881 charm_name = ee_item["juju"].get("charm")
5882
5883 step = "Setting Charm artifact paths"
5884 current_charm_artifact_path.append(
5885 get_charm_artifact_path(
5886 base_folder,
5887 charm_name,
5888 charm_type,
5889 current_vnf_revision,
5890 )
5891 )
5892 target_charm_artifact_path.append(
5893 get_charm_artifact_path(
5894 base_folder,
5895 charm_name,
5896 charm_type,
5897 latest_vnfd_revision,
5898 )
5899 )
5900
5901 charm_artifact_paths = zip(
5902 current_charm_artifact_path, target_charm_artifact_path
5903 )
5904
5905 step = "Checking if software version has changed in VNFD"
5906 if find_software_version(current_vnfd) != find_software_version(
5907 latest_vnfd
5908 ):
5909 step = "Checking if existing VNF has charm"
5910 for current_charm_path, target_charm_path in list(
5911 charm_artifact_paths
5912 ):
5913 if current_charm_path:
5914 raise LcmException(
5915 "Software version change is not supported as VNF instance {} has charm.".format(
5916 vnf_instance_id
5917 )
5918 )
5919
5920 # There is no change in the charm package, then redeploy the VNF
5921 # based on new descriptor
5922 step = "Redeploying VNF"
5923 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5924 (result, detailed_status) = await self._ns_redeploy_vnf(
5925 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5926 )
5927 if result == "FAILED":
5928 nslcmop_operation_state = result
5929 error_description_nslcmop = detailed_status
5930 db_nslcmop_update["detailed-status"] = detailed_status
5931 self.logger.debug(
5932 logging_text
5933 + " step {} Done with result {} {}".format(
5934 step, nslcmop_operation_state, detailed_status
5935 )
5936 )
5937
5938 else:
5939 step = "Checking if any charm package has changed or not"
5940 for current_charm_path, target_charm_path in list(
5941 charm_artifact_paths
5942 ):
5943 if (
5944 current_charm_path
5945 and target_charm_path
5946 and self.check_charm_hash_changed(
5947 current_charm_path, target_charm_path
5948 )
5949 ):
5950 step = "Checking whether VNF uses juju bundle"
5951 if check_juju_bundle_existence(current_vnfd):
5952 raise LcmException(
5953 "Charm upgrade is not supported for the instance which"
5954 " uses juju-bundle: {}".format(
5955 check_juju_bundle_existence(current_vnfd)
5956 )
5957 )
5958
5959 step = "Upgrading Charm"
5960 (
5961 result,
5962 detailed_status,
5963 ) = await self._ns_charm_upgrade(
5964 ee_id=ee_id,
5965 charm_id=charm_id,
5966 charm_type=charm_type,
5967 path=self.fs.path + target_charm_path,
5968 timeout=timeout_seconds,
5969 )
5970
5971 if result == "FAILED":
5972 nslcmop_operation_state = result
5973 error_description_nslcmop = detailed_status
5974
5975 db_nslcmop_update["detailed-status"] = detailed_status
5976 self.logger.debug(
5977 logging_text
5978 + " step {} Done with result {} {}".format(
5979 step, nslcmop_operation_state, detailed_status
5980 )
5981 )
5982
5983 step = "Updating policies"
5984 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5985 result = "COMPLETED"
5986 detailed_status = "Done"
5987 db_nslcmop_update["detailed-status"] = "Done"
5988
5989 # If nslcmop_operation_state is None, so any operation is not failed.
5990 if not nslcmop_operation_state:
5991 nslcmop_operation_state = "COMPLETED"
5992
5993 # If update CHANGE_VNFPKG nslcmop_operation is successful
5994 # vnf revision need to be updated
5995 vnfr_update["revision"] = latest_vnfd_revision
5996 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5997
5998 self.logger.debug(
5999 logging_text
6000 + " task Done with result {} {}".format(
6001 nslcmop_operation_state, detailed_status
6002 )
6003 )
6004 elif update_type == "REMOVE_VNF":
6005 # This part is included in https://osm.etsi.org/gerrit/11876
6006 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6007 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6008 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6009 step = "Removing VNF"
6010 (result, detailed_status) = await self.remove_vnf(
6011 nsr_id, nslcmop_id, vnf_instance_id
6012 )
6013 if result == "FAILED":
6014 nslcmop_operation_state = result
6015 error_description_nslcmop = detailed_status
6016 db_nslcmop_update["detailed-status"] = detailed_status
6017 change_type = "vnf_terminated"
6018 if not nslcmop_operation_state:
6019 nslcmop_operation_state = "COMPLETED"
6020 self.logger.debug(
6021 logging_text
6022 + " task Done with result {} {}".format(
6023 nslcmop_operation_state, detailed_status
6024 )
6025 )
6026
6027 elif update_type == "OPERATE_VNF":
6028 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6029 "vnfInstanceId"
6030 ]
6031 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6032 "changeStateTo"
6033 ]
6034 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6035 "additionalParam"
6036 ]
6037 (result, detailed_status) = await self.rebuild_start_stop(
6038 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6039 )
6040 if result == "FAILED":
6041 nslcmop_operation_state = result
6042 error_description_nslcmop = detailed_status
6043 db_nslcmop_update["detailed-status"] = detailed_status
6044 if not nslcmop_operation_state:
6045 nslcmop_operation_state = "COMPLETED"
6046 self.logger.debug(
6047 logging_text
6048 + " task Done with result {} {}".format(
6049 nslcmop_operation_state, detailed_status
6050 )
6051 )
6052
6053 # If nslcmop_operation_state is None, so any operation is not failed.
6054 # All operations are executed in overall.
6055 if not nslcmop_operation_state:
6056 nslcmop_operation_state = "COMPLETED"
6057 db_nsr_update["operational-status"] = old_operational_status
6058
6059 except (DbException, LcmException, N2VCException, K8sException) as e:
6060 self.logger.error(logging_text + "Exit Exception {}".format(e))
6061 exc = e
6062 except asyncio.CancelledError:
6063 self.logger.error(
6064 logging_text + "Cancelled Exception while '{}'".format(step)
6065 )
6066 exc = "Operation was cancelled"
6067 except asyncio.TimeoutError:
6068 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6069 exc = "Timeout"
6070 except Exception as e:
6071 exc = traceback.format_exc()
6072 self.logger.critical(
6073 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6074 exc_info=True,
6075 )
6076 finally:
6077 if exc:
6078 db_nslcmop_update[
6079 "detailed-status"
6080 ] = (
6081 detailed_status
6082 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6083 nslcmop_operation_state = "FAILED"
6084 db_nsr_update["operational-status"] = old_operational_status
6085 if db_nsr:
6086 self._write_ns_status(
6087 nsr_id=nsr_id,
6088 ns_state=db_nsr["nsState"],
6089 current_operation="IDLE",
6090 current_operation_id=None,
6091 other_update=db_nsr_update,
6092 )
6093
6094 self._write_op_status(
6095 op_id=nslcmop_id,
6096 stage="",
6097 error_message=error_description_nslcmop,
6098 operation_state=nslcmop_operation_state,
6099 other_update=db_nslcmop_update,
6100 )
6101
6102 if nslcmop_operation_state:
6103 try:
6104 msg = {
6105 "nsr_id": nsr_id,
6106 "nslcmop_id": nslcmop_id,
6107 "operationState": nslcmop_operation_state,
6108 }
6109 if change_type in ("vnf_terminated", "policy_updated"):
6110 msg.update({"vnf_member_index": member_vnf_index})
6111 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6112 except Exception as e:
6113 self.logger.error(
6114 logging_text + "kafka_write notification Exception {}".format(e)
6115 )
6116 self.logger.debug(logging_text + "Exit")
6117 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6118 return nslcmop_operation_state, detailed_status
6119
6120 async def scale(self, nsr_id, nslcmop_id):
6121 # Try to lock HA task here
6122 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6123 if not task_is_locked_by_me:
6124 return
6125
6126 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6127 stage = ["", "", ""]
6128 tasks_dict_info = {}
6129 # ^ stage, step, VIM progress
6130 self.logger.debug(logging_text + "Enter")
6131 # get all needed from database
6132 db_nsr = None
6133 db_nslcmop_update = {}
6134 db_nsr_update = {}
6135 exc = None
6136 # in case of error, indicates what part of scale was failed to put nsr at error status
6137 scale_process = None
6138 old_operational_status = ""
6139 old_config_status = ""
6140 nsi_id = None
6141 try:
6142 # wait for any previous tasks in process
6143 step = "Waiting for previous operations to terminate"
6144 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6145 self._write_ns_status(
6146 nsr_id=nsr_id,
6147 ns_state=None,
6148 current_operation="SCALING",
6149 current_operation_id=nslcmop_id,
6150 )
6151
6152 step = "Getting nslcmop from database"
6153 self.logger.debug(
6154 step + " after having waited for previous tasks to be completed"
6155 )
6156 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6157
6158 step = "Getting nsr from database"
6159 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6160 old_operational_status = db_nsr["operational-status"]
6161 old_config_status = db_nsr["config-status"]
6162
6163 step = "Parsing scaling parameters"
6164 db_nsr_update["operational-status"] = "scaling"
6165 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6166 nsr_deployed = db_nsr["_admin"].get("deployed")
6167
6168 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6169 "scaleByStepData"
6170 ]["member-vnf-index"]
6171 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6172 "scaleByStepData"
6173 ]["scaling-group-descriptor"]
6174 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6175 # for backward compatibility
6176 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6177 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6178 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6179 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6180
6181 step = "Getting vnfr from database"
6182 db_vnfr = self.db.get_one(
6183 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6184 )
6185
6186 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6187
6188 step = "Getting vnfd from database"
6189 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6190
6191 base_folder = db_vnfd["_admin"]["storage"]
6192
6193 step = "Getting scaling-group-descriptor"
6194 scaling_descriptor = find_in_list(
6195 get_scaling_aspect(db_vnfd),
6196 lambda scale_desc: scale_desc["name"] == scaling_group,
6197 )
6198 if not scaling_descriptor:
6199 raise LcmException(
6200 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6201 "at vnfd:scaling-group-descriptor".format(scaling_group)
6202 )
6203
6204 step = "Sending scale order to VIM"
6205 # TODO check if ns is in a proper status
6206 nb_scale_op = 0
6207 if not db_nsr["_admin"].get("scaling-group"):
6208 self.update_db_2(
6209 "nsrs",
6210 nsr_id,
6211 {
6212 "_admin.scaling-group": [
6213 {"name": scaling_group, "nb-scale-op": 0}
6214 ]
6215 },
6216 )
6217 admin_scale_index = 0
6218 else:
6219 for admin_scale_index, admin_scale_info in enumerate(
6220 db_nsr["_admin"]["scaling-group"]
6221 ):
6222 if admin_scale_info["name"] == scaling_group:
6223 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6224 break
6225 else: # not found, set index one plus last element and add new entry with the name
6226 admin_scale_index += 1
6227 db_nsr_update[
6228 "_admin.scaling-group.{}.name".format(admin_scale_index)
6229 ] = scaling_group
6230
6231 vca_scaling_info = []
6232 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6233 if scaling_type == "SCALE_OUT":
6234 if "aspect-delta-details" not in scaling_descriptor:
6235 raise LcmException(
6236 "Aspect delta details not fount in scaling descriptor {}".format(
6237 scaling_descriptor["name"]
6238 )
6239 )
6240 # count if max-instance-count is reached
6241 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6242
6243 scaling_info["scaling_direction"] = "OUT"
6244 scaling_info["vdu-create"] = {}
6245 scaling_info["kdu-create"] = {}
6246 for delta in deltas:
6247 for vdu_delta in delta.get("vdu-delta", {}):
6248 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6249 # vdu_index also provides the number of instance of the targeted vdu
6250 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6251 cloud_init_text = self._get_vdu_cloud_init_content(
6252 vdud, db_vnfd
6253 )
6254 if cloud_init_text:
6255 additional_params = (
6256 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6257 or {}
6258 )
6259 cloud_init_list = []
6260
6261 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6262 max_instance_count = 10
6263 if vdu_profile and "max-number-of-instances" in vdu_profile:
6264 max_instance_count = vdu_profile.get(
6265 "max-number-of-instances", 10
6266 )
6267
6268 default_instance_num = get_number_of_instances(
6269 db_vnfd, vdud["id"]
6270 )
6271 instances_number = vdu_delta.get("number-of-instances", 1)
6272 nb_scale_op += instances_number
6273
6274 new_instance_count = nb_scale_op + default_instance_num
6275 # Control if new count is over max and vdu count is less than max.
6276 # Then assign new instance count
6277 if new_instance_count > max_instance_count > vdu_count:
6278 instances_number = new_instance_count - max_instance_count
6279 else:
6280 instances_number = instances_number
6281
6282 if new_instance_count > max_instance_count:
6283 raise LcmException(
6284 "reached the limit of {} (max-instance-count) "
6285 "scaling-out operations for the "
6286 "scaling-group-descriptor '{}'".format(
6287 nb_scale_op, scaling_group
6288 )
6289 )
6290 for x in range(vdu_delta.get("number-of-instances", 1)):
6291 if cloud_init_text:
6292 # TODO Information of its own ip is not available because db_vnfr is not updated.
6293 additional_params["OSM"] = get_osm_params(
6294 db_vnfr, vdu_delta["id"], vdu_index + x
6295 )
6296 cloud_init_list.append(
6297 self._parse_cloud_init(
6298 cloud_init_text,
6299 additional_params,
6300 db_vnfd["id"],
6301 vdud["id"],
6302 )
6303 )
6304 vca_scaling_info.append(
6305 {
6306 "osm_vdu_id": vdu_delta["id"],
6307 "member-vnf-index": vnf_index,
6308 "type": "create",
6309 "vdu_index": vdu_index + x,
6310 }
6311 )
6312 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6313 for kdu_delta in delta.get("kdu-resource-delta", {}):
6314 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6315 kdu_name = kdu_profile["kdu-name"]
6316 resource_name = kdu_profile.get("resource-name", "")
6317
6318 # Might have different kdus in the same delta
6319 # Should have list for each kdu
6320 if not scaling_info["kdu-create"].get(kdu_name, None):
6321 scaling_info["kdu-create"][kdu_name] = []
6322
6323 kdur = get_kdur(db_vnfr, kdu_name)
6324 if kdur.get("helm-chart"):
6325 k8s_cluster_type = "helm-chart-v3"
6326 self.logger.debug("kdur: {}".format(kdur))
6327 if (
6328 kdur.get("helm-version")
6329 and kdur.get("helm-version") == "v2"
6330 ):
6331 k8s_cluster_type = "helm-chart"
6332 elif kdur.get("juju-bundle"):
6333 k8s_cluster_type = "juju-bundle"
6334 else:
6335 raise LcmException(
6336 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6337 "juju-bundle. Maybe an old NBI version is running".format(
6338 db_vnfr["member-vnf-index-ref"], kdu_name
6339 )
6340 )
6341
6342 max_instance_count = 10
6343 if kdu_profile and "max-number-of-instances" in kdu_profile:
6344 max_instance_count = kdu_profile.get(
6345 "max-number-of-instances", 10
6346 )
6347
6348 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6349 deployed_kdu, _ = get_deployed_kdu(
6350 nsr_deployed, kdu_name, vnf_index
6351 )
6352 if deployed_kdu is None:
6353 raise LcmException(
6354 "KDU '{}' for vnf '{}' not deployed".format(
6355 kdu_name, vnf_index
6356 )
6357 )
6358 kdu_instance = deployed_kdu.get("kdu-instance")
6359 instance_num = await self.k8scluster_map[
6360 k8s_cluster_type
6361 ].get_scale_count(
6362 resource_name,
6363 kdu_instance,
6364 vca_id=vca_id,
6365 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6366 kdu_model=deployed_kdu.get("kdu-model"),
6367 )
6368 kdu_replica_count = instance_num + kdu_delta.get(
6369 "number-of-instances", 1
6370 )
6371
6372 # Control if new count is over max and instance_num is less than max.
6373 # Then assign max instance number to kdu replica count
6374 if kdu_replica_count > max_instance_count > instance_num:
6375 kdu_replica_count = max_instance_count
6376 if kdu_replica_count > max_instance_count:
6377 raise LcmException(
6378 "reached the limit of {} (max-instance-count) "
6379 "scaling-out operations for the "
6380 "scaling-group-descriptor '{}'".format(
6381 instance_num, scaling_group
6382 )
6383 )
6384
6385 for x in range(kdu_delta.get("number-of-instances", 1)):
6386 vca_scaling_info.append(
6387 {
6388 "osm_kdu_id": kdu_name,
6389 "member-vnf-index": vnf_index,
6390 "type": "create",
6391 "kdu_index": instance_num + x - 1,
6392 }
6393 )
6394 scaling_info["kdu-create"][kdu_name].append(
6395 {
6396 "member-vnf-index": vnf_index,
6397 "type": "create",
6398 "k8s-cluster-type": k8s_cluster_type,
6399 "resource-name": resource_name,
6400 "scale": kdu_replica_count,
6401 }
6402 )
6403 elif scaling_type == "SCALE_IN":
6404 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6405
6406 scaling_info["scaling_direction"] = "IN"
6407 scaling_info["vdu-delete"] = {}
6408 scaling_info["kdu-delete"] = {}
6409
6410 for delta in deltas:
6411 for vdu_delta in delta.get("vdu-delta", {}):
6412 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6413 min_instance_count = 0
6414 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6415 if vdu_profile and "min-number-of-instances" in vdu_profile:
6416 min_instance_count = vdu_profile["min-number-of-instances"]
6417
6418 default_instance_num = get_number_of_instances(
6419 db_vnfd, vdu_delta["id"]
6420 )
6421 instance_num = vdu_delta.get("number-of-instances", 1)
6422 nb_scale_op -= instance_num
6423
6424 new_instance_count = nb_scale_op + default_instance_num
6425
6426 if new_instance_count < min_instance_count < vdu_count:
6427 instances_number = min_instance_count - new_instance_count
6428 else:
6429 instances_number = instance_num
6430
6431 if new_instance_count < min_instance_count:
6432 raise LcmException(
6433 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6434 "scaling-group-descriptor '{}'".format(
6435 nb_scale_op, scaling_group
6436 )
6437 )
6438 for x in range(vdu_delta.get("number-of-instances", 1)):
6439 vca_scaling_info.append(
6440 {
6441 "osm_vdu_id": vdu_delta["id"],
6442 "member-vnf-index": vnf_index,
6443 "type": "delete",
6444 "vdu_index": vdu_index - 1 - x,
6445 }
6446 )
6447 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6448 for kdu_delta in delta.get("kdu-resource-delta", {}):
6449 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6450 kdu_name = kdu_profile["kdu-name"]
6451 resource_name = kdu_profile.get("resource-name", "")
6452
6453 if not scaling_info["kdu-delete"].get(kdu_name, None):
6454 scaling_info["kdu-delete"][kdu_name] = []
6455
6456 kdur = get_kdur(db_vnfr, kdu_name)
6457 if kdur.get("helm-chart"):
6458 k8s_cluster_type = "helm-chart-v3"
6459 self.logger.debug("kdur: {}".format(kdur))
6460 if (
6461 kdur.get("helm-version")
6462 and kdur.get("helm-version") == "v2"
6463 ):
6464 k8s_cluster_type = "helm-chart"
6465 elif kdur.get("juju-bundle"):
6466 k8s_cluster_type = "juju-bundle"
6467 else:
6468 raise LcmException(
6469 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6470 "juju-bundle. Maybe an old NBI version is running".format(
6471 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6472 )
6473 )
6474
6475 min_instance_count = 0
6476 if kdu_profile and "min-number-of-instances" in kdu_profile:
6477 min_instance_count = kdu_profile["min-number-of-instances"]
6478
6479 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6480 deployed_kdu, _ = get_deployed_kdu(
6481 nsr_deployed, kdu_name, vnf_index
6482 )
6483 if deployed_kdu is None:
6484 raise LcmException(
6485 "KDU '{}' for vnf '{}' not deployed".format(
6486 kdu_name, vnf_index
6487 )
6488 )
6489 kdu_instance = deployed_kdu.get("kdu-instance")
6490 instance_num = await self.k8scluster_map[
6491 k8s_cluster_type
6492 ].get_scale_count(
6493 resource_name,
6494 kdu_instance,
6495 vca_id=vca_id,
6496 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6497 kdu_model=deployed_kdu.get("kdu-model"),
6498 )
6499 kdu_replica_count = instance_num - kdu_delta.get(
6500 "number-of-instances", 1
6501 )
6502
6503 if kdu_replica_count < min_instance_count < instance_num:
6504 kdu_replica_count = min_instance_count
6505 if kdu_replica_count < min_instance_count:
6506 raise LcmException(
6507 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6508 "scaling-group-descriptor '{}'".format(
6509 instance_num, scaling_group
6510 )
6511 )
6512
6513 for x in range(kdu_delta.get("number-of-instances", 1)):
6514 vca_scaling_info.append(
6515 {
6516 "osm_kdu_id": kdu_name,
6517 "member-vnf-index": vnf_index,
6518 "type": "delete",
6519 "kdu_index": instance_num - x - 1,
6520 }
6521 )
6522 scaling_info["kdu-delete"][kdu_name].append(
6523 {
6524 "member-vnf-index": vnf_index,
6525 "type": "delete",
6526 "k8s-cluster-type": k8s_cluster_type,
6527 "resource-name": resource_name,
6528 "scale": kdu_replica_count,
6529 }
6530 )
6531
6532 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6533 vdu_delete = copy(scaling_info.get("vdu-delete"))
6534 if scaling_info["scaling_direction"] == "IN":
6535 for vdur in reversed(db_vnfr["vdur"]):
6536 if vdu_delete.get(vdur["vdu-id-ref"]):
6537 vdu_delete[vdur["vdu-id-ref"]] -= 1
6538 scaling_info["vdu"].append(
6539 {
6540 "name": vdur.get("name") or vdur.get("vdu-name"),
6541 "vdu_id": vdur["vdu-id-ref"],
6542 "interface": [],
6543 }
6544 )
6545 for interface in vdur["interfaces"]:
6546 scaling_info["vdu"][-1]["interface"].append(
6547 {
6548 "name": interface["name"],
6549 "ip_address": interface["ip-address"],
6550 "mac_address": interface.get("mac-address"),
6551 }
6552 )
6553 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6554
6555 # PRE-SCALE BEGIN
6556 step = "Executing pre-scale vnf-config-primitive"
6557 if scaling_descriptor.get("scaling-config-action"):
6558 for scaling_config_action in scaling_descriptor[
6559 "scaling-config-action"
6560 ]:
6561 if (
6562 scaling_config_action.get("trigger") == "pre-scale-in"
6563 and scaling_type == "SCALE_IN"
6564 ) or (
6565 scaling_config_action.get("trigger") == "pre-scale-out"
6566 and scaling_type == "SCALE_OUT"
6567 ):
6568 vnf_config_primitive = scaling_config_action[
6569 "vnf-config-primitive-name-ref"
6570 ]
6571 step = db_nslcmop_update[
6572 "detailed-status"
6573 ] = "executing pre-scale scaling-config-action '{}'".format(
6574 vnf_config_primitive
6575 )
6576
6577 # look for primitive
6578 for config_primitive in (
6579 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6580 ).get("config-primitive", ()):
6581 if config_primitive["name"] == vnf_config_primitive:
6582 break
6583 else:
6584 raise LcmException(
6585 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6586 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6587 "primitive".format(scaling_group, vnf_config_primitive)
6588 )
6589
6590 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6591 if db_vnfr.get("additionalParamsForVnf"):
6592 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6593
6594 scale_process = "VCA"
6595 db_nsr_update["config-status"] = "configuring pre-scaling"
6596 primitive_params = self._map_primitive_params(
6597 config_primitive, {}, vnfr_params
6598 )
6599
6600 # Pre-scale retry check: Check if this sub-operation has been executed before
6601 op_index = self._check_or_add_scale_suboperation(
6602 db_nslcmop,
6603 vnf_index,
6604 vnf_config_primitive,
6605 primitive_params,
6606 "PRE-SCALE",
6607 )
6608 if op_index == self.SUBOPERATION_STATUS_SKIP:
6609 # Skip sub-operation
6610 result = "COMPLETED"
6611 result_detail = "Done"
6612 self.logger.debug(
6613 logging_text
6614 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6615 vnf_config_primitive, result, result_detail
6616 )
6617 )
6618 else:
6619 if op_index == self.SUBOPERATION_STATUS_NEW:
6620 # New sub-operation: Get index of this sub-operation
6621 op_index = (
6622 len(db_nslcmop.get("_admin", {}).get("operations"))
6623 - 1
6624 )
6625 self.logger.debug(
6626 logging_text
6627 + "vnf_config_primitive={} New sub-operation".format(
6628 vnf_config_primitive
6629 )
6630 )
6631 else:
6632 # retry: Get registered params for this existing sub-operation
6633 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6634 op_index
6635 ]
6636 vnf_index = op.get("member_vnf_index")
6637 vnf_config_primitive = op.get("primitive")
6638 primitive_params = op.get("primitive_params")
6639 self.logger.debug(
6640 logging_text
6641 + "vnf_config_primitive={} Sub-operation retry".format(
6642 vnf_config_primitive
6643 )
6644 )
6645 # Execute the primitive, either with new (first-time) or registered (reintent) args
6646 ee_descriptor_id = config_primitive.get(
6647 "execution-environment-ref"
6648 )
6649 primitive_name = config_primitive.get(
6650 "execution-environment-primitive", vnf_config_primitive
6651 )
6652 ee_id, vca_type = self._look_for_deployed_vca(
6653 nsr_deployed["VCA"],
6654 member_vnf_index=vnf_index,
6655 vdu_id=None,
6656 vdu_count_index=None,
6657 ee_descriptor_id=ee_descriptor_id,
6658 )
6659 result, result_detail = await self._ns_execute_primitive(
6660 ee_id,
6661 primitive_name,
6662 primitive_params,
6663 vca_type=vca_type,
6664 vca_id=vca_id,
6665 )
6666 self.logger.debug(
6667 logging_text
6668 + "vnf_config_primitive={} Done with result {} {}".format(
6669 vnf_config_primitive, result, result_detail
6670 )
6671 )
6672 # Update operationState = COMPLETED | FAILED
6673 self._update_suboperation_status(
6674 db_nslcmop, op_index, result, result_detail
6675 )
6676
6677 if result == "FAILED":
6678 raise LcmException(result_detail)
6679 db_nsr_update["config-status"] = old_config_status
6680 scale_process = None
6681 # PRE-SCALE END
6682
6683 db_nsr_update[
6684 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6685 ] = nb_scale_op
6686 db_nsr_update[
6687 "_admin.scaling-group.{}.time".format(admin_scale_index)
6688 ] = time()
6689
6690 # SCALE-IN VCA - BEGIN
6691 if vca_scaling_info:
6692 step = db_nslcmop_update[
6693 "detailed-status"
6694 ] = "Deleting the execution environments"
6695 scale_process = "VCA"
6696 for vca_info in vca_scaling_info:
6697 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6698 member_vnf_index = str(vca_info["member-vnf-index"])
6699 self.logger.debug(
6700 logging_text + "vdu info: {}".format(vca_info)
6701 )
6702 if vca_info.get("osm_vdu_id"):
6703 vdu_id = vca_info["osm_vdu_id"]
6704 vdu_index = int(vca_info["vdu_index"])
6705 stage[
6706 1
6707 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6708 member_vnf_index, vdu_id, vdu_index
6709 )
6710 stage[2] = step = "Scaling in VCA"
6711 self._write_op_status(op_id=nslcmop_id, stage=stage)
6712 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6713 config_update = db_nsr["configurationStatus"]
6714 for vca_index, vca in enumerate(vca_update):
6715 if (
6716 (vca or vca.get("ee_id"))
6717 and vca["member-vnf-index"] == member_vnf_index
6718 and vca["vdu_count_index"] == vdu_index
6719 ):
6720 if vca.get("vdu_id"):
6721 config_descriptor = get_configuration(
6722 db_vnfd, vca.get("vdu_id")
6723 )
6724 elif vca.get("kdu_name"):
6725 config_descriptor = get_configuration(
6726 db_vnfd, vca.get("kdu_name")
6727 )
6728 else:
6729 config_descriptor = get_configuration(
6730 db_vnfd, db_vnfd["id"]
6731 )
6732 operation_params = (
6733 db_nslcmop.get("operationParams") or {}
6734 )
6735 exec_terminate_primitives = not operation_params.get(
6736 "skip_terminate_primitives"
6737 ) and vca.get("needed_terminate")
6738 task = asyncio.ensure_future(
6739 asyncio.wait_for(
6740 self.destroy_N2VC(
6741 logging_text,
6742 db_nslcmop,
6743 vca,
6744 config_descriptor,
6745 vca_index,
6746 destroy_ee=True,
6747 exec_primitives=exec_terminate_primitives,
6748 scaling_in=True,
6749 vca_id=vca_id,
6750 ),
6751 timeout=self.timeout_charm_delete,
6752 )
6753 )
6754 tasks_dict_info[task] = "Terminating VCA {}".format(
6755 vca.get("ee_id")
6756 )
6757 del vca_update[vca_index]
6758 del config_update[vca_index]
6759 # wait for pending tasks of terminate primitives
6760 if tasks_dict_info:
6761 self.logger.debug(
6762 logging_text
6763 + "Waiting for tasks {}".format(
6764 list(tasks_dict_info.keys())
6765 )
6766 )
6767 error_list = await self._wait_for_tasks(
6768 logging_text,
6769 tasks_dict_info,
6770 min(
6771 self.timeout_charm_delete, self.timeout_ns_terminate
6772 ),
6773 stage,
6774 nslcmop_id,
6775 )
6776 tasks_dict_info.clear()
6777 if error_list:
6778 raise LcmException("; ".join(error_list))
6779
6780 db_vca_and_config_update = {
6781 "_admin.deployed.VCA": vca_update,
6782 "configurationStatus": config_update,
6783 }
6784 self.update_db_2(
6785 "nsrs", db_nsr["_id"], db_vca_and_config_update
6786 )
6787 scale_process = None
6788 # SCALE-IN VCA - END
6789
6790 # SCALE RO - BEGIN
6791 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6792 scale_process = "RO"
6793 if self.ro_config.get("ng"):
6794 await self._scale_ng_ro(
6795 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6796 )
6797 scaling_info.pop("vdu-create", None)
6798 scaling_info.pop("vdu-delete", None)
6799
6800 scale_process = None
6801 # SCALE RO - END
6802
6803 # SCALE KDU - BEGIN
6804 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6805 scale_process = "KDU"
6806 await self._scale_kdu(
6807 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6808 )
6809 scaling_info.pop("kdu-create", None)
6810 scaling_info.pop("kdu-delete", None)
6811
6812 scale_process = None
6813 # SCALE KDU - END
6814
6815 if db_nsr_update:
6816 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6817
6818 # SCALE-UP VCA - BEGIN
6819 if vca_scaling_info:
6820 step = db_nslcmop_update[
6821 "detailed-status"
6822 ] = "Creating new execution environments"
6823 scale_process = "VCA"
6824 for vca_info in vca_scaling_info:
6825 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6826 member_vnf_index = str(vca_info["member-vnf-index"])
6827 self.logger.debug(
6828 logging_text + "vdu info: {}".format(vca_info)
6829 )
6830 vnfd_id = db_vnfr["vnfd-ref"]
6831 if vca_info.get("osm_vdu_id"):
6832 vdu_index = int(vca_info["vdu_index"])
6833 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6834 if db_vnfr.get("additionalParamsForVnf"):
6835 deploy_params.update(
6836 parse_yaml_strings(
6837 db_vnfr["additionalParamsForVnf"].copy()
6838 )
6839 )
6840 descriptor_config = get_configuration(
6841 db_vnfd, db_vnfd["id"]
6842 )
6843 if descriptor_config:
6844 vdu_id = None
6845 vdu_name = None
6846 kdu_name = None
6847 self._deploy_n2vc(
6848 logging_text=logging_text
6849 + "member_vnf_index={} ".format(member_vnf_index),
6850 db_nsr=db_nsr,
6851 db_vnfr=db_vnfr,
6852 nslcmop_id=nslcmop_id,
6853 nsr_id=nsr_id,
6854 nsi_id=nsi_id,
6855 vnfd_id=vnfd_id,
6856 vdu_id=vdu_id,
6857 kdu_name=kdu_name,
6858 member_vnf_index=member_vnf_index,
6859 vdu_index=vdu_index,
6860 vdu_name=vdu_name,
6861 deploy_params=deploy_params,
6862 descriptor_config=descriptor_config,
6863 base_folder=base_folder,
6864 task_instantiation_info=tasks_dict_info,
6865 stage=stage,
6866 )
6867 vdu_id = vca_info["osm_vdu_id"]
6868 vdur = find_in_list(
6869 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6870 )
6871 descriptor_config = get_configuration(db_vnfd, vdu_id)
6872 if vdur.get("additionalParams"):
6873 deploy_params_vdu = parse_yaml_strings(
6874 vdur["additionalParams"]
6875 )
6876 else:
6877 deploy_params_vdu = deploy_params
6878 deploy_params_vdu["OSM"] = get_osm_params(
6879 db_vnfr, vdu_id, vdu_count_index=vdu_index
6880 )
6881 if descriptor_config:
6882 vdu_name = None
6883 kdu_name = None
6884 stage[
6885 1
6886 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6887 member_vnf_index, vdu_id, vdu_index
6888 )
6889 stage[2] = step = "Scaling out VCA"
6890 self._write_op_status(op_id=nslcmop_id, stage=stage)
6891 self._deploy_n2vc(
6892 logging_text=logging_text
6893 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6894 member_vnf_index, vdu_id, vdu_index
6895 ),
6896 db_nsr=db_nsr,
6897 db_vnfr=db_vnfr,
6898 nslcmop_id=nslcmop_id,
6899 nsr_id=nsr_id,
6900 nsi_id=nsi_id,
6901 vnfd_id=vnfd_id,
6902 vdu_id=vdu_id,
6903 kdu_name=kdu_name,
6904 member_vnf_index=member_vnf_index,
6905 vdu_index=vdu_index,
6906 vdu_name=vdu_name,
6907 deploy_params=deploy_params_vdu,
6908 descriptor_config=descriptor_config,
6909 base_folder=base_folder,
6910 task_instantiation_info=tasks_dict_info,
6911 stage=stage,
6912 )
6913 # SCALE-UP VCA - END
6914 scale_process = None
6915
6916 # POST-SCALE BEGIN
6917 # execute primitive service POST-SCALING
6918 step = "Executing post-scale vnf-config-primitive"
6919 if scaling_descriptor.get("scaling-config-action"):
6920 for scaling_config_action in scaling_descriptor[
6921 "scaling-config-action"
6922 ]:
6923 if (
6924 scaling_config_action.get("trigger") == "post-scale-in"
6925 and scaling_type == "SCALE_IN"
6926 ) or (
6927 scaling_config_action.get("trigger") == "post-scale-out"
6928 and scaling_type == "SCALE_OUT"
6929 ):
6930 vnf_config_primitive = scaling_config_action[
6931 "vnf-config-primitive-name-ref"
6932 ]
6933 step = db_nslcmop_update[
6934 "detailed-status"
6935 ] = "executing post-scale scaling-config-action '{}'".format(
6936 vnf_config_primitive
6937 )
6938
6939 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6940 if db_vnfr.get("additionalParamsForVnf"):
6941 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6942
6943 # look for primitive
6944 for config_primitive in (
6945 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6946 ).get("config-primitive", ()):
6947 if config_primitive["name"] == vnf_config_primitive:
6948 break
6949 else:
6950 raise LcmException(
6951 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6952 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6953 "config-primitive".format(
6954 scaling_group, vnf_config_primitive
6955 )
6956 )
6957 scale_process = "VCA"
6958 db_nsr_update["config-status"] = "configuring post-scaling"
6959 primitive_params = self._map_primitive_params(
6960 config_primitive, {}, vnfr_params
6961 )
6962
6963 # Post-scale retry check: Check if this sub-operation has been executed before
6964 op_index = self._check_or_add_scale_suboperation(
6965 db_nslcmop,
6966 vnf_index,
6967 vnf_config_primitive,
6968 primitive_params,
6969 "POST-SCALE",
6970 )
6971 if op_index == self.SUBOPERATION_STATUS_SKIP:
6972 # Skip sub-operation
6973 result = "COMPLETED"
6974 result_detail = "Done"
6975 self.logger.debug(
6976 logging_text
6977 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6978 vnf_config_primitive, result, result_detail
6979 )
6980 )
6981 else:
6982 if op_index == self.SUBOPERATION_STATUS_NEW:
6983 # New sub-operation: Get index of this sub-operation
6984 op_index = (
6985 len(db_nslcmop.get("_admin", {}).get("operations"))
6986 - 1
6987 )
6988 self.logger.debug(
6989 logging_text
6990 + "vnf_config_primitive={} New sub-operation".format(
6991 vnf_config_primitive
6992 )
6993 )
6994 else:
6995 # retry: Get registered params for this existing sub-operation
6996 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6997 op_index
6998 ]
6999 vnf_index = op.get("member_vnf_index")
7000 vnf_config_primitive = op.get("primitive")
7001 primitive_params = op.get("primitive_params")
7002 self.logger.debug(
7003 logging_text
7004 + "vnf_config_primitive={} Sub-operation retry".format(
7005 vnf_config_primitive
7006 )
7007 )
7008 # Execute the primitive, either with new (first-time) or registered (reintent) args
7009 ee_descriptor_id = config_primitive.get(
7010 "execution-environment-ref"
7011 )
7012 primitive_name = config_primitive.get(
7013 "execution-environment-primitive", vnf_config_primitive
7014 )
7015 ee_id, vca_type = self._look_for_deployed_vca(
7016 nsr_deployed["VCA"],
7017 member_vnf_index=vnf_index,
7018 vdu_id=None,
7019 vdu_count_index=None,
7020 ee_descriptor_id=ee_descriptor_id,
7021 )
7022 result, result_detail = await self._ns_execute_primitive(
7023 ee_id,
7024 primitive_name,
7025 primitive_params,
7026 vca_type=vca_type,
7027 vca_id=vca_id,
7028 )
7029 self.logger.debug(
7030 logging_text
7031 + "vnf_config_primitive={} Done with result {} {}".format(
7032 vnf_config_primitive, result, result_detail
7033 )
7034 )
7035 # Update operationState = COMPLETED | FAILED
7036 self._update_suboperation_status(
7037 db_nslcmop, op_index, result, result_detail
7038 )
7039
7040 if result == "FAILED":
7041 raise LcmException(result_detail)
7042 db_nsr_update["config-status"] = old_config_status
7043 scale_process = None
7044 # POST-SCALE END
7045
7046 db_nsr_update[
7047 "detailed-status"
7048 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7049 db_nsr_update["operational-status"] = (
7050 "running"
7051 if old_operational_status == "failed"
7052 else old_operational_status
7053 )
7054 db_nsr_update["config-status"] = old_config_status
7055 return
7056 except (
7057 ROclient.ROClientException,
7058 DbException,
7059 LcmException,
7060 NgRoException,
7061 ) as e:
7062 self.logger.error(logging_text + "Exit Exception {}".format(e))
7063 exc = e
7064 except asyncio.CancelledError:
7065 self.logger.error(
7066 logging_text + "Cancelled Exception while '{}'".format(step)
7067 )
7068 exc = "Operation was cancelled"
7069 except Exception as e:
7070 exc = traceback.format_exc()
7071 self.logger.critical(
7072 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7073 exc_info=True,
7074 )
7075 finally:
7076 self._write_ns_status(
7077 nsr_id=nsr_id,
7078 ns_state=None,
7079 current_operation="IDLE",
7080 current_operation_id=None,
7081 )
7082 if tasks_dict_info:
7083 stage[1] = "Waiting for instantiate pending tasks."
7084 self.logger.debug(logging_text + stage[1])
7085 exc = await self._wait_for_tasks(
7086 logging_text,
7087 tasks_dict_info,
7088 self.timeout_ns_deploy,
7089 stage,
7090 nslcmop_id,
7091 nsr_id=nsr_id,
7092 )
7093 if exc:
7094 db_nslcmop_update[
7095 "detailed-status"
7096 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7097 nslcmop_operation_state = "FAILED"
7098 if db_nsr:
7099 db_nsr_update["operational-status"] = old_operational_status
7100 db_nsr_update["config-status"] = old_config_status
7101 db_nsr_update["detailed-status"] = ""
7102 if scale_process:
7103 if "VCA" in scale_process:
7104 db_nsr_update["config-status"] = "failed"
7105 if "RO" in scale_process:
7106 db_nsr_update["operational-status"] = "failed"
7107 db_nsr_update[
7108 "detailed-status"
7109 ] = "FAILED scaling nslcmop={} {}: {}".format(
7110 nslcmop_id, step, exc
7111 )
7112 else:
7113 error_description_nslcmop = None
7114 nslcmop_operation_state = "COMPLETED"
7115 db_nslcmop_update["detailed-status"] = "Done"
7116
7117 self._write_op_status(
7118 op_id=nslcmop_id,
7119 stage="",
7120 error_message=error_description_nslcmop,
7121 operation_state=nslcmop_operation_state,
7122 other_update=db_nslcmop_update,
7123 )
7124 if db_nsr:
7125 self._write_ns_status(
7126 nsr_id=nsr_id,
7127 ns_state=None,
7128 current_operation="IDLE",
7129 current_operation_id=None,
7130 other_update=db_nsr_update,
7131 )
7132
7133 if nslcmop_operation_state:
7134 try:
7135 msg = {
7136 "nsr_id": nsr_id,
7137 "nslcmop_id": nslcmop_id,
7138 "operationState": nslcmop_operation_state,
7139 }
7140 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7141 except Exception as e:
7142 self.logger.error(
7143 logging_text + "kafka_write notification Exception {}".format(e)
7144 )
7145 self.logger.debug(logging_text + "Exit")
7146 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7147
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs referenced by a scaling operation.

        For every KDU entry in ``scaling_info`` the method runs, in order:
        terminate config primitives (scale-in only), the K8s scale itself,
        and initial config primitives (scale-out only).

        :param logging_text: prefix for every log line
        :param nsr_id: NS record id, used to build the DB status path
        :param nsr_deployed: nsr "_admin.deployed" content (source of deployed KDUs)
        :param db_vnfd: VNFD content, queried for kdu configuration primitives
        :param vca_id: VCA id forwarded to the K8s connectors
        :param scaling_info: dict carrying "kdu-create" or "kdu-delete" maps of
            kdu_name -> list of per-instance scaling descriptors
        :raises asyncio.TimeoutError: if a primitive (600s) or the scale
            (self.timeout_vca_on_error) does not finish in time
        """
        # Only one of the two keys is processed; "kdu-create" wins when both exist
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # Where the K8s connector writes progress/status in the nsr record
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # Scale-in: run terminate primitives first, but only when the KDU
                # has no juju execution environment (otherwise the EE handles them)
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # Primitives must run in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # The actual scale operation (both directions), atomic so a
                # failure rolls the release back
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # Scale-out: run initial primitives after the new instances exist
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7253
7254 async def _scale_ng_ro(
7255 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7256 ):
7257 nsr_id = db_nslcmop["nsInstanceId"]
7258 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7259 db_vnfrs = {}
7260
7261 # read from db: vnfd's for every vnf
7262 db_vnfds = []
7263
7264 # for each vnf in ns, read vnfd
7265 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7266 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7267 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7268 # if we haven't this vnfd, read it from db
7269 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7270 # read from db
7271 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7272 db_vnfds.append(vnfd)
7273 n2vc_key = self.n2vc.get_public_key()
7274 n2vc_key_list = [n2vc_key]
7275 self.scale_vnfr(
7276 db_vnfr,
7277 vdu_scaling_info.get("vdu-create"),
7278 vdu_scaling_info.get("vdu-delete"),
7279 mark_delete=True,
7280 )
7281 # db_vnfr has been updated, update db_vnfrs to use it
7282 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7283 await self._instantiate_ng_ro(
7284 logging_text,
7285 nsr_id,
7286 db_nsd,
7287 db_nsr,
7288 db_nslcmop,
7289 db_vnfrs,
7290 db_vnfds,
7291 n2vc_key_list,
7292 stage=stage,
7293 start_deploy=time(),
7294 timeout_ns_deploy=self.timeout_ns_deploy,
7295 )
7296 if vdu_scaling_info.get("vdu-delete"):
7297 self.scale_vnfr(
7298 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7299 )
7300
7301 async def extract_prometheus_scrape_jobs(
7302 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7303 ):
7304 # look if exist a file called 'prometheus*.j2' and
7305 artifact_content = self.fs.dir_ls(artifact_path)
7306 job_file = next(
7307 (
7308 f
7309 for f in artifact_content
7310 if f.startswith("prometheus") and f.endswith(".j2")
7311 ),
7312 None,
7313 )
7314 if not job_file:
7315 return
7316 with self.fs.file_open((artifact_path, job_file), "r") as f:
7317 job_data = f.read()
7318
7319 # TODO get_service
7320 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7321 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7322 host_port = "80"
7323 vnfr_id = vnfr_id.replace("-", "")
7324 variables = {
7325 "JOB_NAME": vnfr_id,
7326 "TARGET_IP": target_ip,
7327 "EXPORTER_POD_IP": host_name,
7328 "EXPORTER_POD_PORT": host_port,
7329 }
7330 job_list = parse_job(job_data, variables)
7331 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7332 for job in job_list:
7333 if (
7334 not isinstance(job.get("job_name"), str)
7335 or vnfr_id not in job["job_name"]
7336 ):
7337 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7338 job["nsr_id"] = nsr_id
7339 job["vnfr_id"] = vnfr_id
7340 return job_list
7341
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild a single VDU instance of a VNF through RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the LCM operation (used for status updates)
        :param vnf_id: "_id" of the target record in the "vnfrs" collection
        :param additional_param: dict selecting the VDU, with keys "vdu_id"
            (vdu-id-ref) and "count-index" (instance index)
        :param operation_type: RO operation verb, forwarded verbatim to
            self.RO.operate and used as nsr operational-status
        :return: ("COMPLETED", "Done") on success, ("FAILED", <detail>) on error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # Locate the exact VDU instance (vdu-id-ref + count-index) in the vnfr
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info is the target VIM identifier
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # Block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # Only reached via an except branch, where exc is always set
        return "FAILED", "Error in operate VNF {}".format(exc)
7428
7429 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7430 """
7431 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7432
7433 :param: vim_account_id: VIM Account ID
7434
7435 :return: (cloud_name, cloud_credential)
7436 """
7437 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7438 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7439
7440 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7441 """
7442 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7443
7444 :param: vim_account_id: VIM Account ID
7445
7446 :return: (cloud_name, cloud_credential)
7447 """
7448 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7449 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7450
7451 async def migrate(self, nsr_id, nslcmop_id):
7452 """
7453 Migrate VNFs and VDUs instances in a NS
7454
7455 :param: nsr_id: NS Instance ID
7456 :param: nslcmop_id: nslcmop ID of migrate
7457
7458 """
7459 # Try to lock HA task here
7460 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7461 if not task_is_locked_by_me:
7462 return
7463 logging_text = "Task ns={} migrate ".format(nsr_id)
7464 self.logger.debug(logging_text + "Enter")
7465 # get all needed from database
7466 db_nslcmop = None
7467 db_nslcmop_update = {}
7468 nslcmop_operation_state = None
7469 db_nsr_update = {}
7470 target = {}
7471 exc = None
7472 # in case of error, indicates what part of scale was failed to put nsr at error status
7473 start_deploy = time()
7474
7475 try:
7476 # wait for any previous tasks in process
7477 step = "Waiting for previous operations to terminate"
7478 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7479
7480 self._write_ns_status(
7481 nsr_id=nsr_id,
7482 ns_state=None,
7483 current_operation="MIGRATING",
7484 current_operation_id=nslcmop_id,
7485 )
7486 step = "Getting nslcmop from database"
7487 self.logger.debug(
7488 step + " after having waited for previous tasks to be completed"
7489 )
7490 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7491 migrate_params = db_nslcmop.get("operationParams")
7492
7493 target = {}
7494 target.update(migrate_params)
7495 desc = await self.RO.migrate(nsr_id, target)
7496 self.logger.debug("RO return > {}".format(desc))
7497 action_id = desc["action_id"]
7498 await self._wait_ng_ro(
7499 nsr_id,
7500 action_id,
7501 nslcmop_id,
7502 start_deploy,
7503 self.timeout_migrate,
7504 operation="migrate",
7505 )
7506 except (ROclient.ROClientException, DbException, LcmException) as e:
7507 self.logger.error("Exit Exception {}".format(e))
7508 exc = e
7509 except asyncio.CancelledError:
7510 self.logger.error("Cancelled Exception while '{}'".format(step))
7511 exc = "Operation was cancelled"
7512 except Exception as e:
7513 exc = traceback.format_exc()
7514 self.logger.critical(
7515 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7516 )
7517 finally:
7518 self._write_ns_status(
7519 nsr_id=nsr_id,
7520 ns_state=None,
7521 current_operation="IDLE",
7522 current_operation_id=None,
7523 )
7524 if exc:
7525 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7526 nslcmop_operation_state = "FAILED"
7527 else:
7528 nslcmop_operation_state = "COMPLETED"
7529 db_nslcmop_update["detailed-status"] = "Done"
7530 db_nsr_update["detailed-status"] = "Done"
7531
7532 self._write_op_status(
7533 op_id=nslcmop_id,
7534 stage="",
7535 error_message="",
7536 operation_state=nslcmop_operation_state,
7537 other_update=db_nslcmop_update,
7538 )
7539 if nslcmop_operation_state:
7540 try:
7541 msg = {
7542 "nsr_id": nsr_id,
7543 "nslcmop_id": nslcmop_id,
7544 "operationState": nslcmop_operation_state,
7545 }
7546 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7547 except Exception as e:
7548 self.logger.error(
7549 logging_text + "kafka_write notification Exception {}".format(e)
7550 )
7551 self.logger.debug(logging_text + "Exit")
7552 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7553
7554 async def heal(self, nsr_id, nslcmop_id):
7555 """
7556 Heal NS
7557
7558 :param nsr_id: ns instance to heal
7559 :param nslcmop_id: operation to run
7560 :return:
7561 """
7562
7563 # Try to lock HA task here
7564 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7565 if not task_is_locked_by_me:
7566 return
7567
7568 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7569 stage = ["", "", ""]
7570 tasks_dict_info = {}
7571 # ^ stage, step, VIM progress
7572 self.logger.debug(logging_text + "Enter")
7573 # get all needed from database
7574 db_nsr = None
7575 db_nslcmop_update = {}
7576 db_nsr_update = {}
7577 db_vnfrs = {} # vnf's info indexed by _id
7578 exc = None
7579 old_operational_status = ""
7580 old_config_status = ""
7581 nsi_id = None
7582 try:
7583 # wait for any previous tasks in process
7584 step = "Waiting for previous operations to terminate"
7585 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7586 self._write_ns_status(
7587 nsr_id=nsr_id,
7588 ns_state=None,
7589 current_operation="HEALING",
7590 current_operation_id=nslcmop_id,
7591 )
7592
7593 step = "Getting nslcmop from database"
7594 self.logger.debug(
7595 step + " after having waited for previous tasks to be completed"
7596 )
7597 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7598
7599 step = "Getting nsr from database"
7600 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7601 old_operational_status = db_nsr["operational-status"]
7602 old_config_status = db_nsr["config-status"]
7603
7604 db_nsr_update = {
7605 "_admin.deployed.RO.operational-status": "healing",
7606 }
7607 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7608
7609 step = "Sending heal order to VIM"
7610 # task_ro = asyncio.ensure_future(
7611 # self.heal_RO(
7612 # logging_text=logging_text,
7613 # nsr_id=nsr_id,
7614 # db_nslcmop=db_nslcmop,
7615 # stage=stage,
7616 # )
7617 # )
7618 # self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
7619 # tasks_dict_info[task_ro] = "Healing at VIM"
7620 await self.heal_RO(
7621 logging_text=logging_text,
7622 nsr_id=nsr_id,
7623 db_nslcmop=db_nslcmop,
7624 stage=stage,
7625 )
7626 # VCA tasks
7627 # read from db: nsd
7628 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7629 self.logger.debug(logging_text + stage[1])
7630 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7631 self.fs.sync(db_nsr["nsd-id"])
7632 db_nsr["nsd"] = nsd
7633 # read from db: vnfr's of this ns
7634 step = "Getting vnfrs from db"
7635 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7636 for vnfr in db_vnfrs_list:
7637 db_vnfrs[vnfr["_id"]] = vnfr
7638 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7639
7640 # Check for each target VNF
7641 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7642 for target_vnf in target_list:
7643 # Find this VNF in the list from DB
7644 vnfr_id = target_vnf.get("vnfInstanceId", None)
7645 if vnfr_id:
7646 db_vnfr = db_vnfrs[vnfr_id]
7647 vnfd_id = db_vnfr.get("vnfd-id")
7648 vnfd_ref = db_vnfr.get("vnfd-ref")
7649 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7650 base_folder = vnfd["_admin"]["storage"]
7651 vdu_id = None
7652 vdu_index = 0
7653 vdu_name = None
7654 kdu_name = None
7655 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7656 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7657
7658 # Check each target VDU and deploy N2VC
7659 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7660 "vdu", []
7661 )
7662 if not target_vdu_list:
7663 # Codigo nuevo para crear diccionario
7664 target_vdu_list = []
7665 for existing_vdu in db_vnfr.get("vdur"):
7666 vdu_name = existing_vdu.get("vdu-name", None)
7667 vdu_index = existing_vdu.get("count-index", 0)
7668 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7669 "run-day1", False
7670 )
7671 vdu_to_be_healed = {
7672 "vdu-id": vdu_name,
7673 "count-index": vdu_index,
7674 "run-day1": vdu_run_day1,
7675 }
7676 target_vdu_list.append(vdu_to_be_healed)
7677 for target_vdu in target_vdu_list:
7678 deploy_params_vdu = target_vdu
7679 # Set run-day1 vnf level value if not vdu level value exists
7680 if not deploy_params_vdu.get("run-day1") and target_vnf[
7681 "additionalParams"
7682 ].get("run-day1"):
7683 deploy_params_vdu["run-day1"] = target_vnf[
7684 "additionalParams"
7685 ].get("run-day1")
7686 vdu_name = target_vdu.get("vdu-id", None)
7687 # TODO: Get vdu_id from vdud.
7688 vdu_id = vdu_name
7689 # For multi instance VDU count-index is mandatory
7690 # For single session VDU count-indes is 0
7691 vdu_index = target_vdu.get("count-index", 0)
7692
7693 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7694 stage[1] = "Deploying Execution Environments."
7695 self.logger.debug(logging_text + stage[1])
7696
7697 # VNF Level charm. Normal case when proxy charms.
7698 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7699 descriptor_config = get_configuration(vnfd, vnfd_ref)
7700 if descriptor_config:
7701 # Continue if healed machine is management machine
7702 vnf_ip_address = db_vnfr.get("ip-address")
7703 target_instance = None
7704 for instance in db_vnfr.get("vdur", None):
7705 if (
7706 instance["vdu-name"] == vdu_name
7707 and instance["count-index"] == vdu_index
7708 ):
7709 target_instance = instance
7710 break
7711 if vnf_ip_address == target_instance.get("ip-address"):
7712 self._heal_n2vc(
7713 logging_text=logging_text
7714 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7715 member_vnf_index, vdu_name, vdu_index
7716 ),
7717 db_nsr=db_nsr,
7718 db_vnfr=db_vnfr,
7719 nslcmop_id=nslcmop_id,
7720 nsr_id=nsr_id,
7721 nsi_id=nsi_id,
7722 vnfd_id=vnfd_ref,
7723 vdu_id=None,
7724 kdu_name=None,
7725 member_vnf_index=member_vnf_index,
7726 vdu_index=0,
7727 vdu_name=None,
7728 deploy_params=deploy_params_vdu,
7729 descriptor_config=descriptor_config,
7730 base_folder=base_folder,
7731 task_instantiation_info=tasks_dict_info,
7732 stage=stage,
7733 )
7734
7735 # VDU Level charm. Normal case with native charms.
7736 descriptor_config = get_configuration(vnfd, vdu_name)
7737 if descriptor_config:
7738 self._heal_n2vc(
7739 logging_text=logging_text
7740 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7741 member_vnf_index, vdu_name, vdu_index
7742 ),
7743 db_nsr=db_nsr,
7744 db_vnfr=db_vnfr,
7745 nslcmop_id=nslcmop_id,
7746 nsr_id=nsr_id,
7747 nsi_id=nsi_id,
7748 vnfd_id=vnfd_ref,
7749 vdu_id=vdu_id,
7750 kdu_name=kdu_name,
7751 member_vnf_index=member_vnf_index,
7752 vdu_index=vdu_index,
7753 vdu_name=vdu_name,
7754 deploy_params=deploy_params_vdu,
7755 descriptor_config=descriptor_config,
7756 base_folder=base_folder,
7757 task_instantiation_info=tasks_dict_info,
7758 stage=stage,
7759 )
7760
7761 except (
7762 ROclient.ROClientException,
7763 DbException,
7764 LcmException,
7765 NgRoException,
7766 ) as e:
7767 self.logger.error(logging_text + "Exit Exception {}".format(e))
7768 exc = e
7769 except asyncio.CancelledError:
7770 self.logger.error(
7771 logging_text + "Cancelled Exception while '{}'".format(step)
7772 )
7773 exc = "Operation was cancelled"
7774 except Exception as e:
7775 exc = traceback.format_exc()
7776 self.logger.critical(
7777 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7778 exc_info=True,
7779 )
7780 finally:
7781 if tasks_dict_info:
7782 stage[1] = "Waiting for healing pending tasks."
7783 self.logger.debug(logging_text + stage[1])
7784 exc = await self._wait_for_tasks(
7785 logging_text,
7786 tasks_dict_info,
7787 self.timeout_ns_deploy,
7788 stage,
7789 nslcmop_id,
7790 nsr_id=nsr_id,
7791 )
7792 if exc:
7793 db_nslcmop_update[
7794 "detailed-status"
7795 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7796 nslcmop_operation_state = "FAILED"
7797 if db_nsr:
7798 db_nsr_update["operational-status"] = old_operational_status
7799 db_nsr_update["config-status"] = old_config_status
7800 db_nsr_update[
7801 "detailed-status"
7802 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
7803 for task, task_name in tasks_dict_info.items():
7804 if not task.done() or task.cancelled() or task.exception():
7805 if task_name.startswith(self.task_name_deploy_vca):
7806 # A N2VC task is pending
7807 db_nsr_update["config-status"] = "failed"
7808 else:
7809 # RO task is pending
7810 db_nsr_update["operational-status"] = "failed"
7811 else:
7812 error_description_nslcmop = None
7813 nslcmop_operation_state = "COMPLETED"
7814 db_nslcmop_update["detailed-status"] = "Done"
7815 db_nsr_update["detailed-status"] = "Done"
7816 db_nsr_update["operational-status"] = "running"
7817 db_nsr_update["config-status"] = "configured"
7818
7819 self._write_op_status(
7820 op_id=nslcmop_id,
7821 stage="",
7822 error_message=error_description_nslcmop,
7823 operation_state=nslcmop_operation_state,
7824 other_update=db_nslcmop_update,
7825 )
7826 if db_nsr:
7827 self._write_ns_status(
7828 nsr_id=nsr_id,
7829 ns_state=None,
7830 current_operation="IDLE",
7831 current_operation_id=None,
7832 other_update=db_nsr_update,
7833 )
7834
7835 if nslcmop_operation_state:
7836 try:
7837 msg = {
7838 "nsr_id": nsr_id,
7839 "nslcmop_id": nslcmop_id,
7840 "operationState": nslcmop_operation_state,
7841 }
7842 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
7843 except Exception as e:
7844 self.logger.error(
7845 logging_text + "kafka_write notification Exception {}".format(e)
7846 )
7847 self.logger.debug(logging_text + "Exit")
7848 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7849
7850 async def heal_RO(
7851 self,
7852 logging_text,
7853 nsr_id,
7854 db_nslcmop,
7855 stage,
7856 ):
7857 """
7858 Heal at RO
7859 :param logging_text: preffix text to use at logging
7860 :param nsr_id: nsr identity
7861 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7862 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7863 :return: None or exception
7864 """
7865
7866 def get_vim_account(vim_account_id):
7867 nonlocal db_vims
7868 if vim_account_id in db_vims:
7869 return db_vims[vim_account_id]
7870 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7871 db_vims[vim_account_id] = db_vim
7872 return db_vim
7873
7874 try:
7875 start_heal = time()
7876 ns_params = db_nslcmop.get("operationParams")
7877 if ns_params and ns_params.get("timeout_ns_heal"):
7878 timeout_ns_heal = ns_params["timeout_ns_heal"]
7879 else:
7880 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
7881
7882 db_vims = {}
7883
7884 nslcmop_id = db_nslcmop["_id"]
7885 target = {
7886 "action_id": nslcmop_id,
7887 }
7888 self.logger.warning(
7889 "db_nslcmop={} and timeout_ns_heal={}".format(
7890 db_nslcmop, timeout_ns_heal
7891 )
7892 )
7893 target.update(db_nslcmop.get("operationParams", {}))
7894
7895 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7896 desc = await self.RO.recreate(nsr_id, target)
7897 self.logger.debug("RO return > {}".format(desc))
7898 action_id = desc["action_id"]
7899 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7900 await self._wait_ng_ro(
7901 nsr_id,
7902 action_id,
7903 nslcmop_id,
7904 start_heal,
7905 timeout_ns_heal,
7906 stage,
7907 operation="healing",
7908 )
7909
7910 # Updating NSR
7911 db_nsr_update = {
7912 "_admin.deployed.RO.operational-status": "running",
7913 "detailed-status": " ".join(stage),
7914 }
7915 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7916 self._write_op_status(nslcmop_id, stage)
7917 self.logger.debug(
7918 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7919 )
7920
7921 except Exception as e:
7922 stage[2] = "ERROR healing at VIM"
7923 # self.set_vnfr_at_error(db_vnfrs, str(e))
7924 self.logger.error(
7925 "Error healing at VIM {}".format(e),
7926 exc_info=not isinstance(
7927 e,
7928 (
7929 ROclient.ROClientException,
7930 LcmException,
7931 DbException,
7932 NgRoException,
7933 ),
7934 ),
7935 )
7936 raise
7937
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment of the
        given configuration descriptor, registering each task in lcm_tasks and
        in task_instantiation_info.

        For each EE it either reuses the matching entry in
        db_nsr._admin.deployed.VCA (matched by member-vnf-index, vdu_id,
        kdu_name, vdu_count_index and ee_descriptor_id) or creates and persists
        a new one.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        # Determine the execution-environment list from the descriptor: either
        # an explicit list, a single juju entry (ns charm), or nothing.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type and vca_name from the EE descriptor; entries that
            # are neither juju nor helm are skipped.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Search an already-deployed VCA entry matching this EE. If the
            # loop completes without break, the for/else creates a new entry
            # (vca_index starts at -1 so an empty VCA list yields index 0).
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # Keep the in-memory copy in sync with the db write above.
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8090
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach/re-configure one VCA execution environment after healing.

        For native charms it waits for the healed VM, re-registers the
        execution environment and reinstalls the configuration software; for
        proxy/helm EEs it re-injects the SSH key after RO finishes healing.
        Day-1 primitives are re-run only when the operation parameter
        'run-day1' is set. Raises LcmException on any failure, after marking
        the configurationStatus as BROKEN.
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # 'step' tracks the current stage for error reporting in the except clause
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace depending on whether this EE
            # targets a VNF, VDU or KDU.
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                " Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8499
8500 async def _wait_heal_ro(
8501 self,
8502 nsr_id,
8503 timeout=600,
8504 ):
8505 start_time = time()
8506 while time() <= start_time + timeout:
8507 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8508 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8509 "operational-status"
8510 ]
8511 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8512 if operational_status_ro != "healing":
8513 break
8514 await asyncio.sleep(15, loop=self.loop)
8515 else: # timeout_ns_deploy
8516 raise NgRoException("Timeout waiting ns to deploy")
8517
8518 async def vertical_scale(self, nsr_id, nslcmop_id):
8519 """
8520 Vertical Scale the VDUs in a NS
8521
8522 :param: nsr_id: NS Instance ID
8523 :param: nslcmop_id: nslcmop ID of migrate
8524
8525 """
8526 # Try to lock HA task here
8527 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8528 if not task_is_locked_by_me:
8529 return
8530 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8531 self.logger.debug(logging_text + "Enter")
8532 # get all needed from database
8533 db_nslcmop = None
8534 db_nslcmop_update = {}
8535 nslcmop_operation_state = None
8536 db_nsr_update = {}
8537 target = {}
8538 exc = None
8539 # in case of error, indicates what part of scale was failed to put nsr at error status
8540 start_deploy = time()
8541
8542 try:
8543 # wait for any previous tasks in process
8544 step = "Waiting for previous operations to terminate"
8545 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8546
8547 self._write_ns_status(
8548 nsr_id=nsr_id,
8549 ns_state=None,
8550 current_operation="VerticalScale",
8551 current_operation_id=nslcmop_id,
8552 )
8553 step = "Getting nslcmop from database"
8554 self.logger.debug(
8555 step + " after having waited for previous tasks to be completed"
8556 )
8557 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8558 operationParams = db_nslcmop.get("operationParams")
8559 target = {}
8560 target.update(operationParams)
8561 desc = await self.RO.vertical_scale(nsr_id, target)
8562 self.logger.debug("RO return > {}".format(desc))
8563 action_id = desc["action_id"]
8564 await self._wait_ng_ro(
8565 nsr_id,
8566 action_id,
8567 nslcmop_id,
8568 start_deploy,
8569 self.timeout_verticalscale,
8570 operation="verticalscale",
8571 )
8572 except (ROclient.ROClientException, DbException, LcmException) as e:
8573 self.logger.error("Exit Exception {}".format(e))
8574 exc = e
8575 except asyncio.CancelledError:
8576 self.logger.error("Cancelled Exception while '{}'".format(step))
8577 exc = "Operation was cancelled"
8578 except Exception as e:
8579 exc = traceback.format_exc()
8580 self.logger.critical(
8581 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8582 )
8583 finally:
8584 self._write_ns_status(
8585 nsr_id=nsr_id,
8586 ns_state=None,
8587 current_operation="IDLE",
8588 current_operation_id=None,
8589 )
8590 if exc:
8591 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8592 nslcmop_operation_state = "FAILED"
8593 else:
8594 nslcmop_operation_state = "COMPLETED"
8595 db_nslcmop_update["detailed-status"] = "Done"
8596 db_nsr_update["detailed-status"] = "Done"
8597
8598 self._write_op_status(
8599 op_id=nslcmop_id,
8600 stage="",
8601 error_message="",
8602 operation_state=nslcmop_operation_state,
8603 other_update=db_nslcmop_update,
8604 )
8605 if nslcmop_operation_state:
8606 try:
8607 msg = {
8608 "nsr_id": nsr_id,
8609 "nslcmop_id": nslcmop_id,
8610 "operationState": nslcmop_operation_state,
8611 }
8612 await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8613 except Exception as e:
8614 self.logger.error(
8615 logging_text + "kafka_write notification Exception {}".format(e)
8616 )
8617 self.logger.debug(logging_text + "Exit")
8618 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")