Bug 2124 fixed: timeout defined when calling the N2VC scale method
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.nsr import (
38 get_deployed_kdu,
39 get_deployed_vca,
40 get_deployed_vca_list,
41 get_nsd,
42 )
43 from osm_lcm.data_utils.vca import (
44 DeployedComponent,
45 DeployedK8sResource,
46 DeployedVCA,
47 EELevel,
48 Relation,
49 EERelation,
50 safe_get_ee_relation,
51 )
52 from osm_lcm.ng_ro import NgRoClient, NgRoException
53 from osm_lcm.lcm_utils import (
54 LcmException,
55 LcmExceptionNoMgmtIP,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 )
63 from osm_lcm.data_utils.nsd import (
64 get_ns_configuration_relation_list,
65 get_vnf_profile,
66 get_vnf_profiles,
67 )
68 from osm_lcm.data_utils.vnfd import (
69 get_kdu,
70 get_kdu_services,
71 get_relation_list,
72 get_vdu_list,
73 get_vdu_profile,
74 get_ee_sorted_initial_config_primitive_list,
75 get_ee_sorted_terminate_config_primitive_list,
76 get_kdu_list,
77 get_virtual_link_profiles,
78 get_vdu,
79 get_configuration,
80 get_vdu_index,
81 get_scaling_aspect,
82 get_number_of_instances,
83 get_juju_ee_ref,
84 get_kdu_resource_profile,
85 find_software_version,
86 )
87 from osm_lcm.data_utils.list_utils import find_in_list
88 from osm_lcm.data_utils.vnfr import (
89 get_osm_params,
90 get_vdur_index,
91 get_kdur,
92 get_volumes_from_instantiation_params,
93 )
94 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
95 from osm_lcm.data_utils.database.vim_account import VimAccountDB
96 from n2vc.definitions import RelationEndpoint
97 from n2vc.k8s_helm_conn import K8sHelmConnector
98 from n2vc.k8s_helm3_conn import K8sHelm3Connector
99 from n2vc.k8s_juju_conn import K8sJujuConnector
100
101 from osm_common.dbbase import DbException
102 from osm_common.fsbase import FsException
103
104 from osm_lcm.data_utils.database.database import Database
105 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
106
107 from n2vc.n2vc_juju_conn import N2VCJujuConnector
108 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
109
110 from osm_lcm.lcm_helm_conn import LCMHelmConn
111 from osm_lcm.osm_config import OsmConfigBuilder
112 from osm_lcm.prometheus import parse_job
113
114 from copy import copy, deepcopy
115 from time import time
116 from uuid import uuid4
117
118 from random import randint
119
120 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
121
122
class NsLcm(LcmBase):
    # Time a charm may stay in blocked/error status before being marked as failed
    timeout_scale_on_error = 5 * 60
    # Factor applied to timeout_scale_on_error for the surrounding asyncio.wait_for
    timeout_scale_on_error_outer_factor = 1.05
    timeout_ns_deploy = 2 * 3600  # default global timeout for deploying an NS
    timeout_ns_terminate = 1800  # default global timeout for undeploying an NS
    timeout_ns_heal = 1800  # default global timeout for healing an NS
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    # Factor applied to timeout_primitive for the surrounding asyncio.wait_for
    timeout_primitive_outer_factor = 1.05
    timeout_ns_update = 30 * 60  # timeout for NS update
    # Timeout for observing some progress in a primitive execution
    timeout_progress_primitive = 10 * 60
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate action on vnfs
    timeout_verticalscale = 1800  # default global timeout for vertical scaling
    # Result codes for sub-operation lookup
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
145
def __init__(self, msg, lcm_tasks, config, loop):
    """
    Init, Connect to database, filesystem storage, and messaging
    :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
    :return: None
    """
    super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

    self.db = Database().instance.db
    self.fs = Filesystem().instance.fs
    self.loop = loop
    self.lcm_tasks = lcm_tasks
    self.timeout = config["timeout"]
    self.ro_config = config["ro_config"]
    self.ng_ro = config["ro_config"].get("ng")
    self.vca_config = config["VCA"].copy()

    # N2VC connector for proxy/native charms
    self.n2vc = N2VCJujuConnector(
        log=self.logger,
        loop=self.loop,
        fs=self.fs,
        db=self.db,
        on_update_db=self._on_update_n2vc_db,
    )

    # connector for helm-based execution environments
    self.conn_helm_ee = LCMHelmConn(
        log=self.logger,
        loop=self.loop,
        vca_config=self.vca_config,
        on_update_db=self._on_update_n2vc_db,
    )

    # K8s connectors: helm v2, helm v3 and juju bundles
    self.k8sclusterhelm2 = K8sHelmConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helmpath"),
        log=self.logger,
        on_update_db=None,
        fs=self.fs,
        db=self.db,
    )

    self.k8sclusterhelm3 = K8sHelm3Connector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helm3path"),
        log=self.logger,
        on_update_db=None,
        fs=self.fs,
        db=self.db,
    )

    self.k8sclusterjuju = K8sJujuConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        juju_command=self.vca_config.get("jujupath"),
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_k8s_db,
        fs=self.fs,
        db=self.db,
    )

    # kdu-model type -> K8s connector
    self.k8scluster_map = {
        "helm-chart": self.k8sclusterhelm2,
        "helm-chart-v3": self.k8sclusterhelm3,
        "chart": self.k8sclusterhelm3,
        "juju-bundle": self.k8sclusterjuju,
        "juju": self.k8sclusterjuju,
    }

    # charm type -> execution-environment connector
    self.vca_map = {
        "lxc_proxy_charm": self.n2vc,
        "native_charm": self.n2vc,
        "k8s_proxy_charm": self.n2vc,
        "helm": self.conn_helm_ee,
        "helm-v3": self.conn_helm_ee,
    }

    # create RO client
    self.RO = NgRoClient(self.loop, **self.ro_config)

    # RO operation type -> status-polling coroutine
    self.op_status_map = {
        "instantiation": self.RO.status,
        "termination": self.RO.status,
        "migrate": self.RO.status,
        "healing": self.RO.recreate_status,
        "verticalscale": self.RO.status,
        "start_stop_rebuild": self.RO.status,
    }
234
235 @staticmethod
236 def increment_ip_mac(ip_mac, vm_index=1):
237 if not isinstance(ip_mac, str):
238 return ip_mac
239 try:
240 # try with ipv4 look for last dot
241 i = ip_mac.rfind(".")
242 if i > 0:
243 i += 1
244 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
245 # try with ipv6 or mac look for last colon. Operate in hex
246 i = ip_mac.rfind(":")
247 if i > 0:
248 i += 1
249 # format in hex, len can be 2 for mac or 4 for ipv6
250 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
251 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
252 )
253 except Exception:
254 pass
255 return None
256
def _on_update_ro_db(self, nsrs_id, ro_descriptor):
    """Persist the RO descriptor into nsrs.deploymentStatus (best effort: errors only logged)."""
    try:
        # TODO filter RO descriptor fields...

        # write to database
        self.update_db_2("nsrs", nsrs_id, {"deploymentStatus": ro_descriptor})

    except Exception as e:
        self.logger.warn(
            "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
        )
274
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
    """Refresh the nsrs record (vcaStatus, configurationStatus, nsState) after an N2VC update.

    Best effort: any error other than cancellation/timeout is logged and swallowed.
    """
    # remove a trailing dot from the VCA path, if present
    if path.endswith("."):
        path = path[:-1]

    try:
        nsr_id = filter.get("_id")

        # current NS record and state from database
        nsr = self.db.get_one(table="nsrs", q_filter=filter)
        current_ns_status = nsr.get("nsState")

        # full juju status for the NS namespace
        status_dict = await self.n2vc.get_status(
            namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
        )

        db_dict = {"vcaStatus": status_dict}

        # update configurationStatus for this VCA
        try:
            vca_index = int(path[path.rfind(".") + 1 :])
            vca_list = deep_get(
                target_dict=nsr, key_list=("_admin", "deployed", "VCA")
            )
            vca_status = vca_list[vca_index].get("status")
            config_status = nsr.get("configurationStatus")[vca_index].get("status")
            # NOTE(review): db_dict carries no "configurationStatus" key at this
            # point, so these assignments raise KeyError and are swallowed by the
            # except below — confirm whether this branch was meant to take effect
            if config_status == "BROKEN" and vca_status != "failed":
                db_dict["configurationStatus"][vca_index] = "READY"
            elif config_status != "BROKEN" and vca_status == "failed":
                db_dict["configurationStatus"][vca_index] = "BROKEN"
        except Exception as e:
            # not update configurationStatus
            self.logger.debug("Error updating vca_index (ignore): {}".format(e))

        # READY -> DEGRADED when juju reports some problem; DEGRADED -> READY when all is OK
        is_degraded = False
        if current_ns_status in ("READY", "DEGRADED"):
            error_description = ""
            # machines must report agent-status=started and instance-status=running
            for machine_id, machine in (status_dict.get("machines") or {}).items():
                agent = machine.get("agent-status")
                if agent:
                    agent_state = agent.get("status")
                    if agent_state != "started":
                        is_degraded = True
                        error_description += (
                            "machine {} agent-status={} ; ".format(
                                machine_id, agent_state
                            )
                        )
                instance = machine.get("instance-status")
                if instance:
                    instance_state = instance.get("status")
                    if instance_state != "running":
                        is_degraded = True
                        error_description += (
                            "machine {} instance-status={} ; ".format(
                                machine_id, instance_state
                            )
                        )
            # applications must report status=active
            for app_id, app in (status_dict.get("applications") or {}).items():
                app_status = app.get("status")
                if app_status:
                    app_state = app_status.get("status")
                    if app_state != "active":
                        is_degraded = True
                        error_description += (
                            "application {} status={} ; ".format(app_id, app_state)
                        )

            if error_description:
                db_dict["errorDescription"] = error_description
            if current_ns_status == "READY" and is_degraded:
                db_dict["nsState"] = "DEGRADED"
            if current_ns_status == "DEGRADED" and not is_degraded:
                db_dict["nsState"] = "READY"

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
376
async def _on_update_k8s_db(
    self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
):
    """
    Updating vca status in NSR record
    :param cluster_uuid: UUID of a k8s cluster
    :param kdu_instance: The unique name of the KDU instance
    :param filter: To get nsr_id
    :cluster_type: The cluster type (juju, k8s)
    :return: none
    """
    nsr_id = filter.get("_id")
    try:
        # query the KDU status through the connector matching this cluster type
        connector = self.k8scluster_map[cluster_type]
        vca_status = await connector.status_kdu(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            yaml_format=False,
            complete_status=True,
            vca_id=vca_id,
        )

        self.logger.debug(
            f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
        )

        # write vcaStatus, keyed by nsr_id, to the NS record
        self.update_db_2("nsrs", nsr_id, {"vcaStatus": {nsr_id: vca_status}})
    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
416
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """Render a cloud-init Jinja2 template with the given additional params.

    :raises LcmException: when a template variable is undefined or the template is invalid
    """
    try:
        # StrictUndefined makes missing variables raise instead of rendering empty
        jinja_env = Environment(
            undefined=StrictUndefined,
            autoescape=select_autoescape(default_for_string=True, default=True),
        )
        return jinja_env.from_string(cloud_init_text).render(additional_params or {})
    except UndefinedError as e:
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
438
def _get_vdu_cloud_init_content(self, vdu, vnfd):
    """Return the cloud-init text of a VDU: either inline or read from package storage.

    :raises LcmException: when the referenced cloud-init-file cannot be read
    """
    cloud_init_content = cloud_init_file = None
    try:
        if vdu.get("cloud-init-file"):
            base_folder = vnfd["_admin"]["storage"]
            # package layout differs depending on whether a pkg-dir exists
            if base_folder["pkg-dir"]:
                path_parts = [
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "cloud_init",
                    vdu["cloud-init-file"],
                ]
            else:
                path_parts = [
                    base_folder["folder"],
                    "Scripts",
                    "cloud_init",
                    vdu["cloud-init-file"],
                ]
            cloud_init_file = "/".join(path_parts)
            with self.fs.file_open(cloud_init_file, "r") as fh:
                cloud_init_content = fh.read()
        elif vdu.get("cloud-init"):
            cloud_init_content = vdu["cloud-init"]

        return cloud_init_content
    except FsException as e:
        raise LcmException(
            "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                vnfd["id"], vdu["id"], cloud_init_file, e
            )
        )
467
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """Return the parsed additionalParams of the vdur matching vdu_id.

    :param db_vnfr: vnfr record; its "vdur" list may be absent or None
    :param vdu_id: vdu-id-ref to look for
    :return: parsed additional params (parse_yaml_strings of None when not found)
    """
    # get_iterable tolerates a missing/None "vdur" key; iterating
    # db_vnfr.get("vdur") directly raised TypeError in that case
    vdur = next(
        (
            vdur
            for vdur in get_iterable(db_vnfr, "vdur")
            if vdu_id == vdur["vdu-id-ref"]
        ),
        {},
    )
    additional_params = vdur.get("additionalParams")
    return parse_yaml_strings(additional_params)
474
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided
    :param nsrId: Id of the NSR
    :return: copy of vnfd
    """
    vnfd_RO = deepcopy(vnfd)
    # strip keys RO does not consume: internal ids, configuration,
    # monitoring, scaling and kdu information
    for unused_key in (
        "_id",
        "_admin",
        "monitoring-param",
        "scaling-group-descriptor",
        "kdu",
        "k8s-cluster",
    ):
        vnfd_RO.pop(unused_key, None)
    if new_id:
        vnfd_RO["id"] = new_id

    # cloud-init is rendered by LCM with Jinja2, so it must not reach RO
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
500
501 @staticmethod
502 def ip_profile_2_RO(ip_profile):
503 RO_ip_profile = deepcopy(ip_profile)
504 if "dns-server" in RO_ip_profile:
505 if isinstance(RO_ip_profile["dns-server"], list):
506 RO_ip_profile["dns-address"] = []
507 for ds in RO_ip_profile.pop("dns-server"):
508 RO_ip_profile["dns-address"].append(ds["address"])
509 else:
510 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
511 if RO_ip_profile.get("ip-version") == "ipv4":
512 RO_ip_profile["ip-version"] = "IPv4"
513 if RO_ip_profile.get("ip-version") == "ipv6":
514 RO_ip_profile["ip-version"] = "IPv6"
515 if "dhcp-params" in RO_ip_profile:
516 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
517 return RO_ip_profile
518
def _get_ro_vim_id_for_vim_account(self, vim_account):
    """Return the RO id of a VIM account.

    :raises LcmException: when the VIM account is not in ENABLED state
    """
    db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
    operational_state = db_vim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "VIM={} is not available. operationalState={}".format(
                vim_account, operational_state
            )
        )
    return db_vim["_admin"]["deployed"]["RO"]
529
def get_ro_wim_id_for_wim_account(self, wim_account):
    """Return the RO account id of a WIM account; non-string input is passed through.

    :raises LcmException: when the WIM account is not in ENABLED state
    """
    if not isinstance(wim_account, str):
        return wim_account
    db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
    operational_state = db_wim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "WIM={} is not available. operationalState={}".format(
                wim_account, operational_state
            )
        )
    return db_wim["_admin"]["deployed"]["RO-account"]
543
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
    """Apply a scaling request to a vnfr: push new vdur copies and/or remove vdur entries.

    :param db_vnfr: vnfr record; its "vdur" list is refreshed from the DB before returning
    :param vdu_create: {vdu-id-ref: number of instances to add}
    :param vdu_delete: {vdu-id-ref: number of instances to remove}
    :param mark_delete: when True, only mark entries as DELETING instead of pulling them
    """
    db_vdu_push_list = []
    template_vdur = []
    db_update = {"_admin.modified": time()}
    if vdu_create:
        for vdu_id, vdu_count in vdu_create.items():
            # newest existing vdur of this vdu is used as the copy source
            vdur = next(
                (
                    candidate
                    for candidate in reversed(db_vnfr["vdur"])
                    if candidate["vdu-id-ref"] == vdu_id
                ),
                None,
            )
            if not vdur:
                # scaling from 0: recover the template stored by a previous scale-in
                self.logger.debug(
                    "No vdur in the database. Using the vdur-template to scale"
                )
                vdur_template = db_vnfr.get("vdur-template")
                if not vdur_template:
                    raise LcmException(
                        "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                            vdu_id
                        )
                    )
                vdur = vdur_template[0]
                # the template is single-use: delete it from the database
                self.db.set_one(
                    "vnfrs",
                    {"_id": db_vnfr["_id"]},
                    None,
                    pull={"vdur-template": {"_id": vdur["_id"]}},
                )
            for replica in range(vdu_count):
                vdur_copy = deepcopy(vdur)
                vdur_copy["status"] = "BUILD"
                vdur_copy["status-detailed"] = None
                vdur_copy["ip-address"] = None
                vdur_copy["_id"] = str(uuid4())
                vdur_copy["count-index"] += replica + 1
                vdur_copy["id"] = "{}-{}".format(
                    vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                )
                vdur_copy.pop("vim_info", None)
                for iface in vdur_copy["interfaces"]:
                    # fixed addresses are incremented per replica; dynamic ones cleared
                    if iface.get("fixed-ip"):
                        iface["ip-address"] = self.increment_ip_mac(
                            iface["ip-address"], replica + 1
                        )
                    else:
                        iface.pop("ip-address", None)
                    if iface.get("fixed-mac"):
                        iface["mac-address"] = self.increment_ip_mac(
                            iface["mac-address"], replica + 1
                        )
                    else:
                        iface.pop("mac-address", None)
                    if db_vnfr["vdur"]:
                        iface.pop(
                            "mgmt_vnf", None
                        )  # only first vdu can be managment of vnf
                db_vdu_push_list.append(vdur_copy)
    if vdu_delete:
        if len(db_vnfr["vdur"]) == 1:
            # scaling to 0: keep the last vdur as a template for future scale-out
            self.logger.debug(
                "Scaling to 0 !, creating the template with the last vdur"
            )
            template_vdur = [db_vnfr["vdur"][0]]
        for vdu_id, vdu_count in vdu_delete.items():
            if mark_delete:
                matching_indexes = [
                    index
                    for index, entry in enumerate(db_vnfr["vdur"])
                    if entry["vdu-id-ref"] == vdu_id
                ]
                db_update.update(
                    {
                        "vdur.{}.status".format(i): "DELETING"
                        for i in matching_indexes[-vdu_count:]
                    }
                )
            else:
                # it must be deleted one by one because common.db does not allow otherwise
                vdus_to_delete = [
                    entry
                    for entry in reversed(db_vnfr["vdur"])
                    if entry["vdu-id-ref"] == vdu_id
                ]
                for vdu in vdus_to_delete[:vdu_count]:
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur": {"_id": vdu["_id"]}},
                    )
    db_push = {}
    if db_vdu_push_list:
        db_push["vdur"] = db_vdu_push_list
    if template_vdur:
        db_push["vdur-template"] = template_vdur
    db_push = db_push or None
    db_vnfr["vdur-template"] = template_vdur
    self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
    # refresh the caller's vdur list from the database
    db_vnfr["vdur"] = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})["vdur"]
655
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld
    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        # match the RO net by its osm id
        matching_net = next(
            (
                net_RO
                for net_RO in get_iterable(nsr_desc_RO, "nets")
                if vld["id"] == net_RO.get("ns_net_osm_id")
            ),
            None,
        )
        if matching_net is None:
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
        vld["vim-id"] = matching_net.get("vim_net_id")
        vld["name"] = matching_net.get("vim_name")
        vld["status"] = matching_net.get("status")
        vld["status-detailed"] = matching_net.get("error_msg")
        ns_update_nsr["vld.{}".format(vld_index)] = vld
679
def set_vnfr_at_error(self, db_vnfrs, error_text):
    """Mark every vnfr (and its vdur entries without a status) as ERROR; DB errors only logged."""
    try:
        for db_vnfr in db_vnfrs.values():
            vnfr_update = {"status": "ERROR"}
            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                if "status" not in vdur:
                    vdur["status"] = "ERROR"
                    vnfr_update[f"vdur.{vdu_index}.status"] = "ERROR"
                    if error_text:
                        vdur["status-detailed"] = str(error_text)
                        vnfr_update[f"vdur.{vdu_index}.status-detailed"] = "ERROR"
            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
    except DbException as e:
        self.logger.error("Cannot update vnf. {}".format(e))
696
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            # VNF management address: first address reported by RO
            if vnf_RO.get("ip_address"):
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(vnf_index)
                    )

            # per-VDU info: match by vdu_osm_id and replica count-index
            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                vdur_RO_count_index = 0
                if vdur.get("pdu-type"):
                    continue
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    if vdur["count-index"] != vdur_RO_count_index:
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    # per-interface addresses: match by internal name
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get("ip_address")
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            # per-VLD info: match by vnf_net_osm_id
            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break

        else:
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
793
def _get_ns_config_info(self, nsr_id):
    """
    Generates a mapping between vnf,vdu elements and the N2VC id
    :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
    :return: a dictionary with {osm-config-mapping: {}} where its element contains:
        "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
        "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
    """
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    mapping = {}
    for vca in db_nsr["_admin"]["deployed"]["VCA"]:
        # entries without member-vnf-index are not part of the mapping
        if not vca["member-vnf-index"]:
            continue
        if vca["vdu_id"]:
            key = "{}.{}.{}".format(
                vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
            )
        else:
            key = vca["member-vnf-index"]
        mapping[key] = vca["application"]
    return {"osm-config-mapping": mapping}
818
819 async def _instantiate_ng_ro(
820 self,
821 logging_text,
822 nsr_id,
823 nsd,
824 db_nsr,
825 db_nslcmop,
826 db_vnfrs,
827 db_vnfds,
828 n2vc_key_list,
829 stage,
830 start_deploy,
831 timeout_ns_deploy,
832 ):
833
834 db_vims = {}
835
836 def get_vim_account(vim_account_id):
837 nonlocal db_vims
838 if vim_account_id in db_vims:
839 return db_vims[vim_account_id]
840 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
841 db_vims[vim_account_id] = db_vim
842 return db_vim
843
844 # modify target_vld info with instantiation parameters
845 def parse_vld_instantiation_params(
846 target_vim, target_vld, vld_params, target_sdn
847 ):
848 if vld_params.get("ip-profile"):
849 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
850 "ip-profile"
851 ]
852 if vld_params.get("provider-network"):
853 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
854 "provider-network"
855 ]
856 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
857 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
858 "provider-network"
859 ]["sdn-ports"]
860 if vld_params.get("wimAccountId"):
861 target_wim = "wim:{}".format(vld_params["wimAccountId"])
862 target_vld["vim_info"][target_wim] = {}
863 for param in ("vim-network-name", "vim-network-id"):
864 if vld_params.get(param):
865 if isinstance(vld_params[param], dict):
866 for vim, vim_net in vld_params[param].items():
867 other_target_vim = "vim:" + vim
868 populate_dict(
869 target_vld["vim_info"],
870 (other_target_vim, param.replace("-", "_")),
871 vim_net,
872 )
873 else: # isinstance str
874 target_vld["vim_info"][target_vim][
875 param.replace("-", "_")
876 ] = vld_params[param]
877 if vld_params.get("common_id"):
878 target_vld["common_id"] = vld_params.get("common_id")
879
880 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
881 def update_ns_vld_target(target, ns_params):
882 for vnf_params in ns_params.get("vnf", ()):
883 if vnf_params.get("vimAccountId"):
884 target_vnf = next(
885 (
886 vnfr
887 for vnfr in db_vnfrs.values()
888 if vnf_params["member-vnf-index"]
889 == vnfr["member-vnf-index-ref"]
890 ),
891 None,
892 )
893 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
894 if not vdur:
895 return
896 for a_index, a_vld in enumerate(target["ns"]["vld"]):
897 target_vld = find_in_list(
898 get_iterable(vdur, "interfaces"),
899 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
900 )
901
902 vld_params = find_in_list(
903 get_iterable(ns_params, "vld"),
904 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
905 )
906 if target_vld:
907
908 if vnf_params.get("vimAccountId") not in a_vld.get(
909 "vim_info", {}
910 ):
911 target_vim_network_list = [
912 v for _, v in a_vld.get("vim_info").items()
913 ]
914 target_vim_network_name = next(
915 (
916 item.get("vim_network_name", "")
917 for item in target_vim_network_list
918 ),
919 "",
920 )
921
922 target["ns"]["vld"][a_index].get("vim_info").update(
923 {
924 "vim:{}".format(vnf_params["vimAccountId"]): {
925 "vim_network_name": target_vim_network_name,
926 }
927 }
928 )
929
930 if vld_params:
931 for param in ("vim-network-name", "vim-network-id"):
932 if vld_params.get(param) and isinstance(
933 vld_params[param], dict
934 ):
935 for vim, vim_net in vld_params[
936 param
937 ].items():
938 other_target_vim = "vim:" + vim
939 populate_dict(
940 target["ns"]["vld"][a_index].get(
941 "vim_info"
942 ),
943 (
944 other_target_vim,
945 param.replace("-", "_"),
946 ),
947 vim_net,
948 )
949
950 nslcmop_id = db_nslcmop["_id"]
951 target = {
952 "name": db_nsr["name"],
953 "ns": {"vld": []},
954 "vnf": [],
955 "image": deepcopy(db_nsr["image"]),
956 "flavor": deepcopy(db_nsr["flavor"]),
957 "action_id": nslcmop_id,
958 "cloud_init_content": {},
959 }
960 for image in target["image"]:
961 image["vim_info"] = {}
962 for flavor in target["flavor"]:
963 flavor["vim_info"] = {}
964 if db_nsr.get("affinity-or-anti-affinity-group"):
965 target["affinity-or-anti-affinity-group"] = deepcopy(
966 db_nsr["affinity-or-anti-affinity-group"]
967 )
968 for affinity_or_anti_affinity_group in target[
969 "affinity-or-anti-affinity-group"
970 ]:
971 affinity_or_anti_affinity_group["vim_info"] = {}
972
973 if db_nslcmop.get("lcmOperationType") != "instantiate":
974 # get parameters of instantiation:
975 db_nslcmop_instantiate = self.db.get_list(
976 "nslcmops",
977 {
978 "nsInstanceId": db_nslcmop["nsInstanceId"],
979 "lcmOperationType": "instantiate",
980 },
981 )[-1]
982 ns_params = db_nslcmop_instantiate.get("operationParams")
983 else:
984 ns_params = db_nslcmop.get("operationParams")
985 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
986 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
987
988 cp2target = {}
989 for vld_index, vld in enumerate(db_nsr.get("vld")):
990 target_vim = "vim:{}".format(ns_params["vimAccountId"])
991 target_vld = {
992 "id": vld["id"],
993 "name": vld["name"],
994 "mgmt-network": vld.get("mgmt-network", False),
995 "type": vld.get("type"),
996 "vim_info": {
997 target_vim: {
998 "vim_network_name": vld.get("vim-network-name"),
999 "vim_account_id": ns_params["vimAccountId"],
1000 }
1001 },
1002 }
1003 # check if this network needs SDN assist
1004 if vld.get("pci-interfaces"):
1005 db_vim = get_vim_account(ns_params["vimAccountId"])
1006 sdnc_id = db_vim["config"].get("sdn-controller")
1007 if sdnc_id:
1008 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1009 target_sdn = "sdn:{}".format(sdnc_id)
1010 target_vld["vim_info"][target_sdn] = {
1011 "sdn": True,
1012 "target_vim": target_vim,
1013 "vlds": [sdn_vld],
1014 "type": vld.get("type"),
1015 }
1016
1017 nsd_vnf_profiles = get_vnf_profiles(nsd)
1018 for nsd_vnf_profile in nsd_vnf_profiles:
1019 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1020 if cp["virtual-link-profile-id"] == vld["id"]:
1021 cp2target[
1022 "member_vnf:{}.{}".format(
1023 cp["constituent-cpd-id"][0][
1024 "constituent-base-element-id"
1025 ],
1026 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1027 )
1028 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1029
1030 # check at nsd descriptor, if there is an ip-profile
1031 vld_params = {}
1032 nsd_vlp = find_in_list(
1033 get_virtual_link_profiles(nsd),
1034 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1035 == vld["id"],
1036 )
1037 if (
1038 nsd_vlp
1039 and nsd_vlp.get("virtual-link-protocol-data")
1040 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1041 ):
1042 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1043 "l3-protocol-data"
1044 ]
1045 ip_profile_dest_data = {}
1046 if "ip-version" in ip_profile_source_data:
1047 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1048 "ip-version"
1049 ]
1050 if "cidr" in ip_profile_source_data:
1051 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1052 "cidr"
1053 ]
1054 if "gateway-ip" in ip_profile_source_data:
1055 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1056 "gateway-ip"
1057 ]
1058 if "dhcp-enabled" in ip_profile_source_data:
1059 ip_profile_dest_data["dhcp-params"] = {
1060 "enabled": ip_profile_source_data["dhcp-enabled"]
1061 }
1062 vld_params["ip-profile"] = ip_profile_dest_data
1063
1064 # update vld_params with instantiation params
1065 vld_instantiation_params = find_in_list(
1066 get_iterable(ns_params, "vld"),
1067 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1068 )
1069 if vld_instantiation_params:
1070 vld_params.update(vld_instantiation_params)
1071 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1072 target["ns"]["vld"].append(target_vld)
1073 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1074 update_ns_vld_target(target, ns_params)
1075
1076 for vnfr in db_vnfrs.values():
1077 vnfd = find_in_list(
1078 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1079 )
1080 vnf_params = find_in_list(
1081 get_iterable(ns_params, "vnf"),
1082 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1083 )
1084 target_vnf = deepcopy(vnfr)
1085 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1086 for vld in target_vnf.get("vld", ()):
1087 # check if connected to a ns.vld, to fill target'
1088 vnf_cp = find_in_list(
1089 vnfd.get("int-virtual-link-desc", ()),
1090 lambda cpd: cpd.get("id") == vld["id"],
1091 )
1092 if vnf_cp:
1093 ns_cp = "member_vnf:{}.{}".format(
1094 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1095 )
1096 if cp2target.get(ns_cp):
1097 vld["target"] = cp2target[ns_cp]
1098
1099 vld["vim_info"] = {
1100 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1101 }
1102 # check if this network needs SDN assist
1103 target_sdn = None
1104 if vld.get("pci-interfaces"):
1105 db_vim = get_vim_account(vnfr["vim-account-id"])
1106 sdnc_id = db_vim["config"].get("sdn-controller")
1107 if sdnc_id:
1108 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1109 target_sdn = "sdn:{}".format(sdnc_id)
1110 vld["vim_info"][target_sdn] = {
1111 "sdn": True,
1112 "target_vim": target_vim,
1113 "vlds": [sdn_vld],
1114 "type": vld.get("type"),
1115 }
1116
1117 # check at vnfd descriptor, if there is an ip-profile
1118 vld_params = {}
1119 vnfd_vlp = find_in_list(
1120 get_virtual_link_profiles(vnfd),
1121 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1122 )
1123 if (
1124 vnfd_vlp
1125 and vnfd_vlp.get("virtual-link-protocol-data")
1126 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1127 ):
1128 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1129 "l3-protocol-data"
1130 ]
1131 ip_profile_dest_data = {}
1132 if "ip-version" in ip_profile_source_data:
1133 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1134 "ip-version"
1135 ]
1136 if "cidr" in ip_profile_source_data:
1137 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1138 "cidr"
1139 ]
1140 if "gateway-ip" in ip_profile_source_data:
1141 ip_profile_dest_data[
1142 "gateway-address"
1143 ] = ip_profile_source_data["gateway-ip"]
1144 if "dhcp-enabled" in ip_profile_source_data:
1145 ip_profile_dest_data["dhcp-params"] = {
1146 "enabled": ip_profile_source_data["dhcp-enabled"]
1147 }
1148
1149 vld_params["ip-profile"] = ip_profile_dest_data
1150 # update vld_params with instantiation params
1151 if vnf_params:
1152 vld_instantiation_params = find_in_list(
1153 get_iterable(vnf_params, "internal-vld"),
1154 lambda i_vld: i_vld["name"] == vld["id"],
1155 )
1156 if vld_instantiation_params:
1157 vld_params.update(vld_instantiation_params)
1158 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1159
1160 vdur_list = []
1161 for vdur in target_vnf.get("vdur", ()):
1162 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1163 continue # This vdu must not be created
1164 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1165
1166 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1167
1168 if ssh_keys_all:
1169 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1170 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1171 if (
1172 vdu_configuration
1173 and vdu_configuration.get("config-access")
1174 and vdu_configuration.get("config-access").get("ssh-access")
1175 ):
1176 vdur["ssh-keys"] = ssh_keys_all
1177 vdur["ssh-access-required"] = vdu_configuration[
1178 "config-access"
1179 ]["ssh-access"]["required"]
1180 elif (
1181 vnf_configuration
1182 and vnf_configuration.get("config-access")
1183 and vnf_configuration.get("config-access").get("ssh-access")
1184 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1185 ):
1186 vdur["ssh-keys"] = ssh_keys_all
1187 vdur["ssh-access-required"] = vnf_configuration[
1188 "config-access"
1189 ]["ssh-access"]["required"]
1190 elif ssh_keys_instantiation and find_in_list(
1191 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1192 ):
1193 vdur["ssh-keys"] = ssh_keys_instantiation
1194
1195 self.logger.debug("NS > vdur > {}".format(vdur))
1196
1197 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1198 # cloud-init
1199 if vdud.get("cloud-init-file"):
1200 vdur["cloud-init"] = "{}:file:{}".format(
1201 vnfd["_id"], vdud.get("cloud-init-file")
1202 )
1203 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1204 if vdur["cloud-init"] not in target["cloud_init_content"]:
1205 base_folder = vnfd["_admin"]["storage"]
1206 if base_folder["pkg-dir"]:
1207 cloud_init_file = "{}/{}/cloud_init/{}".format(
1208 base_folder["folder"],
1209 base_folder["pkg-dir"],
1210 vdud.get("cloud-init-file"),
1211 )
1212 else:
1213 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1214 base_folder["folder"],
1215 vdud.get("cloud-init-file"),
1216 )
1217 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1218 target["cloud_init_content"][
1219 vdur["cloud-init"]
1220 ] = ci_file.read()
1221 elif vdud.get("cloud-init"):
1222 vdur["cloud-init"] = "{}:vdu:{}".format(
1223 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1224 )
1225 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1226 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1227 "cloud-init"
1228 ]
1229 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1230 deploy_params_vdu = self._format_additional_params(
1231 vdur.get("additionalParams") or {}
1232 )
1233 deploy_params_vdu["OSM"] = get_osm_params(
1234 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1235 )
1236 vdur["additionalParams"] = deploy_params_vdu
1237
1238 # flavor
1239 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1240 if target_vim not in ns_flavor["vim_info"]:
1241 ns_flavor["vim_info"][target_vim] = {}
1242
1243 # deal with images
1244 # in case alternative images are provided we must check if they should be applied
1245 # for the vim_type, modify the vim_type taking into account
1246 ns_image_id = int(vdur["ns-image-id"])
1247 if vdur.get("alt-image-ids"):
1248 db_vim = get_vim_account(vnfr["vim-account-id"])
1249 vim_type = db_vim["vim_type"]
1250 for alt_image_id in vdur.get("alt-image-ids"):
1251 ns_alt_image = target["image"][int(alt_image_id)]
1252 if vim_type == ns_alt_image.get("vim-type"):
1253 # must use alternative image
1254 self.logger.debug(
1255 "use alternative image id: {}".format(alt_image_id)
1256 )
1257 ns_image_id = alt_image_id
1258 vdur["ns-image-id"] = ns_image_id
1259 break
1260 ns_image = target["image"][int(ns_image_id)]
1261 if target_vim not in ns_image["vim_info"]:
1262 ns_image["vim_info"][target_vim] = {}
1263
1264 # Affinity groups
1265 if vdur.get("affinity-or-anti-affinity-group-id"):
1266 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1267 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1268 if target_vim not in ns_ags["vim_info"]:
1269 ns_ags["vim_info"][target_vim] = {}
1270
1271 vdur["vim_info"] = {target_vim: {}}
1272 # instantiation parameters
1273 if vnf_params:
1274 vdu_instantiation_params = find_in_list(
1275 get_iterable(vnf_params, "vdu"),
1276 lambda i_vdu: i_vdu["id"] == vdud["id"],
1277 )
1278 if vdu_instantiation_params:
1279 # Parse the vdu_volumes from the instantiation params
1280 vdu_volumes = get_volumes_from_instantiation_params(
1281 vdu_instantiation_params, vdud
1282 )
1283 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1284 vdur_list.append(vdur)
1285 target_vnf["vdur"] = vdur_list
1286 target["vnf"].append(target_vnf)
1287
1288 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1289 desc = await self.RO.deploy(nsr_id, target)
1290 self.logger.debug("RO return > {}".format(desc))
1291 action_id = desc["action_id"]
1292 await self._wait_ng_ro(
1293 nsr_id,
1294 action_id,
1295 nslcmop_id,
1296 start_deploy,
1297 timeout_ns_deploy,
1298 stage,
1299 operation="instantiation",
1300 )
1301
1302 # Updating NSR
1303 db_nsr_update = {
1304 "_admin.deployed.RO.operational-status": "running",
1305 "detailed-status": " ".join(stage),
1306 }
1307 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1308 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1309 self._write_op_status(nslcmop_id, stage)
1310 self.logger.debug(
1311 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1312 )
1313 return
1314
1315 async def _wait_ng_ro(
1316 self,
1317 nsr_id,
1318 action_id,
1319 nslcmop_id=None,
1320 start_time=None,
1321 timeout=600,
1322 stage=None,
1323 operation=None,
1324 ):
1325 detailed_status_old = None
1326 db_nsr_update = {}
1327 start_time = start_time or time()
1328 while time() <= start_time + timeout:
1329 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1330 self.logger.debug("Wait NG RO > {}".format(desc_status))
1331 if desc_status["status"] == "FAILED":
1332 raise NgRoException(desc_status["details"])
1333 elif desc_status["status"] == "BUILD":
1334 if stage:
1335 stage[2] = "VIM: ({})".format(desc_status["details"])
1336 elif desc_status["status"] == "DONE":
1337 if stage:
1338 stage[2] = "Deployed at VIM"
1339 break
1340 else:
1341 assert False, "ROclient.check_ns_status returns unknown {}".format(
1342 desc_status["status"]
1343 )
1344 if stage and nslcmop_id and stage[2] != detailed_status_old:
1345 detailed_status_old = stage[2]
1346 db_nsr_update["detailed-status"] = " ".join(stage)
1347 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1348 self._write_op_status(nslcmop_id, stage)
1349 await asyncio.sleep(15, loop=self.loop)
1350 else: # timeout_ns_deploy
1351 raise NgRoException("Timeout waiting ns to deploy")
1352
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Delete an NS deployment from the VIM through new-generation RO.

        Requests a deploy with an empty target (which makes NG-RO undeploy every
        item of the NS), waits for the RO action to finish and finally deletes
        the RO record itself. Progress and outcome are written to the nsrs
        collection and to the operation status.

        :param logging_text: prefix to use on every log line
        :param nsr_deployed: _admin.deployed content of the NS record (not used here)
        :param nsr_id: NS record identifier
        :param nslcmop_id: NS LCM operation identifier (also used as RO action_id)
        :param stage: 3-item list [general stage, tasks, vim_specific]; item 2
            is overwritten with the VIM deletion outcome
        :raises LcmException: if the deletion fails at RO/VIM
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target makes NG-RO undeploy everything for this NS
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                # already gone at RO: treat as successfully deleted
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        # always record the final VIM-deletion outcome, even on failure
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1425
1426 async def instantiate_RO(
1427 self,
1428 logging_text,
1429 nsr_id,
1430 nsd,
1431 db_nsr,
1432 db_nslcmop,
1433 db_vnfrs,
1434 db_vnfds,
1435 n2vc_key_list,
1436 stage,
1437 ):
1438 """
1439 Instantiate at RO
1440 :param logging_text: preffix text to use at logging
1441 :param nsr_id: nsr identity
1442 :param nsd: database content of ns descriptor
1443 :param db_nsr: database content of ns record
1444 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1445 :param db_vnfrs:
1446 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1447 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1448 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1449 :return: None or exception
1450 """
1451 try:
1452 start_deploy = time()
1453 ns_params = db_nslcmop.get("operationParams")
1454 if ns_params and ns_params.get("timeout_ns_deploy"):
1455 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1456 else:
1457 timeout_ns_deploy = self.timeout.get(
1458 "ns_deploy", self.timeout_ns_deploy
1459 )
1460
1461 # Check for and optionally request placement optimization. Database will be updated if placement activated
1462 stage[2] = "Waiting for Placement."
1463 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1464 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1465 for vnfr in db_vnfrs.values():
1466 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1467 break
1468 else:
1469 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1470
1471 return await self._instantiate_ng_ro(
1472 logging_text,
1473 nsr_id,
1474 nsd,
1475 db_nsr,
1476 db_nslcmop,
1477 db_vnfrs,
1478 db_vnfds,
1479 n2vc_key_list,
1480 stage,
1481 start_deploy,
1482 timeout_ns_deploy,
1483 )
1484 except Exception as e:
1485 stage[2] = "ERROR deploying at VIM"
1486 self.set_vnfr_at_error(db_vnfrs, str(e))
1487 self.logger.error(
1488 "Error deploying at VIM {}".format(e),
1489 exc_info=not isinstance(
1490 e,
1491 (
1492 ROclient.ROClientException,
1493 LcmException,
1494 DbException,
1495 NgRoException,
1496 ),
1497 ),
1498 )
1499 raise
1500
1501 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1502 """
1503 Wait for kdu to be up, get ip address
1504 :param logging_text: prefix use for logging
1505 :param nsr_id:
1506 :param vnfr_id:
1507 :param kdu_name:
1508 :return: IP address, K8s services
1509 """
1510
1511 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1512 nb_tries = 0
1513
1514 while nb_tries < 360:
1515 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1516 kdur = next(
1517 (
1518 x
1519 for x in get_iterable(db_vnfr, "kdur")
1520 if x.get("kdu-name") == kdu_name
1521 ),
1522 None,
1523 )
1524 if not kdur:
1525 raise LcmException(
1526 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1527 )
1528 if kdur.get("status"):
1529 if kdur["status"] in ("READY", "ENABLED"):
1530 return kdur.get("ip-address"), kdur.get("services")
1531 else:
1532 raise LcmException(
1533 "target KDU={} is in error state".format(kdu_name)
1534 )
1535
1536 await asyncio.sleep(10, loop=self.loop)
1537 nb_tries += 1
1538 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1539
1540 async def wait_vm_up_insert_key_ro(
1541 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1542 ):
1543 """
1544 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1545 :param logging_text: prefix use for logging
1546 :param nsr_id:
1547 :param vnfr_id:
1548 :param vdu_id:
1549 :param vdu_index:
1550 :param pub_key: public ssh key to inject, None to skip
1551 :param user: user to apply the public ssh key
1552 :return: IP address
1553 """
1554
1555 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1556 ro_nsr_id = None
1557 ip_address = None
1558 nb_tries = 0
1559 target_vdu_id = None
1560 ro_retries = 0
1561
1562 while True:
1563
1564 ro_retries += 1
1565 if ro_retries >= 360: # 1 hour
1566 raise LcmException(
1567 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1568 )
1569
1570 await asyncio.sleep(10, loop=self.loop)
1571
1572 # get ip address
1573 if not target_vdu_id:
1574 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1575
1576 if not vdu_id: # for the VNF case
1577 if db_vnfr.get("status") == "ERROR":
1578 raise LcmException(
1579 "Cannot inject ssh-key because target VNF is in error state"
1580 )
1581 ip_address = db_vnfr.get("ip-address")
1582 if not ip_address:
1583 continue
1584 vdur = next(
1585 (
1586 x
1587 for x in get_iterable(db_vnfr, "vdur")
1588 if x.get("ip-address") == ip_address
1589 ),
1590 None,
1591 )
1592 else: # VDU case
1593 vdur = next(
1594 (
1595 x
1596 for x in get_iterable(db_vnfr, "vdur")
1597 if x.get("vdu-id-ref") == vdu_id
1598 and x.get("count-index") == vdu_index
1599 ),
1600 None,
1601 )
1602
1603 if (
1604 not vdur and len(db_vnfr.get("vdur", ())) == 1
1605 ): # If only one, this should be the target vdu
1606 vdur = db_vnfr["vdur"][0]
1607 if not vdur:
1608 raise LcmException(
1609 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1610 vnfr_id, vdu_id, vdu_index
1611 )
1612 )
1613 # New generation RO stores information at "vim_info"
1614 ng_ro_status = None
1615 target_vim = None
1616 if vdur.get("vim_info"):
1617 target_vim = next(
1618 t for t in vdur["vim_info"]
1619 ) # there should be only one key
1620 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1621 if (
1622 vdur.get("pdu-type")
1623 or vdur.get("status") == "ACTIVE"
1624 or ng_ro_status == "ACTIVE"
1625 ):
1626 ip_address = vdur.get("ip-address")
1627 if not ip_address:
1628 continue
1629 target_vdu_id = vdur["vdu-id-ref"]
1630 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1631 raise LcmException(
1632 "Cannot inject ssh-key because target VM is in error state"
1633 )
1634
1635 if not target_vdu_id:
1636 continue
1637
1638 # inject public key into machine
1639 if pub_key and user:
1640 self.logger.debug(logging_text + "Inserting RO key")
1641 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1642 if vdur.get("pdu-type"):
1643 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1644 return ip_address
1645 try:
1646 ro_vm_id = "{}-{}".format(
1647 db_vnfr["member-vnf-index-ref"], target_vdu_id
1648 ) # TODO add vdu_index
1649 if self.ng_ro:
1650 target = {
1651 "action": {
1652 "action": "inject_ssh_key",
1653 "key": pub_key,
1654 "user": user,
1655 },
1656 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1657 }
1658 desc = await self.RO.deploy(nsr_id, target)
1659 action_id = desc["action_id"]
1660 await self._wait_ng_ro(
1661 nsr_id, action_id, timeout=600, operation="instantiation"
1662 )
1663 break
1664 else:
1665 # wait until NS is deployed at RO
1666 if not ro_nsr_id:
1667 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1668 ro_nsr_id = deep_get(
1669 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1670 )
1671 if not ro_nsr_id:
1672 continue
1673 result_dict = await self.RO.create_action(
1674 item="ns",
1675 item_id_name=ro_nsr_id,
1676 descriptor={
1677 "add_public_key": pub_key,
1678 "vms": [ro_vm_id],
1679 "user": user,
1680 },
1681 )
1682 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1683 if not result_dict or not isinstance(result_dict, dict):
1684 raise LcmException(
1685 "Unknown response from RO when injecting key"
1686 )
1687 for result in result_dict.values():
1688 if result.get("vim_result") == 200:
1689 break
1690 else:
1691 raise ROclient.ROClientException(
1692 "error injecting key: {}".format(
1693 result.get("description")
1694 )
1695 )
1696 break
1697 except NgRoException as e:
1698 raise LcmException(
1699 "Reaching max tries injecting key. Error: {}".format(e)
1700 )
1701 except ROclient.ROClientException as e:
1702 if not nb_tries:
1703 self.logger.debug(
1704 logging_text
1705 + "error injecting key: {}. Retrying until {} seconds".format(
1706 e, 20 * 10
1707 )
1708 )
1709 nb_tries += 1
1710 if nb_tries >= 20:
1711 raise LcmException(
1712 "Reaching max tries injecting key. Error: {}".format(e)
1713 )
1714 else:
1715 break
1716
1717 return ip_address
1718
1719 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1720 """
1721 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1722 """
1723 my_vca = vca_deployed_list[vca_index]
1724 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1725 # vdu or kdu: no dependencies
1726 return
1727 timeout = 300
1728 while timeout >= 0:
1729 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1730 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1731 configuration_status_list = db_nsr["configurationStatus"]
1732 for index, vca_deployed in enumerate(configuration_status_list):
1733 if index == vca_index:
1734 # myself
1735 continue
1736 if not my_vca.get("member-vnf-index") or (
1737 vca_deployed.get("member-vnf-index")
1738 == my_vca.get("member-vnf-index")
1739 ):
1740 internal_status = configuration_status_list[index].get("status")
1741 if internal_status == "READY":
1742 continue
1743 elif internal_status == "BROKEN":
1744 raise LcmException(
1745 "Configuration aborted because dependent charm/s has failed"
1746 )
1747 else:
1748 break
1749 else:
1750 # no dependencies, return
1751 return
1752 await asyncio.sleep(10)
1753 timeout -= 1
1754
1755 raise LcmException("Configuration aborted because dependent charm/s timeout")
1756
1757 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1758 vca_id = None
1759 if db_vnfr:
1760 vca_id = deep_get(db_vnfr, ("vca-id",))
1761 elif db_nsr:
1762 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1763 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1764 return vca_id
1765
1766 async def instantiate_N2VC(
1767 self,
1768 logging_text,
1769 vca_index,
1770 nsi_id,
1771 db_nsr,
1772 db_vnfr,
1773 vdu_id,
1774 kdu_name,
1775 vdu_index,
1776 config_descriptor,
1777 deploy_params,
1778 base_folder,
1779 nslcmop_id,
1780 stage,
1781 vca_type,
1782 vca_name,
1783 ee_config_descriptor,
1784 ):
1785 nsr_id = db_nsr["_id"]
1786 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1787 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1788 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1789 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1790 db_dict = {
1791 "collection": "nsrs",
1792 "filter": {"_id": nsr_id},
1793 "path": db_update_entry,
1794 }
1795 step = ""
1796 try:
1797
1798 element_type = "NS"
1799 element_under_configuration = nsr_id
1800
1801 vnfr_id = None
1802 if db_vnfr:
1803 vnfr_id = db_vnfr["_id"]
1804 osm_config["osm"]["vnf_id"] = vnfr_id
1805
1806 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1807
1808 if vca_type == "native_charm":
1809 index_number = 0
1810 else:
1811 index_number = vdu_index or 0
1812
1813 if vnfr_id:
1814 element_type = "VNF"
1815 element_under_configuration = vnfr_id
1816 namespace += ".{}-{}".format(vnfr_id, index_number)
1817 if vdu_id:
1818 namespace += ".{}-{}".format(vdu_id, index_number)
1819 element_type = "VDU"
1820 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1821 osm_config["osm"]["vdu_id"] = vdu_id
1822 elif kdu_name:
1823 namespace += ".{}".format(kdu_name)
1824 element_type = "KDU"
1825 element_under_configuration = kdu_name
1826 osm_config["osm"]["kdu_name"] = kdu_name
1827
1828 # Get artifact path
1829 if base_folder["pkg-dir"]:
1830 artifact_path = "{}/{}/{}/{}".format(
1831 base_folder["folder"],
1832 base_folder["pkg-dir"],
1833 "charms"
1834 if vca_type
1835 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1836 else "helm-charts",
1837 vca_name,
1838 )
1839 else:
1840 artifact_path = "{}/Scripts/{}/{}/".format(
1841 base_folder["folder"],
1842 "charms"
1843 if vca_type
1844 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1845 else "helm-charts",
1846 vca_name,
1847 )
1848
1849 self.logger.debug("Artifact path > {}".format(artifact_path))
1850
1851 # get initial_config_primitive_list that applies to this element
1852 initial_config_primitive_list = config_descriptor.get(
1853 "initial-config-primitive"
1854 )
1855
1856 self.logger.debug(
1857 "Initial config primitive list > {}".format(
1858 initial_config_primitive_list
1859 )
1860 )
1861
1862 # add config if not present for NS charm
1863 ee_descriptor_id = ee_config_descriptor.get("id")
1864 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1865 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1866 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1867 )
1868
1869 self.logger.debug(
1870 "Initial config primitive list #2 > {}".format(
1871 initial_config_primitive_list
1872 )
1873 )
1874 # n2vc_redesign STEP 3.1
1875 # find old ee_id if exists
1876 ee_id = vca_deployed.get("ee_id")
1877
1878 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1879 # create or register execution environment in VCA
1880 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1881
1882 self._write_configuration_status(
1883 nsr_id=nsr_id,
1884 vca_index=vca_index,
1885 status="CREATING",
1886 element_under_configuration=element_under_configuration,
1887 element_type=element_type,
1888 )
1889
1890 step = "create execution environment"
1891 self.logger.debug(logging_text + step)
1892
1893 ee_id = None
1894 credentials = None
1895 if vca_type == "k8s_proxy_charm":
1896 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1897 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1898 namespace=namespace,
1899 artifact_path=artifact_path,
1900 db_dict=db_dict,
1901 vca_id=vca_id,
1902 )
1903 elif vca_type == "helm" or vca_type == "helm-v3":
1904 ee_id, credentials = await self.vca_map[
1905 vca_type
1906 ].create_execution_environment(
1907 namespace=namespace,
1908 reuse_ee_id=ee_id,
1909 db_dict=db_dict,
1910 config=osm_config,
1911 artifact_path=artifact_path,
1912 chart_model=vca_name,
1913 vca_type=vca_type,
1914 )
1915 else:
1916 ee_id, credentials = await self.vca_map[
1917 vca_type
1918 ].create_execution_environment(
1919 namespace=namespace,
1920 reuse_ee_id=ee_id,
1921 db_dict=db_dict,
1922 vca_id=vca_id,
1923 )
1924
1925 elif vca_type == "native_charm":
1926 step = "Waiting to VM being up and getting IP address"
1927 self.logger.debug(logging_text + step)
1928 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1929 logging_text,
1930 nsr_id,
1931 vnfr_id,
1932 vdu_id,
1933 vdu_index,
1934 user=None,
1935 pub_key=None,
1936 )
1937 credentials = {"hostname": rw_mgmt_ip}
1938 # get username
1939 username = deep_get(
1940 config_descriptor, ("config-access", "ssh-access", "default-user")
1941 )
1942 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1943 # merged. Meanwhile let's get username from initial-config-primitive
1944 if not username and initial_config_primitive_list:
1945 for config_primitive in initial_config_primitive_list:
1946 for param in config_primitive.get("parameter", ()):
1947 if param["name"] == "ssh-username":
1948 username = param["value"]
1949 break
1950 if not username:
1951 raise LcmException(
1952 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1953 "'config-access.ssh-access.default-user'"
1954 )
1955 credentials["username"] = username
1956 # n2vc_redesign STEP 3.2
1957
1958 self._write_configuration_status(
1959 nsr_id=nsr_id,
1960 vca_index=vca_index,
1961 status="REGISTERING",
1962 element_under_configuration=element_under_configuration,
1963 element_type=element_type,
1964 )
1965
1966 step = "register execution environment {}".format(credentials)
1967 self.logger.debug(logging_text + step)
1968 ee_id = await self.vca_map[vca_type].register_execution_environment(
1969 credentials=credentials,
1970 namespace=namespace,
1971 db_dict=db_dict,
1972 vca_id=vca_id,
1973 )
1974
1975 # for compatibility with MON/POL modules, the need model and application name at database
1976 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1977 ee_id_parts = ee_id.split(".")
1978 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1979 if len(ee_id_parts) >= 2:
1980 model_name = ee_id_parts[0]
1981 application_name = ee_id_parts[1]
1982 db_nsr_update[db_update_entry + "model"] = model_name
1983 db_nsr_update[db_update_entry + "application"] = application_name
1984
1985 # n2vc_redesign STEP 3.3
1986 step = "Install configuration Software"
1987
1988 self._write_configuration_status(
1989 nsr_id=nsr_id,
1990 vca_index=vca_index,
1991 status="INSTALLING SW",
1992 element_under_configuration=element_under_configuration,
1993 element_type=element_type,
1994 other_update=db_nsr_update,
1995 )
1996
1997 # TODO check if already done
1998 self.logger.debug(logging_text + step)
1999 config = None
2000 if vca_type == "native_charm":
2001 config_primitive = next(
2002 (p for p in initial_config_primitive_list if p["name"] == "config"),
2003 None,
2004 )
2005 if config_primitive:
2006 config = self._map_primitive_params(
2007 config_primitive, {}, deploy_params
2008 )
2009 num_units = 1
2010 if vca_type == "lxc_proxy_charm":
2011 if element_type == "NS":
2012 num_units = db_nsr.get("config-units") or 1
2013 elif element_type == "VNF":
2014 num_units = db_vnfr.get("config-units") or 1
2015 elif element_type == "VDU":
2016 for v in db_vnfr["vdur"]:
2017 if vdu_id == v["vdu-id-ref"]:
2018 num_units = v.get("config-units") or 1
2019 break
2020 if vca_type != "k8s_proxy_charm":
2021 await self.vca_map[vca_type].install_configuration_sw(
2022 ee_id=ee_id,
2023 artifact_path=artifact_path,
2024 db_dict=db_dict,
2025 config=config,
2026 num_units=num_units,
2027 vca_id=vca_id,
2028 vca_type=vca_type,
2029 )
2030
2031 # write in db flag of configuration_sw already installed
2032 self.update_db_2(
2033 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2034 )
2035
2036 # add relations for this VCA (wait for other peers related with this VCA)
2037 await self._add_vca_relations(
2038 logging_text=logging_text,
2039 nsr_id=nsr_id,
2040 vca_type=vca_type,
2041 vca_index=vca_index,
2042 )
2043
2044 # if SSH access is required, then get execution environment SSH public
2045 # if native charm we have waited already to VM be UP
2046 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2047 pub_key = None
2048 user = None
2049 # self.logger.debug("get ssh key block")
2050 if deep_get(
2051 config_descriptor, ("config-access", "ssh-access", "required")
2052 ):
2053 # self.logger.debug("ssh key needed")
2054 # Needed to inject a ssh key
2055 user = deep_get(
2056 config_descriptor,
2057 ("config-access", "ssh-access", "default-user"),
2058 )
2059 step = "Install configuration Software, getting public ssh key"
2060 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2061 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2062 )
2063
2064 step = "Insert public key into VM user={} ssh_key={}".format(
2065 user, pub_key
2066 )
2067 else:
2068 # self.logger.debug("no need to get ssh key")
2069 step = "Waiting to VM being up and getting IP address"
2070 self.logger.debug(logging_text + step)
2071
2072 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2073 rw_mgmt_ip = None
2074
2075 # n2vc_redesign STEP 5.1
2076 # wait for RO (ip-address) Insert pub_key into VM
2077 if vnfr_id:
2078 if kdu_name:
2079 rw_mgmt_ip, services = await self.wait_kdu_up(
2080 logging_text, nsr_id, vnfr_id, kdu_name
2081 )
2082 vnfd = self.db.get_one(
2083 "vnfds_revisions",
2084 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2085 )
2086 kdu = get_kdu(vnfd, kdu_name)
2087 kdu_services = [
2088 service["name"] for service in get_kdu_services(kdu)
2089 ]
2090 exposed_services = []
2091 for service in services:
2092 if any(s in service["name"] for s in kdu_services):
2093 exposed_services.append(service)
2094 await self.vca_map[vca_type].exec_primitive(
2095 ee_id=ee_id,
2096 primitive_name="config",
2097 params_dict={
2098 "osm-config": json.dumps(
2099 OsmConfigBuilder(
2100 k8s={"services": exposed_services}
2101 ).build()
2102 )
2103 },
2104 vca_id=vca_id,
2105 )
2106
2107 # This verification is needed in order to avoid trying to add a public key
2108 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2109 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2110 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2111 # or it is a KNF)
2112 elif db_vnfr.get("vdur"):
2113 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2114 logging_text,
2115 nsr_id,
2116 vnfr_id,
2117 vdu_id,
2118 vdu_index,
2119 user=user,
2120 pub_key=pub_key,
2121 )
2122
2123 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2124
2125 # store rw_mgmt_ip in deploy params for later replacement
2126 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2127
2128 # n2vc_redesign STEP 6 Execute initial config primitive
2129 step = "execute initial config primitive"
2130
2131 # wait for dependent primitives execution (NS -> VNF -> VDU)
2132 if initial_config_primitive_list:
2133 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2134
2135 # stage, in function of element type: vdu, kdu, vnf or ns
2136 my_vca = vca_deployed_list[vca_index]
2137 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2138 # VDU or KDU
2139 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2140 elif my_vca.get("member-vnf-index"):
2141 # VNF
2142 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2143 else:
2144 # NS
2145 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2146
2147 self._write_configuration_status(
2148 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2149 )
2150
2151 self._write_op_status(op_id=nslcmop_id, stage=stage)
2152
2153 check_if_terminated_needed = True
2154 for initial_config_primitive in initial_config_primitive_list:
2155 # adding information on the vca_deployed if it is a NS execution environment
2156 if not vca_deployed["member-vnf-index"]:
2157 deploy_params["ns_config_info"] = json.dumps(
2158 self._get_ns_config_info(nsr_id)
2159 )
2160 # TODO check if already done
2161 primitive_params_ = self._map_primitive_params(
2162 initial_config_primitive, {}, deploy_params
2163 )
2164
2165 step = "execute primitive '{}' params '{}'".format(
2166 initial_config_primitive["name"], primitive_params_
2167 )
2168 self.logger.debug(logging_text + step)
2169 await self.vca_map[vca_type].exec_primitive(
2170 ee_id=ee_id,
2171 primitive_name=initial_config_primitive["name"],
2172 params_dict=primitive_params_,
2173 db_dict=db_dict,
2174 vca_id=vca_id,
2175 vca_type=vca_type,
2176 )
2177 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2178 if check_if_terminated_needed:
2179 if config_descriptor.get("terminate-config-primitive"):
2180 self.update_db_2(
2181 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2182 )
2183 check_if_terminated_needed = False
2184
2185 # TODO register in database that primitive is done
2186
2187 # STEP 7 Configure metrics
2188 if vca_type == "helm" or vca_type == "helm-v3":
2189 # TODO: review for those cases where the helm chart is a reference and
2190 # is not part of the NF package
2191 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2192 ee_id=ee_id,
2193 artifact_path=artifact_path,
2194 ee_config_descriptor=ee_config_descriptor,
2195 vnfr_id=vnfr_id,
2196 nsr_id=nsr_id,
2197 target_ip=rw_mgmt_ip,
2198 )
2199 if prometheus_jobs:
2200 self.update_db_2(
2201 "nsrs",
2202 nsr_id,
2203 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2204 )
2205
2206 for job in prometheus_jobs:
2207 self.db.set_one(
2208 "prometheus_jobs",
2209 {"job_name": job["job_name"]},
2210 job,
2211 upsert=True,
2212 fail_on_empty=False,
2213 )
2214
2215 step = "instantiated at VCA"
2216 self.logger.debug(logging_text + step)
2217
2218 self._write_configuration_status(
2219 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2220 )
2221
2222 except Exception as e: # TODO not use Exception but N2VC exception
2223 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2224 if not isinstance(
2225 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2226 ):
2227 self.logger.error(
2228 "Exception while {} : {}".format(step, e), exc_info=True
2229 )
2230 self._write_configuration_status(
2231 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2232 )
2233 raise LcmException("{} {}".format(step, e)) from e
2234
2235 def _write_ns_status(
2236 self,
2237 nsr_id: str,
2238 ns_state: str,
2239 current_operation: str,
2240 current_operation_id: str,
2241 error_description: str = None,
2242 error_detail: str = None,
2243 other_update: dict = None,
2244 ):
2245 """
2246 Update db_nsr fields.
2247 :param nsr_id:
2248 :param ns_state:
2249 :param current_operation:
2250 :param current_operation_id:
2251 :param error_description:
2252 :param error_detail:
2253 :param other_update: Other required changes at database if provided, will be cleared
2254 :return:
2255 """
2256 try:
2257 db_dict = other_update or {}
2258 db_dict[
2259 "_admin.nslcmop"
2260 ] = current_operation_id # for backward compatibility
2261 db_dict["_admin.current-operation"] = current_operation_id
2262 db_dict["_admin.operation-type"] = (
2263 current_operation if current_operation != "IDLE" else None
2264 )
2265 db_dict["currentOperation"] = current_operation
2266 db_dict["currentOperationID"] = current_operation_id
2267 db_dict["errorDescription"] = error_description
2268 db_dict["errorDetail"] = error_detail
2269
2270 if ns_state:
2271 db_dict["nsState"] = ns_state
2272 self.update_db_2("nsrs", nsr_id, db_dict)
2273 except DbException as e:
2274 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2275
2276 def _write_op_status(
2277 self,
2278 op_id: str,
2279 stage: list = None,
2280 error_message: str = None,
2281 queuePosition: int = 0,
2282 operation_state: str = None,
2283 other_update: dict = None,
2284 ):
2285 try:
2286 db_dict = other_update or {}
2287 db_dict["queuePosition"] = queuePosition
2288 if isinstance(stage, list):
2289 db_dict["stage"] = stage[0]
2290 db_dict["detailed-status"] = " ".join(stage)
2291 elif stage is not None:
2292 db_dict["stage"] = str(stage)
2293
2294 if error_message is not None:
2295 db_dict["errorMessage"] = error_message
2296 if operation_state is not None:
2297 db_dict["operationState"] = operation_state
2298 db_dict["statusEnteredTime"] = time()
2299 self.update_db_2("nslcmops", op_id, db_dict)
2300 except DbException as e:
2301 self.logger.warn(
2302 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2303 )
2304
2305 def _write_all_config_status(self, db_nsr: dict, status: str):
2306 try:
2307 nsr_id = db_nsr["_id"]
2308 # configurationStatus
2309 config_status = db_nsr.get("configurationStatus")
2310 if config_status:
2311 db_nsr_update = {
2312 "configurationStatus.{}.status".format(index): status
2313 for index, v in enumerate(config_status)
2314 if v
2315 }
2316 # update status
2317 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2318
2319 except DbException as e:
2320 self.logger.warn(
2321 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2322 )
2323
2324 def _write_configuration_status(
2325 self,
2326 nsr_id: str,
2327 vca_index: int,
2328 status: str = None,
2329 element_under_configuration: str = None,
2330 element_type: str = None,
2331 other_update: dict = None,
2332 ):
2333
2334 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2335 # .format(vca_index, status))
2336
2337 try:
2338 db_path = "configurationStatus.{}.".format(vca_index)
2339 db_dict = other_update or {}
2340 if status:
2341 db_dict[db_path + "status"] = status
2342 if element_under_configuration:
2343 db_dict[
2344 db_path + "elementUnderConfiguration"
2345 ] = element_under_configuration
2346 if element_type:
2347 db_dict[db_path + "elementType"] = element_type
2348 self.update_db_2("nsrs", nsr_id, db_dict)
2349 except DbException as e:
2350 self.logger.warn(
2351 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2352 status, nsr_id, vca_index, e
2353 )
2354 )
2355
2356 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2357 """
2358 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2359 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2360 Database is used because the result can be obtained from a different LCM worker in case of HA.
2361 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2362 :param db_nslcmop: database content of nslcmop
2363 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2364 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2365 computed 'vim-account-id'
2366 """
2367 modified = False
2368 nslcmop_id = db_nslcmop["_id"]
2369 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2370 if placement_engine == "PLA":
2371 self.logger.debug(
2372 logging_text + "Invoke and wait for placement optimization"
2373 )
2374 await self.msg.aiowrite(
2375 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2376 )
2377 db_poll_interval = 5
2378 wait = db_poll_interval * 10
2379 pla_result = None
2380 while not pla_result and wait >= 0:
2381 await asyncio.sleep(db_poll_interval)
2382 wait -= db_poll_interval
2383 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2384 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2385
2386 if not pla_result:
2387 raise LcmException(
2388 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2389 )
2390
2391 for pla_vnf in pla_result["vnf"]:
2392 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2393 if not pla_vnf.get("vimAccountId") or not vnfr:
2394 continue
2395 modified = True
2396 self.db.set_one(
2397 "vnfrs",
2398 {"_id": vnfr["_id"]},
2399 {"vim-account-id": pla_vnf["vimAccountId"]},
2400 )
2401 # Modifies db_vnfrs
2402 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2403 return modified
2404
2405 def update_nsrs_with_pla_result(self, params):
2406 try:
2407 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2408 self.update_db_2(
2409 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2410 )
2411 except Exception as e:
2412 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2413
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: deploy KDUs, VMs (via RO) and execution
        environments (via N2VC), then run Day-1 configuration. Progress and final
        status are persisted at the nsrs/nslcmops records; errors are collected
        and reported in the finally block rather than propagated.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Outcome is written to the database and notified via kafka.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                # stored as a JSON string at database; decode it once here
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # timeout taken from the operation params when provided, otherwise
            # from the LCM configuration (with the legacy attribute as fallback)
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            # KDU additional params are stored JSON-encoded
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): membership test compares the id string against a
                # list of vnfd dicts, so it is always True and each vnfd may be
                # read more than once -- harmless but redundant; confirm intent
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with the N2VC deployment below;
            # completion is awaited in the finally block via _wait_for_tasks
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if the descriptor declares one
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # Deploy charms for each KDU that declares a configuration
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    # _wait_for_tasks returns the list of errors of the awaited tasks
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify the final operation state so NBI/other modules react
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2899
2900 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2901 if vnfd_id not in cached_vnfds:
2902 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2903 return cached_vnfds[vnfd_id]
2904
2905 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2906 if vnf_profile_id not in cached_vnfrs:
2907 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2908 "vnfrs",
2909 {
2910 "member-vnf-index-ref": vnf_profile_id,
2911 "nsr-id-ref": nsr_id,
2912 },
2913 )
2914 return cached_vnfrs[vnf_profile_id]
2915
2916 def _is_deployed_vca_in_relation(
2917 self, vca: DeployedVCA, relation: Relation
2918 ) -> bool:
2919 found = False
2920 for endpoint in (relation.provider, relation.requirer):
2921 if endpoint["kdu-resource-profile-id"]:
2922 continue
2923 found = (
2924 vca.vnf_profile_id == endpoint.vnf_profile_id
2925 and vca.vdu_profile_id == endpoint.vdu_profile_id
2926 and vca.execution_environment_ref == endpoint.execution_environment_ref
2927 )
2928 if found:
2929 break
2930 return found
2931
2932 def _update_ee_relation_data_with_implicit_data(
2933 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2934 ):
2935 ee_relation_data = safe_get_ee_relation(
2936 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2937 )
2938 ee_relation_level = EELevel.get_level(ee_relation_data)
2939 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2940 "execution-environment-ref"
2941 ]:
2942 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2943 vnfd_id = vnf_profile["vnfd-id"]
2944 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2945 entity_id = (
2946 vnfd_id
2947 if ee_relation_level == EELevel.VNF
2948 else ee_relation_data["vdu-profile-id"]
2949 )
2950 ee = get_juju_ee_ref(db_vnfd, entity_id)
2951 if not ee:
2952 raise Exception(
2953 f"not execution environments found for ee_relation {ee_relation_data}"
2954 )
2955 ee_relation_data["execution-environment-ref"] = ee["id"]
2956 return ee_relation_data
2957
2958 def _get_ns_relations(
2959 self,
2960 nsr_id: str,
2961 nsd: Dict[str, Any],
2962 vca: DeployedVCA,
2963 cached_vnfds: Dict[str, Any],
2964 ) -> List[Relation]:
2965 relations = []
2966 db_ns_relations = get_ns_configuration_relation_list(nsd)
2967 for r in db_ns_relations:
2968 provider_dict = None
2969 requirer_dict = None
2970 if all(key in r for key in ("provider", "requirer")):
2971 provider_dict = r["provider"]
2972 requirer_dict = r["requirer"]
2973 elif "entities" in r:
2974 provider_id = r["entities"][0]["id"]
2975 provider_dict = {
2976 "nsr-id": nsr_id,
2977 "endpoint": r["entities"][0]["endpoint"],
2978 }
2979 if provider_id != nsd["id"]:
2980 provider_dict["vnf-profile-id"] = provider_id
2981 requirer_id = r["entities"][1]["id"]
2982 requirer_dict = {
2983 "nsr-id": nsr_id,
2984 "endpoint": r["entities"][1]["endpoint"],
2985 }
2986 if requirer_id != nsd["id"]:
2987 requirer_dict["vnf-profile-id"] = requirer_id
2988 else:
2989 raise Exception(
2990 "provider/requirer or entities must be included in the relation."
2991 )
2992 relation_provider = self._update_ee_relation_data_with_implicit_data(
2993 nsr_id, nsd, provider_dict, cached_vnfds
2994 )
2995 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2996 nsr_id, nsd, requirer_dict, cached_vnfds
2997 )
2998 provider = EERelation(relation_provider)
2999 requirer = EERelation(relation_requirer)
3000 relation = Relation(r["name"], provider, requirer)
3001 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3002 if vca_in_relation:
3003 relations.append(relation)
3004 return relations
3005
3006 def _get_vnf_relations(
3007 self,
3008 nsr_id: str,
3009 nsd: Dict[str, Any],
3010 vca: DeployedVCA,
3011 cached_vnfds: Dict[str, Any],
3012 ) -> List[Relation]:
3013 relations = []
3014 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3015 vnf_profile_id = vnf_profile["id"]
3016 vnfd_id = vnf_profile["vnfd-id"]
3017 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3018 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3019 for r in db_vnf_relations:
3020 provider_dict = None
3021 requirer_dict = None
3022 if all(key in r for key in ("provider", "requirer")):
3023 provider_dict = r["provider"]
3024 requirer_dict = r["requirer"]
3025 elif "entities" in r:
3026 provider_id = r["entities"][0]["id"]
3027 provider_dict = {
3028 "nsr-id": nsr_id,
3029 "vnf-profile-id": vnf_profile_id,
3030 "endpoint": r["entities"][0]["endpoint"],
3031 }
3032 if provider_id != vnfd_id:
3033 provider_dict["vdu-profile-id"] = provider_id
3034 requirer_id = r["entities"][1]["id"]
3035 requirer_dict = {
3036 "nsr-id": nsr_id,
3037 "vnf-profile-id": vnf_profile_id,
3038 "endpoint": r["entities"][1]["endpoint"],
3039 }
3040 if requirer_id != vnfd_id:
3041 requirer_dict["vdu-profile-id"] = requirer_id
3042 else:
3043 raise Exception(
3044 "provider/requirer or entities must be included in the relation."
3045 )
3046 relation_provider = self._update_ee_relation_data_with_implicit_data(
3047 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3048 )
3049 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3050 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3051 )
3052 provider = EERelation(relation_provider)
3053 requirer = EERelation(relation_requirer)
3054 relation = Relation(r["name"], provider, requirer)
3055 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3056 if vca_in_relation:
3057 relations.append(relation)
3058 return relations
3059
3060 def _get_kdu_resource_data(
3061 self,
3062 ee_relation: EERelation,
3063 db_nsr: Dict[str, Any],
3064 cached_vnfds: Dict[str, Any],
3065 ) -> DeployedK8sResource:
3066 nsd = get_nsd(db_nsr)
3067 vnf_profiles = get_vnf_profiles(nsd)
3068 vnfd_id = find_in_list(
3069 vnf_profiles,
3070 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3071 )["vnfd-id"]
3072 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3073 kdu_resource_profile = get_kdu_resource_profile(
3074 db_vnfd, ee_relation.kdu_resource_profile_id
3075 )
3076 kdu_name = kdu_resource_profile["kdu-name"]
3077 deployed_kdu, _ = get_deployed_kdu(
3078 db_nsr.get("_admin", ()).get("deployed", ()),
3079 kdu_name,
3080 ee_relation.vnf_profile_id,
3081 )
3082 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3083 return deployed_kdu
3084
3085 def _get_deployed_component(
3086 self,
3087 ee_relation: EERelation,
3088 db_nsr: Dict[str, Any],
3089 cached_vnfds: Dict[str, Any],
3090 ) -> DeployedComponent:
3091 nsr_id = db_nsr["_id"]
3092 deployed_component = None
3093 ee_level = EELevel.get_level(ee_relation)
3094 if ee_level == EELevel.NS:
3095 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3096 if vca:
3097 deployed_component = DeployedVCA(nsr_id, vca)
3098 elif ee_level == EELevel.VNF:
3099 vca = get_deployed_vca(
3100 db_nsr,
3101 {
3102 "vdu_id": None,
3103 "member-vnf-index": ee_relation.vnf_profile_id,
3104 "ee_descriptor_id": ee_relation.execution_environment_ref,
3105 },
3106 )
3107 if vca:
3108 deployed_component = DeployedVCA(nsr_id, vca)
3109 elif ee_level == EELevel.VDU:
3110 vca = get_deployed_vca(
3111 db_nsr,
3112 {
3113 "vdu_id": ee_relation.vdu_profile_id,
3114 "member-vnf-index": ee_relation.vnf_profile_id,
3115 "ee_descriptor_id": ee_relation.execution_environment_ref,
3116 },
3117 )
3118 if vca:
3119 deployed_component = DeployedVCA(nsr_id, vca)
3120 elif ee_level == EELevel.KDU:
3121 kdu_resource_data = self._get_kdu_resource_data(
3122 ee_relation, db_nsr, cached_vnfds
3123 )
3124 if kdu_resource_data:
3125 deployed_component = DeployedK8sResource(kdu_resource_data)
3126 return deployed_component
3127
3128 async def _add_relation(
3129 self,
3130 relation: Relation,
3131 vca_type: str,
3132 db_nsr: Dict[str, Any],
3133 cached_vnfds: Dict[str, Any],
3134 cached_vnfrs: Dict[str, Any],
3135 ) -> bool:
3136 deployed_provider = self._get_deployed_component(
3137 relation.provider, db_nsr, cached_vnfds
3138 )
3139 deployed_requirer = self._get_deployed_component(
3140 relation.requirer, db_nsr, cached_vnfds
3141 )
3142 if (
3143 deployed_provider
3144 and deployed_requirer
3145 and deployed_provider.config_sw_installed
3146 and deployed_requirer.config_sw_installed
3147 ):
3148 provider_db_vnfr = (
3149 self._get_vnfr(
3150 relation.provider.nsr_id,
3151 relation.provider.vnf_profile_id,
3152 cached_vnfrs,
3153 )
3154 if relation.provider.vnf_profile_id
3155 else None
3156 )
3157 requirer_db_vnfr = (
3158 self._get_vnfr(
3159 relation.requirer.nsr_id,
3160 relation.requirer.vnf_profile_id,
3161 cached_vnfrs,
3162 )
3163 if relation.requirer.vnf_profile_id
3164 else None
3165 )
3166 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3167 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3168 provider_relation_endpoint = RelationEndpoint(
3169 deployed_provider.ee_id,
3170 provider_vca_id,
3171 relation.provider.endpoint,
3172 )
3173 requirer_relation_endpoint = RelationEndpoint(
3174 deployed_requirer.ee_id,
3175 requirer_vca_id,
3176 relation.requirer.endpoint,
3177 )
3178 await self.vca_map[vca_type].add_relation(
3179 provider=provider_relation_endpoint,
3180 requirer=requirer_relation_endpoint,
3181 )
3182 # remove entry from relations list
3183 return True
3184 return False
3185
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Establish all relations that involve the VCA at *vca_index*.

        Collects NS-level and VNF-level relations for this VCA and then
        polls (5 s period, up to *timeout* seconds) until each relation's
        peers are deployed and the relation could be added.

        :param logging_text: prefix for log messages.
        :param nsr_id: NS record id.
        :param vca_type: VCA connector type (key of self.vca_map).
        :param vca_index: index of this VCA in _admin.deployed.VCA.
        :param timeout: maximum time in seconds to wait for peers.
        :return: True on success (or nothing to do); False on timeout/error.
        """
        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # iterate over a copy: successful relations are removed in-loop
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            # best-effort: any failure is reported as False, never raised
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3258
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU and update the related DB records.

        Instantiates the KDU on the target K8s cluster, stores the
        kdu-instance (and, for juju bundles, the namespace) in the nsr,
        records the KDU services and management IP in the vnfr, and runs the
        initial-config-primitives that are not handled by an execution
        environment.

        :param nsr_id: NS record id.
        :param nsr_db_path: path of this KDU inside the nsr
            (_admin.deployed.K8s.<index>).
        :param vnfr_data: VNF record the KDU belongs to.
        :param kdu_index: index of the kdur inside the vnfr.
        :param kdud: KDU descriptor from the VNFD.
        :param vnfd: VNF descriptor.
        :param k8s_instance_info: deployment info (cluster uuid/type, kdu
            model/name, namespace, ...).
        :param k8params: KDU instantiation parameters.
        :param timeout: timeout in seconds for the install and for each
            initial-config-primitive.
        :param vca_id: VCA id to use, if any.
        :return: the kdu_instance name.
        :raises Exception: re-raises any failure after recording an ERROR
            status in the nsr/vnfr.
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt-service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            # run initial-config-primitives only when there is no juju EE
            # declared for this KDU (otherwise the EE handles them)
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3456
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Deploy every KDU declared in the VNF records of the NS.

        For each kdur: resolves the target K8s cluster (initializing
        helm-v3 support for pre-existing clusters when needed), synchronizes
        helm repos once per cluster, stores the deployment info at
        _admin.deployed.K8s.<index> and launches an _install_kdu task
        registered in *task_instantiation_info*.

        :param logging_text: prefix for log messages.
        :param nsr_id: NS record id.
        :param nslcmop_id: NS LCM operation id (used to register tasks).
        :param db_vnfrs: VNF records, keyed by member-vnf-index.
        :param db_vnfds: list of VNF descriptors.
        :param task_instantiation_info: dict where launched asyncio tasks are
            registered with a human-readable description.
        :raises LcmException: on any deployment-preparation failure.
        """
        # Launch kdus if present in the descriptor

        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the internal K8s cluster id for the given
            # cluster/type, waiting for any in-progress k8scluster task first.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever was deployed so far, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3728
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment.

        Determines the VCA type (lxc/k8s/native charm or helm) for every
        execution environment in *descriptor_config*, reuses or creates the
        matching _admin.deployed.VCA entry in the NS record and registers an
        asyncio task per execution environment in *task_instantiation_info*.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # for/else: reuse an existing deployed-VCA entry if one matches,
            # otherwise (else branch) append a new one.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3890
3891 @staticmethod
3892 def _create_nslcmop(nsr_id, operation, params):
3893 """
3894 Creates a ns-lcm-opp content to be stored at database.
3895 :param nsr_id: internal id of the instance
3896 :param operation: instantiate, terminate, scale, action, ...
3897 :param params: user parameters for the operation
3898 :return: dictionary following SOL005 format
3899 """
3900 # Raise exception if invalid arguments
3901 if not (nsr_id and operation and params):
3902 raise LcmException(
3903 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3904 )
3905 now = time()
3906 _id = str(uuid4())
3907 nslcmop = {
3908 "id": _id,
3909 "_id": _id,
3910 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3911 "operationState": "PROCESSING",
3912 "statusEnteredTime": now,
3913 "nsInstanceId": nsr_id,
3914 "lcmOperationType": operation,
3915 "startTime": now,
3916 "isAutomaticInvocation": False,
3917 "operationParams": params,
3918 "isCancelPending": False,
3919 "links": {
3920 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3921 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3922 },
3923 }
3924 return nslcmop
3925
3926 def _format_additional_params(self, params):
3927 params = params or {}
3928 for key, value in params.items():
3929 if str(value).startswith("!!yaml "):
3930 params[key] = yaml.safe_load(value[7:])
3931 return params
3932
3933 def _get_terminate_primitive_params(self, seq, vnf_index):
3934 primitive = seq.get("name")
3935 primitive_params = {}
3936 params = {
3937 "member_vnf_index": vnf_index,
3938 "primitive": primitive,
3939 "primitive_params": primitive_params,
3940 }
3941 desc_params = {}
3942 return self._map_primitive_params(seq, params, desc_params)
3943
3944 # sub-operations
3945
3946 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3947 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3948 if op.get("operationState") == "COMPLETED":
3949 # b. Skip sub-operation
3950 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3951 return self.SUBOPERATION_STATUS_SKIP
3952 else:
3953 # c. retry executing sub-operation
3954 # The sub-operation exists, and operationState != 'COMPLETED'
3955 # Update operationState = 'PROCESSING' to indicate a retry.
3956 operationState = "PROCESSING"
3957 detailed_status = "In progress"
3958 self._update_suboperation_status(
3959 db_nslcmop, op_index, operationState, detailed_status
3960 )
3961 # Return the sub-operation index
3962 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3963 # with arguments extracted from the sub-operation
3964 return op_index
3965
3966 # Find a sub-operation where all keys in a matching dictionary must match
3967 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3968 def _find_suboperation(self, db_nslcmop, match):
3969 if db_nslcmop and match:
3970 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3971 for i, op in enumerate(op_list):
3972 if all(op.get(k) == match[k] for k in match):
3973 return i
3974 return self.SUBOPERATION_STATUS_NOT_FOUND
3975
3976 # Update status for a sub-operation given its index
3977 def _update_suboperation_status(
3978 self, db_nslcmop, op_index, operationState, detailed_status
3979 ):
3980 # Update DB for HA tasks
3981 q_filter = {"_id": db_nslcmop["_id"]}
3982 update_dict = {
3983 "_admin.operations.{}.operationState".format(op_index): operationState,
3984 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3985 }
3986 self.db.set_one(
3987 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3988 )
3989
3990 # Add sub-operation, return the index of the added sub-operation
3991 # Optionally, set operationState, detailed-status, and operationType
3992 # Status and type are currently set for 'scale' sub-operations:
3993 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3994 # 'detailed-status' : status message
3995 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3996 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3997 def _add_suboperation(
3998 self,
3999 db_nslcmop,
4000 vnf_index,
4001 vdu_id,
4002 vdu_count_index,
4003 vdu_name,
4004 primitive,
4005 mapped_primitive_params,
4006 operationState=None,
4007 detailed_status=None,
4008 operationType=None,
4009 RO_nsr_id=None,
4010 RO_scaling_info=None,
4011 ):
4012 if not db_nslcmop:
4013 return self.SUBOPERATION_STATUS_NOT_FOUND
4014 # Get the "_admin.operations" list, if it exists
4015 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4016 op_list = db_nslcmop_admin.get("operations")
4017 # Create or append to the "_admin.operations" list
4018 new_op = {
4019 "member_vnf_index": vnf_index,
4020 "vdu_id": vdu_id,
4021 "vdu_count_index": vdu_count_index,
4022 "primitive": primitive,
4023 "primitive_params": mapped_primitive_params,
4024 }
4025 if operationState:
4026 new_op["operationState"] = operationState
4027 if detailed_status:
4028 new_op["detailed-status"] = detailed_status
4029 if operationType:
4030 new_op["lcmOperationType"] = operationType
4031 if RO_nsr_id:
4032 new_op["RO_nsr_id"] = RO_nsr_id
4033 if RO_scaling_info:
4034 new_op["RO_scaling_info"] = RO_scaling_info
4035 if not op_list:
4036 # No existing operations, create key 'operations' with current operation as first list element
4037 db_nslcmop_admin.update({"operations": [new_op]})
4038 op_list = db_nslcmop_admin.get("operations")
4039 else:
4040 # Existing operations, append operation to list
4041 op_list.append(new_op)
4042
4043 db_nslcmop_update = {"_admin.operations": op_list}
4044 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4045 op_index = len(op_list) - 1
4046 return op_index
4047
4048 # Helper methods for scale() sub-operations
4049
4050 # pre-scale/post-scale:
4051 # Check for 3 different cases:
4052 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4053 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4054 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4055 def _check_or_add_scale_suboperation(
4056 self,
4057 db_nslcmop,
4058 vnf_index,
4059 vnf_config_primitive,
4060 primitive_params,
4061 operationType,
4062 RO_nsr_id=None,
4063 RO_scaling_info=None,
4064 ):
4065 # Find this sub-operation
4066 if RO_nsr_id and RO_scaling_info:
4067 operationType = "SCALE-RO"
4068 match = {
4069 "member_vnf_index": vnf_index,
4070 "RO_nsr_id": RO_nsr_id,
4071 "RO_scaling_info": RO_scaling_info,
4072 }
4073 else:
4074 match = {
4075 "member_vnf_index": vnf_index,
4076 "primitive": vnf_config_primitive,
4077 "primitive_params": primitive_params,
4078 "lcmOperationType": operationType,
4079 }
4080 op_index = self._find_suboperation(db_nslcmop, match)
4081 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4082 # a. New sub-operation
4083 # The sub-operation does not exist, add it.
4084 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4085 # The following parameters are set to None for all kind of scaling:
4086 vdu_id = None
4087 vdu_count_index = None
4088 vdu_name = None
4089 if RO_nsr_id and RO_scaling_info:
4090 vnf_config_primitive = None
4091 primitive_params = None
4092 else:
4093 RO_nsr_id = None
4094 RO_scaling_info = None
4095 # Initial status for sub-operation
4096 operationState = "PROCESSING"
4097 detailed_status = "In progress"
4098 # Add sub-operation for pre/post-scaling (zero or more operations)
4099 self._add_suboperation(
4100 db_nslcmop,
4101 vnf_index,
4102 vdu_id,
4103 vdu_count_index,
4104 vdu_name,
4105 vnf_config_primitive,
4106 primitive_params,
4107 operationState,
4108 detailed_status,
4109 operationType,
4110 RO_nsr_id,
4111 RO_scaling_info,
4112 )
4113 return self.SUBOPERATION_STATUS_NEW
4114 else:
4115 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4116 # or op_index (operationState != 'COMPLETED')
4117 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4118
4119 # Function to return execution_environment id
4120
4121 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4122 # TODO vdu_index_count
4123 for vca in vca_deployed_list:
4124 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4125 return vca["ee_id"]
4126
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True).

        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: nslcmop database record; a sub-operation is added for each terminate primitive
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: identifier of the VCA system to address; None selects the default one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type kept for backward compatibility with records lacking "type"
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # Run primitives only when the VCA still needs them (needed_terminate flag)
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation (for HA restart tracking)
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4232
4233 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4234 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4235 namespace = "." + db_nsr["_id"]
4236 try:
4237 await self.n2vc.delete_namespace(
4238 namespace=namespace,
4239 total_timeout=self.timeout_charm_delete,
4240 vca_id=vca_id,
4241 )
4242 except N2VCNotFound: # already deleted. Skip
4243 pass
4244 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4245
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text: prefix prepended to every log message
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: NS record identifier (_id in "nsrs")
        :param nslcmop_id: operation identifier (_id in "nslcmops")
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None. Raises ROclient.ROClientException on RO error/timeout and
            LcmException when any deletion (ns, nsd or vnfd) finally failed
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                # RO returns an asynchronous action id that must be polled below
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at VIM side
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write status to DB only when it actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0: while-else reached without break
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns deletion above fully succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete every vnfd pushed to RO (only when previous steps succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4445
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance.

        Runs as an LCM HA task: it first tries to lock the operation and
        returns silently when another LCM instance owns it. The work is
        performed in three stages (reflected in ``stage``): prepare, execute
        terminate primitives per VCA, then delete all execution environments,
        KDUs and the RO/VIM deployment. Progress and the final result are
        written to the "nsrs" and "nslcmops" collections, and a "terminated"
        message is published to kafka at the end.

        :param nsr_id: NS record identifier (_id in "nsrs")
        :param nslcmop_id: operation identifier (_id in "nslcmops")
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # the operation may override the configured terminate timeout
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            # nothing was ever deployed: final status is written in the finally block
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, fetching each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # Pick the configuration descriptor matching the VCA scope:
                # ns-level, vdu-level, kdu-level or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (new generation RO client when available)
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE(review): this "exc" shadows the outer variable; only
                # error_list is read afterwards, so behavior is unaffected
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        # propagate the final state to every VNFR of this NS
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify NBI (and others) that termination finished
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4774
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a group of asyncio tasks, reporting progress and collecting errors.

        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict mapping each task to a human-readable description
        :param timeout: overall timeout in seconds for the whole group
        :param stage: 3-element status list; index 1 is updated with "<done>/<total>." progress
        :param nslcmop_id: operation whose status is refreshed on every progress change
        :param nsr_id: when provided, errorDescription/errorDetail are also written to this NS record
        :return: list of error-detail strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout: every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types are logged without traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        # unexpected exception: log the full traceback
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4851
4852 @staticmethod
4853 def _map_primitive_params(primitive_desc, params, instantiation_params):
4854 """
4855 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4856 The default-value is used. If it is between < > it look for a value at instantiation_params
4857 :param primitive_desc: portion of VNFD/NSD that describes primitive
4858 :param params: Params provided by user
4859 :param instantiation_params: Instantiation params provided by user
4860 :return: a dictionary with the calculated params
4861 """
4862 calculated_params = {}
4863 for parameter in primitive_desc.get("parameter", ()):
4864 param_name = parameter["name"]
4865 if param_name in params:
4866 calculated_params[param_name] = params[param_name]
4867 elif "default-value" in parameter or "value" in parameter:
4868 if "value" in parameter:
4869 calculated_params[param_name] = parameter["value"]
4870 else:
4871 calculated_params[param_name] = parameter["default-value"]
4872 if (
4873 isinstance(calculated_params[param_name], str)
4874 and calculated_params[param_name].startswith("<")
4875 and calculated_params[param_name].endswith(">")
4876 ):
4877 if calculated_params[param_name][1:-1] in instantiation_params:
4878 calculated_params[param_name] = instantiation_params[
4879 calculated_params[param_name][1:-1]
4880 ]
4881 else:
4882 raise LcmException(
4883 "Parameter {} needed to execute primitive {} not provided".format(
4884 calculated_params[param_name], primitive_desc["name"]
4885 )
4886 )
4887 else:
4888 raise LcmException(
4889 "Parameter {} needed to execute primitive {} not provided".format(
4890 param_name, primitive_desc["name"]
4891 )
4892 )
4893
4894 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4895 calculated_params[param_name] = yaml.safe_dump(
4896 calculated_params[param_name], default_flow_style=True, width=256
4897 )
4898 elif isinstance(calculated_params[param_name], str) and calculated_params[
4899 param_name
4900 ].startswith("!!yaml "):
4901 calculated_params[param_name] = calculated_params[param_name][7:]
4902 if parameter.get("data-type") == "INTEGER":
4903 try:
4904 calculated_params[param_name] = int(calculated_params[param_name])
4905 except ValueError: # error converting string to int
4906 raise LcmException(
4907 "Parameter {} of primitive {} must be integer".format(
4908 param_name, primitive_desc["name"]
4909 )
4910 )
4911 elif parameter.get("data-type") == "BOOLEAN":
4912 calculated_params[param_name] = not (
4913 (str(calculated_params[param_name])).lower() == "false"
4914 )
4915
4916 # add always ns_config_info if primitive name is config
4917 if primitive_desc["name"] == "config":
4918 if "ns_config_info" in instantiation_params:
4919 calculated_params["ns_config_info"] = instantiation_params[
4920 "ns_config_info"
4921 ]
4922 return calculated_params
4923
4924 def _look_for_deployed_vca(
4925 self,
4926 deployed_vca,
4927 member_vnf_index,
4928 vdu_id,
4929 vdu_count_index,
4930 kdu_name=None,
4931 ee_descriptor_id=None,
4932 ):
4933 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4934 for vca in deployed_vca:
4935 if not vca:
4936 continue
4937 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4938 continue
4939 if (
4940 vdu_count_index is not None
4941 and vdu_count_index != vca["vdu_count_index"]
4942 ):
4943 continue
4944 if kdu_name and kdu_name != vca["kdu_name"]:
4945 continue
4946 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4947 continue
4948 break
4949 else:
4950 # vca_deployed not found
4951 raise LcmException(
4952 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4953 " is not deployed".format(
4954 member_vnf_index,
4955 vdu_id,
4956 vdu_count_index,
4957 kdu_name,
4958 ee_descriptor_id,
4959 )
4960 )
4961 # get ee_id
4962 ee_id = vca.get("ee_id")
4963 vca_type = vca.get(
4964 "type", "lxc_proxy_charm"
4965 ) # default value for backward compatibility - proxy charm
4966 if not ee_id:
4967 raise LcmException(
4968 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4969 "execution environment".format(
4970 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4971 )
4972 )
4973 return ee_id, vca_type
4974
4975 async def _ns_execute_primitive(
4976 self,
4977 ee_id,
4978 primitive,
4979 primitive_params,
4980 retries=0,
4981 retries_interval=30,
4982 timeout=None,
4983 vca_type=None,
4984 db_dict=None,
4985 vca_id: str = None,
4986 ) -> (str, str):
4987 try:
4988 if primitive == "config":
4989 primitive_params = {"params": primitive_params}
4990
4991 vca_type = vca_type or "lxc_proxy_charm"
4992
4993 while retries >= 0:
4994 try:
4995 output = await asyncio.wait_for(
4996 self.vca_map[vca_type].exec_primitive(
4997 ee_id=ee_id,
4998 primitive_name=primitive,
4999 params_dict=primitive_params,
5000 progress_timeout=self.timeout_progress_primitive,
5001 total_timeout=self.timeout_primitive,
5002 db_dict=db_dict,
5003 vca_id=vca_id,
5004 vca_type=vca_type,
5005 ),
5006 timeout=timeout or self.timeout_primitive,
5007 )
5008 # execution was OK
5009 break
5010 except asyncio.CancelledError:
5011 raise
5012 except Exception as e:
5013 retries -= 1
5014 if retries >= 0:
5015 self.logger.debug(
5016 "Error executing action {} on {} -> {}".format(
5017 primitive, ee_id, e
5018 )
5019 )
5020 # wait and retry
5021 await asyncio.sleep(retries_interval, loop=self.loop)
5022 else:
5023 if isinstance(e, asyncio.TimeoutError):
5024 e = N2VCException(
5025 message="Timed out waiting for action to complete"
5026 )
5027 return "FAILED", getattr(e, "message", repr(e))
5028
5029 return "COMPLETED", output
5030
5031 except (LcmException, asyncio.CancelledError):
5032 raise
5033 except Exception as e:
5034 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5035
5036 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5037 """
5038 Updating the vca_status with latest juju information in nsrs record
5039 :param: nsr_id: Id of the nsr
5040 :param: nslcmop_id: Id of the nslcmop
5041 :return: None
5042 """
5043
5044 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5045 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5046 vca_id = self.get_vca_id({}, db_nsr)
5047 if db_nsr["_admin"]["deployed"]["K8s"]:
5048 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5049 cluster_uuid, kdu_instance, cluster_type = (
5050 k8s["k8scluster-uuid"],
5051 k8s["kdu-instance"],
5052 k8s["k8scluster-type"],
5053 )
5054 await self._on_update_k8s_db(
5055 cluster_uuid=cluster_uuid,
5056 kdu_instance=kdu_instance,
5057 filter={"_id": nsr_id},
5058 vca_id=vca_id,
5059 cluster_type=cluster_type,
5060 )
5061 else:
5062 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5063 table, filter = "nsrs", {"_id": nsr_id}
5064 path = "_admin.deployed.VCA.{}.".format(vca_index)
5065 await self._on_update_n2vc_db(table, filter, path, {})
5066
5067 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5068 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5069
    async def action(self, nsr_id, nslcmop_id):
        """Run a primitive (action) over a NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmop record, locates the
        primitive in the corresponding descriptor, executes it either through
        the k8s connector (KDU operations) or through the deployed VCA
        execution environment, and persists the result in the finally block.

        :param nsr_id: id of the NS instance record
        :param nslcmop_id: id of the ns lcm operation record
        :return: (nslcmop_operation_state, detailed_status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params may arrive JSON-encoded; decode once here
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # KDU additionalParams are stored JSON-encoded; decode each
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound when vnf_index is set; an
            # NS-level action would hit a NameError here — confirm upstream
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive in the descriptor of the targeted level
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # only the built-in KDU operations may run without a
                # descriptor-declared primitive
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # gather the additional params declared at the targeted level
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops rebind "primitive" (the requested
                # primitive name); primitive_name was captured above so
                # behavior is unaffected, but the shadowing is fragile
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # juju-bundle KDUs run descriptor actions through the k8s
                # connector; helm charts go through the generic exec path
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model may be overridden through the action params;
                    # otherwise reuse the deployed model (without version tag)
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get("kdu_atomic_upgrade").lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        # outer timeout slightly larger so the connector's own
                        # timeout fires first
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # descriptor-declared KDU action through the k8s connector
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # non-KDU action: run the primitive on the deployed VCA
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the operation result whatever the outcome was
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify through kafka that the action finished
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5438
5439 async def terminate_vdus(
5440 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5441 ):
5442 """This method terminates VDUs
5443
5444 Args:
5445 db_vnfr: VNF instance record
5446 member_vnf_index: VNF index to identify the VDUs to be removed
5447 db_nsr: NS instance record
5448 update_db_nslcmops: Nslcmop update record
5449 """
5450 vca_scaling_info = []
5451 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5452 scaling_info["scaling_direction"] = "IN"
5453 scaling_info["vdu-delete"] = {}
5454 scaling_info["kdu-delete"] = {}
5455 db_vdur = db_vnfr.get("vdur")
5456 vdur_list = copy(db_vdur)
5457 count_index = 0
5458 for index, vdu in enumerate(vdur_list):
5459 vca_scaling_info.append(
5460 {
5461 "osm_vdu_id": vdu["vdu-id-ref"],
5462 "member-vnf-index": member_vnf_index,
5463 "type": "delete",
5464 "vdu_index": count_index,
5465 }
5466 )
5467 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5468 scaling_info["vdu"].append(
5469 {
5470 "name": vdu.get("name") or vdu.get("vdu-name"),
5471 "vdu_id": vdu["vdu-id-ref"],
5472 "interface": [],
5473 }
5474 )
5475 for interface in vdu["interfaces"]:
5476 scaling_info["vdu"][index]["interface"].append(
5477 {
5478 "name": interface["name"],
5479 "ip_address": interface["ip-address"],
5480 "mac_address": interface.get("mac-address"),
5481 }
5482 )
5483 self.logger.info("NS update scaling info{}".format(scaling_info))
5484 stage[2] = "Terminating VDUs"
5485 if scaling_info.get("vdu-delete"):
5486 # scale_process = "RO"
5487 if self.ro_config.get("ng"):
5488 await self._scale_ng_ro(
5489 logging_text,
5490 db_nsr,
5491 update_db_nslcmops,
5492 db_vnfr,
5493 scaling_info,
5494 stage,
5495 )
5496
    async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
        """This method is to Remove VNF instances from NS.

        Refuses to remove the last VNF of the NS; otherwise terminates the
        VNF's VDUs through NG-RO, detaches the vnfr from the NS record and
        deletes the vnfr from the database.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id of update
            vnf_instance_id: id of the VNF instance to be removed

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            db_nsr_update = {}
            logging_text = "Task ns={} update ".format(nsr_id)
            # count the VNFs currently attached to this NS
            check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
            self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
            if check_vnfr_count > 1:
                stage = ["", "", ""]
                step = "Getting nslcmop from database"
                self.logger.debug(
                    step + " after having waited for previous tasks to be completed"
                )
                # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
                member_vnf_index = db_vnfr["member-vnf-index-ref"]
                """ db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """

                update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                # delete the VNF's VDU resources through NG-RO
                await self.terminate_vdus(
                    db_vnfr,
                    member_vnf_index,
                    db_nsr,
                    update_db_nslcmops,
                    stage,
                    logging_text,
                )

                # detach the vnfr from the NS constituent list
                constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
                constituent_vnfr.remove(db_vnfr.get("_id"))
                db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
                    "constituent-vnfr-ref"
                )
                # NOTE(review): the same db_nsr_update is written twice, before
                # and after del_one — presumably defensive; confirm intended
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                return "COMPLETED", "Done"
            else:
                step = "Terminate VNF Failed with"
                raise LcmException(
                    "{} Cannot terminate the last VNF in this NS.".format(
                        vnf_instance_id
                    )
                )
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error removing VNF {}".format(e))
            return "FAILED", "Error removing VNF {}".format(e)
5557
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the vnfr from the latest
        descriptor revision and re-instantiates the VDUs through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index stays 0 for every VDU, so all entries
            # in vdu-create map to index 0 — confirm intended
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the new descriptor
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is pre-computed and carried in the nslcmop
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): vca_scaling_info and cloud_init_list are populated
            # below but never consumed in this method — confirm dead code
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5683
5684 async def _ns_charm_upgrade(
5685 self,
5686 ee_id,
5687 charm_id,
5688 charm_type,
5689 path,
5690 timeout: float = None,
5691 ) -> (str, str):
5692 """This method upgrade charms in VNF instances
5693
5694 Args:
5695 ee_id: Execution environment id
5696 path: Local path to the charm
5697 charm_id: charm-id
5698 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5699 timeout: (Float) Timeout for the ns update operation
5700
5701 Returns:
5702 result: (str, str) COMPLETED/FAILED, details
5703 """
5704 try:
5705 charm_type = charm_type or "lxc_proxy_charm"
5706 output = await self.vca_map[charm_type].upgrade_charm(
5707 ee_id=ee_id,
5708 path=path,
5709 charm_id=charm_id,
5710 charm_type=charm_type,
5711 timeout=timeout or self.timeout_ns_update,
5712 )
5713
5714 if output:
5715 return "COMPLETED", output
5716
5717 except (LcmException, asyncio.CancelledError):
5718 raise
5719
5720 except Exception as e:
5721
5722 self.logger.debug("Error upgrading charm {}".format(path))
5723
5724 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5725
5726 async def update(self, nsr_id, nslcmop_id):
5727 """Update NS according to different update types
5728
5729 This method performs upgrade of VNF instances then updates the revision
5730 number in VNF record
5731
5732 Args:
5733 nsr_id: Network service will be updated
5734 nslcmop_id: ns lcm operation id
5735
5736 Returns:
5737 It may raise DbException, LcmException, N2VCException, K8sException
5738
5739 """
5740 # Try to lock HA task here
5741 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5742 if not task_is_locked_by_me:
5743 return
5744
5745 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5746 self.logger.debug(logging_text + "Enter")
5747
5748 # Set the required variables to be filled up later
5749 db_nsr = None
5750 db_nslcmop_update = {}
5751 vnfr_update = {}
5752 nslcmop_operation_state = None
5753 db_nsr_update = {}
5754 error_description_nslcmop = ""
5755 exc = None
5756 change_type = "updated"
5757 detailed_status = ""
5758
5759 try:
5760 # wait for any previous tasks in process
5761 step = "Waiting for previous operations to terminate"
5762 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5763 self._write_ns_status(
5764 nsr_id=nsr_id,
5765 ns_state=None,
5766 current_operation="UPDATING",
5767 current_operation_id=nslcmop_id,
5768 )
5769
5770 step = "Getting nslcmop from database"
5771 db_nslcmop = self.db.get_one(
5772 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5773 )
5774 update_type = db_nslcmop["operationParams"]["updateType"]
5775
5776 step = "Getting nsr from database"
5777 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5778 old_operational_status = db_nsr["operational-status"]
5779 db_nsr_update["operational-status"] = "updating"
5780 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5781 nsr_deployed = db_nsr["_admin"].get("deployed")
5782
5783 if update_type == "CHANGE_VNFPKG":
5784
5785 # Get the input parameters given through update request
5786 vnf_instance_id = db_nslcmop["operationParams"][
5787 "changeVnfPackageData"
5788 ].get("vnfInstanceId")
5789
5790 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5791 "vnfdId"
5792 )
5793 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5794
5795 step = "Getting vnfr from database"
5796 db_vnfr = self.db.get_one(
5797 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5798 )
5799
5800 step = "Getting vnfds from database"
5801 # Latest VNFD
5802 latest_vnfd = self.db.get_one(
5803 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5804 )
5805 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5806
5807 # Current VNFD
5808 current_vnf_revision = db_vnfr.get("revision", 1)
5809 current_vnfd = self.db.get_one(
5810 "vnfds_revisions",
5811 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5812 fail_on_empty=False,
5813 )
5814 # Charm artifact paths will be filled up later
5815 (
5816 current_charm_artifact_path,
5817 target_charm_artifact_path,
5818 charm_artifact_paths,
5819 ) = ([], [], [])
5820
5821 step = "Checking if revision has changed in VNFD"
5822 if current_vnf_revision != latest_vnfd_revision:
5823
5824 change_type = "policy_updated"
5825
5826 # There is new revision of VNFD, update operation is required
5827 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5828 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5829
5830 step = "Removing the VNFD packages if they exist in the local path"
5831 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5832 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5833
5834 step = "Get the VNFD packages from FSMongo"
5835 self.fs.sync(from_path=latest_vnfd_path)
5836 self.fs.sync(from_path=current_vnfd_path)
5837
5838 step = (
5839 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5840 )
5841 base_folder = latest_vnfd["_admin"]["storage"]
5842
5843 for charm_index, charm_deployed in enumerate(
5844 get_iterable(nsr_deployed, "VCA")
5845 ):
5846 vnf_index = db_vnfr.get("member-vnf-index-ref")
5847
5848 # Getting charm-id and charm-type
5849 if charm_deployed.get("member-vnf-index") == vnf_index:
5850 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5851 charm_type = charm_deployed.get("type")
5852
5853 # Getting ee-id
5854 ee_id = charm_deployed.get("ee_id")
5855
5856 step = "Getting descriptor config"
5857 descriptor_config = get_configuration(
5858 current_vnfd, current_vnfd["id"]
5859 )
5860
5861 if "execution-environment-list" in descriptor_config:
5862 ee_list = descriptor_config.get(
5863 "execution-environment-list", []
5864 )
5865 else:
5866 ee_list = []
5867
5868 # There could be several charm used in the same VNF
5869 for ee_item in ee_list:
5870 if ee_item.get("juju"):
5871
5872 step = "Getting charm name"
5873 charm_name = ee_item["juju"].get("charm")
5874
5875 step = "Setting Charm artifact paths"
5876 current_charm_artifact_path.append(
5877 get_charm_artifact_path(
5878 base_folder,
5879 charm_name,
5880 charm_type,
5881 current_vnf_revision,
5882 )
5883 )
5884 target_charm_artifact_path.append(
5885 get_charm_artifact_path(
5886 base_folder,
5887 charm_name,
5888 charm_type,
5889 latest_vnfd_revision,
5890 )
5891 )
5892
5893 charm_artifact_paths = zip(
5894 current_charm_artifact_path, target_charm_artifact_path
5895 )
5896
5897 step = "Checking if software version has changed in VNFD"
5898 if find_software_version(current_vnfd) != find_software_version(
5899 latest_vnfd
5900 ):
5901
5902 step = "Checking if existing VNF has charm"
5903 for current_charm_path, target_charm_path in list(
5904 charm_artifact_paths
5905 ):
5906 if current_charm_path:
5907 raise LcmException(
5908 "Software version change is not supported as VNF instance {} has charm.".format(
5909 vnf_instance_id
5910 )
5911 )
5912
5913 # There is no change in the charm package, then redeploy the VNF
5914 # based on new descriptor
5915 step = "Redeploying VNF"
5916 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5917 (result, detailed_status) = await self._ns_redeploy_vnf(
5918 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5919 )
5920 if result == "FAILED":
5921 nslcmop_operation_state = result
5922 error_description_nslcmop = detailed_status
5923 db_nslcmop_update["detailed-status"] = detailed_status
5924 self.logger.debug(
5925 logging_text
5926 + " step {} Done with result {} {}".format(
5927 step, nslcmop_operation_state, detailed_status
5928 )
5929 )
5930
5931 else:
5932 step = "Checking if any charm package has changed or not"
5933 for current_charm_path, target_charm_path in list(
5934 charm_artifact_paths
5935 ):
5936 if (
5937 current_charm_path
5938 and target_charm_path
5939 and self.check_charm_hash_changed(
5940 current_charm_path, target_charm_path
5941 )
5942 ):
5943
5944 step = "Checking whether VNF uses juju bundle"
5945 if check_juju_bundle_existence(current_vnfd):
5946
5947 raise LcmException(
5948 "Charm upgrade is not supported for the instance which"
5949 " uses juju-bundle: {}".format(
5950 check_juju_bundle_existence(current_vnfd)
5951 )
5952 )
5953
5954 step = "Upgrading Charm"
5955 (
5956 result,
5957 detailed_status,
5958 ) = await self._ns_charm_upgrade(
5959 ee_id=ee_id,
5960 charm_id=charm_id,
5961 charm_type=charm_type,
5962 path=self.fs.path + target_charm_path,
5963 timeout=timeout_seconds,
5964 )
5965
5966 if result == "FAILED":
5967 nslcmop_operation_state = result
5968 error_description_nslcmop = detailed_status
5969
5970 db_nslcmop_update["detailed-status"] = detailed_status
5971 self.logger.debug(
5972 logging_text
5973 + " step {} Done with result {} {}".format(
5974 step, nslcmop_operation_state, detailed_status
5975 )
5976 )
5977
5978 step = "Updating policies"
5979 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5980 result = "COMPLETED"
5981 detailed_status = "Done"
5982 db_nslcmop_update["detailed-status"] = "Done"
5983
5984 # If nslcmop_operation_state is None, so any operation is not failed.
5985 if not nslcmop_operation_state:
5986 nslcmop_operation_state = "COMPLETED"
5987
5988 # If update CHANGE_VNFPKG nslcmop_operation is successful
5989 # vnf revision need to be updated
5990 vnfr_update["revision"] = latest_vnfd_revision
5991 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5992
5993 self.logger.debug(
5994 logging_text
5995 + " task Done with result {} {}".format(
5996 nslcmop_operation_state, detailed_status
5997 )
5998 )
5999 elif update_type == "REMOVE_VNF":
6000 # This part is included in https://osm.etsi.org/gerrit/11876
6001 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6002 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6003 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6004 step = "Removing VNF"
6005 (result, detailed_status) = await self.remove_vnf(
6006 nsr_id, nslcmop_id, vnf_instance_id
6007 )
6008 if result == "FAILED":
6009 nslcmop_operation_state = result
6010 error_description_nslcmop = detailed_status
6011 db_nslcmop_update["detailed-status"] = detailed_status
6012 change_type = "vnf_terminated"
6013 if not nslcmop_operation_state:
6014 nslcmop_operation_state = "COMPLETED"
6015 self.logger.debug(
6016 logging_text
6017 + " task Done with result {} {}".format(
6018 nslcmop_operation_state, detailed_status
6019 )
6020 )
6021
6022 elif update_type == "OPERATE_VNF":
6023 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6024 "vnfInstanceId"
6025 ]
6026 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6027 "changeStateTo"
6028 ]
6029 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6030 "additionalParam"
6031 ]
6032 (result, detailed_status) = await self.rebuild_start_stop(
6033 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6034 )
6035 if result == "FAILED":
6036 nslcmop_operation_state = result
6037 error_description_nslcmop = detailed_status
6038 db_nslcmop_update["detailed-status"] = detailed_status
6039 if not nslcmop_operation_state:
6040 nslcmop_operation_state = "COMPLETED"
6041 self.logger.debug(
6042 logging_text
6043 + " task Done with result {} {}".format(
6044 nslcmop_operation_state, detailed_status
6045 )
6046 )
6047
6048 # If nslcmop_operation_state is None, so any operation is not failed.
6049 # All operations are executed in overall.
6050 if not nslcmop_operation_state:
6051 nslcmop_operation_state = "COMPLETED"
6052 db_nsr_update["operational-status"] = old_operational_status
6053
6054 except (DbException, LcmException, N2VCException, K8sException) as e:
6055 self.logger.error(logging_text + "Exit Exception {}".format(e))
6056 exc = e
6057 except asyncio.CancelledError:
6058 self.logger.error(
6059 logging_text + "Cancelled Exception while '{}'".format(step)
6060 )
6061 exc = "Operation was cancelled"
6062 except asyncio.TimeoutError:
6063 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6064 exc = "Timeout"
6065 except Exception as e:
6066 exc = traceback.format_exc()
6067 self.logger.critical(
6068 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6069 exc_info=True,
6070 )
6071 finally:
6072 if exc:
6073 db_nslcmop_update[
6074 "detailed-status"
6075 ] = (
6076 detailed_status
6077 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6078 nslcmop_operation_state = "FAILED"
6079 db_nsr_update["operational-status"] = old_operational_status
6080 if db_nsr:
6081 self._write_ns_status(
6082 nsr_id=nsr_id,
6083 ns_state=db_nsr["nsState"],
6084 current_operation="IDLE",
6085 current_operation_id=None,
6086 other_update=db_nsr_update,
6087 )
6088
6089 self._write_op_status(
6090 op_id=nslcmop_id,
6091 stage="",
6092 error_message=error_description_nslcmop,
6093 operation_state=nslcmop_operation_state,
6094 other_update=db_nslcmop_update,
6095 )
6096
6097 if nslcmop_operation_state:
6098 try:
6099 msg = {
6100 "nsr_id": nsr_id,
6101 "nslcmop_id": nslcmop_id,
6102 "operationState": nslcmop_operation_state,
6103 }
6104 if change_type in ("vnf_terminated", "policy_updated"):
6105 msg.update({"vnf_member_index": member_vnf_index})
6106 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6107 except Exception as e:
6108 self.logger.error(
6109 logging_text + "kafka_write notification Exception {}".format(e)
6110 )
6111 self.logger.debug(logging_text + "Exit")
6112 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6113 return nslcmop_operation_state, detailed_status
6114
6115 async def scale(self, nsr_id, nslcmop_id):
6116 # Try to lock HA task here
6117 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6118 if not task_is_locked_by_me:
6119 return
6120
6121 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6122 stage = ["", "", ""]
6123 tasks_dict_info = {}
6124 # ^ stage, step, VIM progress
6125 self.logger.debug(logging_text + "Enter")
6126 # get all needed from database
6127 db_nsr = None
6128 db_nslcmop_update = {}
6129 db_nsr_update = {}
6130 exc = None
6131 # in case of error, indicates what part of scale was failed to put nsr at error status
6132 scale_process = None
6133 old_operational_status = ""
6134 old_config_status = ""
6135 nsi_id = None
6136 try:
6137 # wait for any previous tasks in process
6138 step = "Waiting for previous operations to terminate"
6139 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6140 self._write_ns_status(
6141 nsr_id=nsr_id,
6142 ns_state=None,
6143 current_operation="SCALING",
6144 current_operation_id=nslcmop_id,
6145 )
6146
6147 step = "Getting nslcmop from database"
6148 self.logger.debug(
6149 step + " after having waited for previous tasks to be completed"
6150 )
6151 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6152
6153 step = "Getting nsr from database"
6154 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6155 old_operational_status = db_nsr["operational-status"]
6156 old_config_status = db_nsr["config-status"]
6157
6158 step = "Parsing scaling parameters"
6159 db_nsr_update["operational-status"] = "scaling"
6160 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6161 nsr_deployed = db_nsr["_admin"].get("deployed")
6162
6163 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6164 "scaleByStepData"
6165 ]["member-vnf-index"]
6166 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6167 "scaleByStepData"
6168 ]["scaling-group-descriptor"]
6169 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6170 # for backward compatibility
6171 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6172 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6173 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6174 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6175
6176 step = "Getting vnfr from database"
6177 db_vnfr = self.db.get_one(
6178 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6179 )
6180
6181 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6182
6183 step = "Getting vnfd from database"
6184 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6185
6186 base_folder = db_vnfd["_admin"]["storage"]
6187
6188 step = "Getting scaling-group-descriptor"
6189 scaling_descriptor = find_in_list(
6190 get_scaling_aspect(db_vnfd),
6191 lambda scale_desc: scale_desc["name"] == scaling_group,
6192 )
6193 if not scaling_descriptor:
6194 raise LcmException(
6195 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6196 "at vnfd:scaling-group-descriptor".format(scaling_group)
6197 )
6198
6199 step = "Sending scale order to VIM"
6200 # TODO check if ns is in a proper status
6201 nb_scale_op = 0
6202 if not db_nsr["_admin"].get("scaling-group"):
6203 self.update_db_2(
6204 "nsrs",
6205 nsr_id,
6206 {
6207 "_admin.scaling-group": [
6208 {"name": scaling_group, "nb-scale-op": 0}
6209 ]
6210 },
6211 )
6212 admin_scale_index = 0
6213 else:
6214 for admin_scale_index, admin_scale_info in enumerate(
6215 db_nsr["_admin"]["scaling-group"]
6216 ):
6217 if admin_scale_info["name"] == scaling_group:
6218 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6219 break
6220 else: # not found, set index one plus last element and add new entry with the name
6221 admin_scale_index += 1
6222 db_nsr_update[
6223 "_admin.scaling-group.{}.name".format(admin_scale_index)
6224 ] = scaling_group
6225
6226 vca_scaling_info = []
6227 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6228 if scaling_type == "SCALE_OUT":
6229 if "aspect-delta-details" not in scaling_descriptor:
6230 raise LcmException(
6231 "Aspect delta details not fount in scaling descriptor {}".format(
6232 scaling_descriptor["name"]
6233 )
6234 )
6235 # count if max-instance-count is reached
6236 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6237
6238 scaling_info["scaling_direction"] = "OUT"
6239 scaling_info["vdu-create"] = {}
6240 scaling_info["kdu-create"] = {}
6241 for delta in deltas:
6242 for vdu_delta in delta.get("vdu-delta", {}):
6243 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6244 # vdu_index also provides the number of instance of the targeted vdu
6245 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6246 cloud_init_text = self._get_vdu_cloud_init_content(
6247 vdud, db_vnfd
6248 )
6249 if cloud_init_text:
6250 additional_params = (
6251 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6252 or {}
6253 )
6254 cloud_init_list = []
6255
6256 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6257 max_instance_count = 10
6258 if vdu_profile and "max-number-of-instances" in vdu_profile:
6259 max_instance_count = vdu_profile.get(
6260 "max-number-of-instances", 10
6261 )
6262
6263 default_instance_num = get_number_of_instances(
6264 db_vnfd, vdud["id"]
6265 )
6266 instances_number = vdu_delta.get("number-of-instances", 1)
6267 nb_scale_op += instances_number
6268
6269 new_instance_count = nb_scale_op + default_instance_num
6270 # Control if new count is over max and vdu count is less than max.
6271 # Then assign new instance count
6272 if new_instance_count > max_instance_count > vdu_count:
6273 instances_number = new_instance_count - max_instance_count
6274 else:
6275 instances_number = instances_number
6276
6277 if new_instance_count > max_instance_count:
6278 raise LcmException(
6279 "reached the limit of {} (max-instance-count) "
6280 "scaling-out operations for the "
6281 "scaling-group-descriptor '{}'".format(
6282 nb_scale_op, scaling_group
6283 )
6284 )
6285 for x in range(vdu_delta.get("number-of-instances", 1)):
6286 if cloud_init_text:
6287 # TODO Information of its own ip is not available because db_vnfr is not updated.
6288 additional_params["OSM"] = get_osm_params(
6289 db_vnfr, vdu_delta["id"], vdu_index + x
6290 )
6291 cloud_init_list.append(
6292 self._parse_cloud_init(
6293 cloud_init_text,
6294 additional_params,
6295 db_vnfd["id"],
6296 vdud["id"],
6297 )
6298 )
6299 vca_scaling_info.append(
6300 {
6301 "osm_vdu_id": vdu_delta["id"],
6302 "member-vnf-index": vnf_index,
6303 "type": "create",
6304 "vdu_index": vdu_index + x,
6305 }
6306 )
6307 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6308 for kdu_delta in delta.get("kdu-resource-delta", {}):
6309 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6310 kdu_name = kdu_profile["kdu-name"]
6311 resource_name = kdu_profile.get("resource-name", "")
6312
6313 # Might have different kdus in the same delta
6314 # Should have list for each kdu
6315 if not scaling_info["kdu-create"].get(kdu_name, None):
6316 scaling_info["kdu-create"][kdu_name] = []
6317
6318 kdur = get_kdur(db_vnfr, kdu_name)
6319 if kdur.get("helm-chart"):
6320 k8s_cluster_type = "helm-chart-v3"
6321 self.logger.debug("kdur: {}".format(kdur))
6322 if (
6323 kdur.get("helm-version")
6324 and kdur.get("helm-version") == "v2"
6325 ):
6326 k8s_cluster_type = "helm-chart"
6327 elif kdur.get("juju-bundle"):
6328 k8s_cluster_type = "juju-bundle"
6329 else:
6330 raise LcmException(
6331 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6332 "juju-bundle. Maybe an old NBI version is running".format(
6333 db_vnfr["member-vnf-index-ref"], kdu_name
6334 )
6335 )
6336
6337 max_instance_count = 10
6338 if kdu_profile and "max-number-of-instances" in kdu_profile:
6339 max_instance_count = kdu_profile.get(
6340 "max-number-of-instances", 10
6341 )
6342
6343 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6344 deployed_kdu, _ = get_deployed_kdu(
6345 nsr_deployed, kdu_name, vnf_index
6346 )
6347 if deployed_kdu is None:
6348 raise LcmException(
6349 "KDU '{}' for vnf '{}' not deployed".format(
6350 kdu_name, vnf_index
6351 )
6352 )
6353 kdu_instance = deployed_kdu.get("kdu-instance")
6354 instance_num = await self.k8scluster_map[
6355 k8s_cluster_type
6356 ].get_scale_count(
6357 resource_name,
6358 kdu_instance,
6359 vca_id=vca_id,
6360 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6361 kdu_model=deployed_kdu.get("kdu-model"),
6362 )
6363 kdu_replica_count = instance_num + kdu_delta.get(
6364 "number-of-instances", 1
6365 )
6366
6367 # Control if new count is over max and instance_num is less than max.
6368 # Then assign max instance number to kdu replica count
6369 if kdu_replica_count > max_instance_count > instance_num:
6370 kdu_replica_count = max_instance_count
6371 if kdu_replica_count > max_instance_count:
6372 raise LcmException(
6373 "reached the limit of {} (max-instance-count) "
6374 "scaling-out operations for the "
6375 "scaling-group-descriptor '{}'".format(
6376 instance_num, scaling_group
6377 )
6378 )
6379
6380 for x in range(kdu_delta.get("number-of-instances", 1)):
6381 vca_scaling_info.append(
6382 {
6383 "osm_kdu_id": kdu_name,
6384 "member-vnf-index": vnf_index,
6385 "type": "create",
6386 "kdu_index": instance_num + x - 1,
6387 }
6388 )
6389 scaling_info["kdu-create"][kdu_name].append(
6390 {
6391 "member-vnf-index": vnf_index,
6392 "type": "create",
6393 "k8s-cluster-type": k8s_cluster_type,
6394 "resource-name": resource_name,
6395 "scale": kdu_replica_count,
6396 }
6397 )
6398 elif scaling_type == "SCALE_IN":
6399 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6400
6401 scaling_info["scaling_direction"] = "IN"
6402 scaling_info["vdu-delete"] = {}
6403 scaling_info["kdu-delete"] = {}
6404
6405 for delta in deltas:
6406 for vdu_delta in delta.get("vdu-delta", {}):
6407 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6408 min_instance_count = 0
6409 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6410 if vdu_profile and "min-number-of-instances" in vdu_profile:
6411 min_instance_count = vdu_profile["min-number-of-instances"]
6412
6413 default_instance_num = get_number_of_instances(
6414 db_vnfd, vdu_delta["id"]
6415 )
6416 instance_num = vdu_delta.get("number-of-instances", 1)
6417 nb_scale_op -= instance_num
6418
6419 new_instance_count = nb_scale_op + default_instance_num
6420
6421 if new_instance_count < min_instance_count < vdu_count:
6422 instances_number = min_instance_count - new_instance_count
6423 else:
6424 instances_number = instance_num
6425
6426 if new_instance_count < min_instance_count:
6427 raise LcmException(
6428 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6429 "scaling-group-descriptor '{}'".format(
6430 nb_scale_op, scaling_group
6431 )
6432 )
6433 for x in range(vdu_delta.get("number-of-instances", 1)):
6434 vca_scaling_info.append(
6435 {
6436 "osm_vdu_id": vdu_delta["id"],
6437 "member-vnf-index": vnf_index,
6438 "type": "delete",
6439 "vdu_index": vdu_index - 1 - x,
6440 }
6441 )
6442 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6443 for kdu_delta in delta.get("kdu-resource-delta", {}):
6444 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6445 kdu_name = kdu_profile["kdu-name"]
6446 resource_name = kdu_profile.get("resource-name", "")
6447
6448 if not scaling_info["kdu-delete"].get(kdu_name, None):
6449 scaling_info["kdu-delete"][kdu_name] = []
6450
6451 kdur = get_kdur(db_vnfr, kdu_name)
6452 if kdur.get("helm-chart"):
6453 k8s_cluster_type = "helm-chart-v3"
6454 self.logger.debug("kdur: {}".format(kdur))
6455 if (
6456 kdur.get("helm-version")
6457 and kdur.get("helm-version") == "v2"
6458 ):
6459 k8s_cluster_type = "helm-chart"
6460 elif kdur.get("juju-bundle"):
6461 k8s_cluster_type = "juju-bundle"
6462 else:
6463 raise LcmException(
6464 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6465 "juju-bundle. Maybe an old NBI version is running".format(
6466 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6467 )
6468 )
6469
6470 min_instance_count = 0
6471 if kdu_profile and "min-number-of-instances" in kdu_profile:
6472 min_instance_count = kdu_profile["min-number-of-instances"]
6473
6474 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6475 deployed_kdu, _ = get_deployed_kdu(
6476 nsr_deployed, kdu_name, vnf_index
6477 )
6478 if deployed_kdu is None:
6479 raise LcmException(
6480 "KDU '{}' for vnf '{}' not deployed".format(
6481 kdu_name, vnf_index
6482 )
6483 )
6484 kdu_instance = deployed_kdu.get("kdu-instance")
6485 instance_num = await self.k8scluster_map[
6486 k8s_cluster_type
6487 ].get_scale_count(
6488 resource_name,
6489 kdu_instance,
6490 vca_id=vca_id,
6491 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6492 kdu_model=deployed_kdu.get("kdu-model"),
6493 )
6494 kdu_replica_count = instance_num - kdu_delta.get(
6495 "number-of-instances", 1
6496 )
6497
6498 if kdu_replica_count < min_instance_count < instance_num:
6499 kdu_replica_count = min_instance_count
6500 if kdu_replica_count < min_instance_count:
6501 raise LcmException(
6502 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6503 "scaling-group-descriptor '{}'".format(
6504 instance_num, scaling_group
6505 )
6506 )
6507
6508 for x in range(kdu_delta.get("number-of-instances", 1)):
6509 vca_scaling_info.append(
6510 {
6511 "osm_kdu_id": kdu_name,
6512 "member-vnf-index": vnf_index,
6513 "type": "delete",
6514 "kdu_index": instance_num - x - 1,
6515 }
6516 )
6517 scaling_info["kdu-delete"][kdu_name].append(
6518 {
6519 "member-vnf-index": vnf_index,
6520 "type": "delete",
6521 "k8s-cluster-type": k8s_cluster_type,
6522 "resource-name": resource_name,
6523 "scale": kdu_replica_count,
6524 }
6525 )
6526
6527 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6528 vdu_delete = copy(scaling_info.get("vdu-delete"))
6529 if scaling_info["scaling_direction"] == "IN":
6530 for vdur in reversed(db_vnfr["vdur"]):
6531 if vdu_delete.get(vdur["vdu-id-ref"]):
6532 vdu_delete[vdur["vdu-id-ref"]] -= 1
6533 scaling_info["vdu"].append(
6534 {
6535 "name": vdur.get("name") or vdur.get("vdu-name"),
6536 "vdu_id": vdur["vdu-id-ref"],
6537 "interface": [],
6538 }
6539 )
6540 for interface in vdur["interfaces"]:
6541 scaling_info["vdu"][-1]["interface"].append(
6542 {
6543 "name": interface["name"],
6544 "ip_address": interface["ip-address"],
6545 "mac_address": interface.get("mac-address"),
6546 }
6547 )
6548 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6549
6550 # PRE-SCALE BEGIN
6551 step = "Executing pre-scale vnf-config-primitive"
6552 if scaling_descriptor.get("scaling-config-action"):
6553 for scaling_config_action in scaling_descriptor[
6554 "scaling-config-action"
6555 ]:
6556 if (
6557 scaling_config_action.get("trigger") == "pre-scale-in"
6558 and scaling_type == "SCALE_IN"
6559 ) or (
6560 scaling_config_action.get("trigger") == "pre-scale-out"
6561 and scaling_type == "SCALE_OUT"
6562 ):
6563 vnf_config_primitive = scaling_config_action[
6564 "vnf-config-primitive-name-ref"
6565 ]
6566 step = db_nslcmop_update[
6567 "detailed-status"
6568 ] = "executing pre-scale scaling-config-action '{}'".format(
6569 vnf_config_primitive
6570 )
6571
6572 # look for primitive
6573 for config_primitive in (
6574 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6575 ).get("config-primitive", ()):
6576 if config_primitive["name"] == vnf_config_primitive:
6577 break
6578 else:
6579 raise LcmException(
6580 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6581 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6582 "primitive".format(scaling_group, vnf_config_primitive)
6583 )
6584
6585 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6586 if db_vnfr.get("additionalParamsForVnf"):
6587 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6588
6589 scale_process = "VCA"
6590 db_nsr_update["config-status"] = "configuring pre-scaling"
6591 primitive_params = self._map_primitive_params(
6592 config_primitive, {}, vnfr_params
6593 )
6594
6595 # Pre-scale retry check: Check if this sub-operation has been executed before
6596 op_index = self._check_or_add_scale_suboperation(
6597 db_nslcmop,
6598 vnf_index,
6599 vnf_config_primitive,
6600 primitive_params,
6601 "PRE-SCALE",
6602 )
6603 if op_index == self.SUBOPERATION_STATUS_SKIP:
6604 # Skip sub-operation
6605 result = "COMPLETED"
6606 result_detail = "Done"
6607 self.logger.debug(
6608 logging_text
6609 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6610 vnf_config_primitive, result, result_detail
6611 )
6612 )
6613 else:
6614 if op_index == self.SUBOPERATION_STATUS_NEW:
6615 # New sub-operation: Get index of this sub-operation
6616 op_index = (
6617 len(db_nslcmop.get("_admin", {}).get("operations"))
6618 - 1
6619 )
6620 self.logger.debug(
6621 logging_text
6622 + "vnf_config_primitive={} New sub-operation".format(
6623 vnf_config_primitive
6624 )
6625 )
6626 else:
6627 # retry: Get registered params for this existing sub-operation
6628 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6629 op_index
6630 ]
6631 vnf_index = op.get("member_vnf_index")
6632 vnf_config_primitive = op.get("primitive")
6633 primitive_params = op.get("primitive_params")
6634 self.logger.debug(
6635 logging_text
6636 + "vnf_config_primitive={} Sub-operation retry".format(
6637 vnf_config_primitive
6638 )
6639 )
6640 # Execute the primitive, either with new (first-time) or registered (reintent) args
6641 ee_descriptor_id = config_primitive.get(
6642 "execution-environment-ref"
6643 )
6644 primitive_name = config_primitive.get(
6645 "execution-environment-primitive", vnf_config_primitive
6646 )
6647 ee_id, vca_type = self._look_for_deployed_vca(
6648 nsr_deployed["VCA"],
6649 member_vnf_index=vnf_index,
6650 vdu_id=None,
6651 vdu_count_index=None,
6652 ee_descriptor_id=ee_descriptor_id,
6653 )
6654 result, result_detail = await self._ns_execute_primitive(
6655 ee_id,
6656 primitive_name,
6657 primitive_params,
6658 vca_type=vca_type,
6659 vca_id=vca_id,
6660 )
6661 self.logger.debug(
6662 logging_text
6663 + "vnf_config_primitive={} Done with result {} {}".format(
6664 vnf_config_primitive, result, result_detail
6665 )
6666 )
6667 # Update operationState = COMPLETED | FAILED
6668 self._update_suboperation_status(
6669 db_nslcmop, op_index, result, result_detail
6670 )
6671
6672 if result == "FAILED":
6673 raise LcmException(result_detail)
6674 db_nsr_update["config-status"] = old_config_status
6675 scale_process = None
6676 # PRE-SCALE END
6677
6678 db_nsr_update[
6679 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6680 ] = nb_scale_op
6681 db_nsr_update[
6682 "_admin.scaling-group.{}.time".format(admin_scale_index)
6683 ] = time()
6684
6685 # SCALE-IN VCA - BEGIN
6686 if vca_scaling_info:
6687 step = db_nslcmop_update[
6688 "detailed-status"
6689 ] = "Deleting the execution environments"
6690 scale_process = "VCA"
6691 for vca_info in vca_scaling_info:
6692 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6693 member_vnf_index = str(vca_info["member-vnf-index"])
6694 self.logger.debug(
6695 logging_text + "vdu info: {}".format(vca_info)
6696 )
6697 if vca_info.get("osm_vdu_id"):
6698 vdu_id = vca_info["osm_vdu_id"]
6699 vdu_index = int(vca_info["vdu_index"])
6700 stage[
6701 1
6702 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6703 member_vnf_index, vdu_id, vdu_index
6704 )
6705 stage[2] = step = "Scaling in VCA"
6706 self._write_op_status(op_id=nslcmop_id, stage=stage)
6707 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6708 config_update = db_nsr["configurationStatus"]
6709 for vca_index, vca in enumerate(vca_update):
6710 if (
6711 (vca or vca.get("ee_id"))
6712 and vca["member-vnf-index"] == member_vnf_index
6713 and vca["vdu_count_index"] == vdu_index
6714 ):
6715 if vca.get("vdu_id"):
6716 config_descriptor = get_configuration(
6717 db_vnfd, vca.get("vdu_id")
6718 )
6719 elif vca.get("kdu_name"):
6720 config_descriptor = get_configuration(
6721 db_vnfd, vca.get("kdu_name")
6722 )
6723 else:
6724 config_descriptor = get_configuration(
6725 db_vnfd, db_vnfd["id"]
6726 )
6727 operation_params = (
6728 db_nslcmop.get("operationParams") or {}
6729 )
6730 exec_terminate_primitives = not operation_params.get(
6731 "skip_terminate_primitives"
6732 ) and vca.get("needed_terminate")
6733 task = asyncio.ensure_future(
6734 asyncio.wait_for(
6735 self.destroy_N2VC(
6736 logging_text,
6737 db_nslcmop,
6738 vca,
6739 config_descriptor,
6740 vca_index,
6741 destroy_ee=True,
6742 exec_primitives=exec_terminate_primitives,
6743 scaling_in=True,
6744 vca_id=vca_id,
6745 ),
6746 timeout=self.timeout_charm_delete,
6747 )
6748 )
6749 tasks_dict_info[task] = "Terminating VCA {}".format(
6750 vca.get("ee_id")
6751 )
6752 del vca_update[vca_index]
6753 del config_update[vca_index]
6754 # wait for pending tasks of terminate primitives
6755 if tasks_dict_info:
6756 self.logger.debug(
6757 logging_text
6758 + "Waiting for tasks {}".format(
6759 list(tasks_dict_info.keys())
6760 )
6761 )
6762 error_list = await self._wait_for_tasks(
6763 logging_text,
6764 tasks_dict_info,
6765 min(
6766 self.timeout_charm_delete, self.timeout_ns_terminate
6767 ),
6768 stage,
6769 nslcmop_id,
6770 )
6771 tasks_dict_info.clear()
6772 if error_list:
6773 raise LcmException("; ".join(error_list))
6774
6775 db_vca_and_config_update = {
6776 "_admin.deployed.VCA": vca_update,
6777 "configurationStatus": config_update,
6778 }
6779 self.update_db_2(
6780 "nsrs", db_nsr["_id"], db_vca_and_config_update
6781 )
6782 scale_process = None
6783 # SCALE-IN VCA - END
6784
6785 # SCALE RO - BEGIN
6786 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6787 scale_process = "RO"
6788 if self.ro_config.get("ng"):
6789 await self._scale_ng_ro(
6790 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6791 )
6792 scaling_info.pop("vdu-create", None)
6793 scaling_info.pop("vdu-delete", None)
6794
6795 scale_process = None
6796 # SCALE RO - END
6797
6798 # SCALE KDU - BEGIN
6799 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6800 scale_process = "KDU"
6801 await self._scale_kdu(
6802 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6803 )
6804 scaling_info.pop("kdu-create", None)
6805 scaling_info.pop("kdu-delete", None)
6806
6807 scale_process = None
6808 # SCALE KDU - END
6809
6810 if db_nsr_update:
6811 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6812
6813 # SCALE-UP VCA - BEGIN
6814 if vca_scaling_info:
6815 step = db_nslcmop_update[
6816 "detailed-status"
6817 ] = "Creating new execution environments"
6818 scale_process = "VCA"
6819 for vca_info in vca_scaling_info:
6820 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6821 member_vnf_index = str(vca_info["member-vnf-index"])
6822 self.logger.debug(
6823 logging_text + "vdu info: {}".format(vca_info)
6824 )
6825 vnfd_id = db_vnfr["vnfd-ref"]
6826 if vca_info.get("osm_vdu_id"):
6827 vdu_index = int(vca_info["vdu_index"])
6828 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6829 if db_vnfr.get("additionalParamsForVnf"):
6830 deploy_params.update(
6831 parse_yaml_strings(
6832 db_vnfr["additionalParamsForVnf"].copy()
6833 )
6834 )
6835 descriptor_config = get_configuration(
6836 db_vnfd, db_vnfd["id"]
6837 )
6838 if descriptor_config:
6839 vdu_id = None
6840 vdu_name = None
6841 kdu_name = None
6842 self._deploy_n2vc(
6843 logging_text=logging_text
6844 + "member_vnf_index={} ".format(member_vnf_index),
6845 db_nsr=db_nsr,
6846 db_vnfr=db_vnfr,
6847 nslcmop_id=nslcmop_id,
6848 nsr_id=nsr_id,
6849 nsi_id=nsi_id,
6850 vnfd_id=vnfd_id,
6851 vdu_id=vdu_id,
6852 kdu_name=kdu_name,
6853 member_vnf_index=member_vnf_index,
6854 vdu_index=vdu_index,
6855 vdu_name=vdu_name,
6856 deploy_params=deploy_params,
6857 descriptor_config=descriptor_config,
6858 base_folder=base_folder,
6859 task_instantiation_info=tasks_dict_info,
6860 stage=stage,
6861 )
6862 vdu_id = vca_info["osm_vdu_id"]
6863 vdur = find_in_list(
6864 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6865 )
6866 descriptor_config = get_configuration(db_vnfd, vdu_id)
6867 if vdur.get("additionalParams"):
6868 deploy_params_vdu = parse_yaml_strings(
6869 vdur["additionalParams"]
6870 )
6871 else:
6872 deploy_params_vdu = deploy_params
6873 deploy_params_vdu["OSM"] = get_osm_params(
6874 db_vnfr, vdu_id, vdu_count_index=vdu_index
6875 )
6876 if descriptor_config:
6877 vdu_name = None
6878 kdu_name = None
6879 stage[
6880 1
6881 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6882 member_vnf_index, vdu_id, vdu_index
6883 )
6884 stage[2] = step = "Scaling out VCA"
6885 self._write_op_status(op_id=nslcmop_id, stage=stage)
6886 self._deploy_n2vc(
6887 logging_text=logging_text
6888 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6889 member_vnf_index, vdu_id, vdu_index
6890 ),
6891 db_nsr=db_nsr,
6892 db_vnfr=db_vnfr,
6893 nslcmop_id=nslcmop_id,
6894 nsr_id=nsr_id,
6895 nsi_id=nsi_id,
6896 vnfd_id=vnfd_id,
6897 vdu_id=vdu_id,
6898 kdu_name=kdu_name,
6899 member_vnf_index=member_vnf_index,
6900 vdu_index=vdu_index,
6901 vdu_name=vdu_name,
6902 deploy_params=deploy_params_vdu,
6903 descriptor_config=descriptor_config,
6904 base_folder=base_folder,
6905 task_instantiation_info=tasks_dict_info,
6906 stage=stage,
6907 )
6908 # SCALE-UP VCA - END
6909 scale_process = None
6910
6911 # POST-SCALE BEGIN
6912 # execute primitive service POST-SCALING
6913 step = "Executing post-scale vnf-config-primitive"
6914 if scaling_descriptor.get("scaling-config-action"):
6915 for scaling_config_action in scaling_descriptor[
6916 "scaling-config-action"
6917 ]:
6918 if (
6919 scaling_config_action.get("trigger") == "post-scale-in"
6920 and scaling_type == "SCALE_IN"
6921 ) or (
6922 scaling_config_action.get("trigger") == "post-scale-out"
6923 and scaling_type == "SCALE_OUT"
6924 ):
6925 vnf_config_primitive = scaling_config_action[
6926 "vnf-config-primitive-name-ref"
6927 ]
6928 step = db_nslcmop_update[
6929 "detailed-status"
6930 ] = "executing post-scale scaling-config-action '{}'".format(
6931 vnf_config_primitive
6932 )
6933
6934 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6935 if db_vnfr.get("additionalParamsForVnf"):
6936 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6937
6938 # look for primitive
6939 for config_primitive in (
6940 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6941 ).get("config-primitive", ()):
6942 if config_primitive["name"] == vnf_config_primitive:
6943 break
6944 else:
6945 raise LcmException(
6946 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6947 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6948 "config-primitive".format(
6949 scaling_group, vnf_config_primitive
6950 )
6951 )
6952 scale_process = "VCA"
6953 db_nsr_update["config-status"] = "configuring post-scaling"
6954 primitive_params = self._map_primitive_params(
6955 config_primitive, {}, vnfr_params
6956 )
6957
6958 # Post-scale retry check: Check if this sub-operation has been executed before
6959 op_index = self._check_or_add_scale_suboperation(
6960 db_nslcmop,
6961 vnf_index,
6962 vnf_config_primitive,
6963 primitive_params,
6964 "POST-SCALE",
6965 )
6966 if op_index == self.SUBOPERATION_STATUS_SKIP:
6967 # Skip sub-operation
6968 result = "COMPLETED"
6969 result_detail = "Done"
6970 self.logger.debug(
6971 logging_text
6972 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6973 vnf_config_primitive, result, result_detail
6974 )
6975 )
6976 else:
6977 if op_index == self.SUBOPERATION_STATUS_NEW:
6978 # New sub-operation: Get index of this sub-operation
6979 op_index = (
6980 len(db_nslcmop.get("_admin", {}).get("operations"))
6981 - 1
6982 )
6983 self.logger.debug(
6984 logging_text
6985 + "vnf_config_primitive={} New sub-operation".format(
6986 vnf_config_primitive
6987 )
6988 )
6989 else:
6990 # retry: Get registered params for this existing sub-operation
6991 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6992 op_index
6993 ]
6994 vnf_index = op.get("member_vnf_index")
6995 vnf_config_primitive = op.get("primitive")
6996 primitive_params = op.get("primitive_params")
6997 self.logger.debug(
6998 logging_text
6999 + "vnf_config_primitive={} Sub-operation retry".format(
7000 vnf_config_primitive
7001 )
7002 )
7003 # Execute the primitive, either with new (first-time) or registered (reintent) args
7004 ee_descriptor_id = config_primitive.get(
7005 "execution-environment-ref"
7006 )
7007 primitive_name = config_primitive.get(
7008 "execution-environment-primitive", vnf_config_primitive
7009 )
7010 ee_id, vca_type = self._look_for_deployed_vca(
7011 nsr_deployed["VCA"],
7012 member_vnf_index=vnf_index,
7013 vdu_id=None,
7014 vdu_count_index=None,
7015 ee_descriptor_id=ee_descriptor_id,
7016 )
7017 result, result_detail = await self._ns_execute_primitive(
7018 ee_id,
7019 primitive_name,
7020 primitive_params,
7021 vca_type=vca_type,
7022 vca_id=vca_id,
7023 )
7024 self.logger.debug(
7025 logging_text
7026 + "vnf_config_primitive={} Done with result {} {}".format(
7027 vnf_config_primitive, result, result_detail
7028 )
7029 )
7030 # Update operationState = COMPLETED | FAILED
7031 self._update_suboperation_status(
7032 db_nslcmop, op_index, result, result_detail
7033 )
7034
7035 if result == "FAILED":
7036 raise LcmException(result_detail)
7037 db_nsr_update["config-status"] = old_config_status
7038 scale_process = None
7039 # POST-SCALE END
7040
7041 db_nsr_update[
7042 "detailed-status"
7043 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7044 db_nsr_update["operational-status"] = (
7045 "running"
7046 if old_operational_status == "failed"
7047 else old_operational_status
7048 )
7049 db_nsr_update["config-status"] = old_config_status
7050 return
7051 except (
7052 ROclient.ROClientException,
7053 DbException,
7054 LcmException,
7055 NgRoException,
7056 ) as e:
7057 self.logger.error(logging_text + "Exit Exception {}".format(e))
7058 exc = e
7059 except asyncio.CancelledError:
7060 self.logger.error(
7061 logging_text + "Cancelled Exception while '{}'".format(step)
7062 )
7063 exc = "Operation was cancelled"
7064 except Exception as e:
7065 exc = traceback.format_exc()
7066 self.logger.critical(
7067 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7068 exc_info=True,
7069 )
7070 finally:
7071 self._write_ns_status(
7072 nsr_id=nsr_id,
7073 ns_state=None,
7074 current_operation="IDLE",
7075 current_operation_id=None,
7076 )
7077 if tasks_dict_info:
7078 stage[1] = "Waiting for instantiate pending tasks."
7079 self.logger.debug(logging_text + stage[1])
7080 exc = await self._wait_for_tasks(
7081 logging_text,
7082 tasks_dict_info,
7083 self.timeout_ns_deploy,
7084 stage,
7085 nslcmop_id,
7086 nsr_id=nsr_id,
7087 )
7088 if exc:
7089 db_nslcmop_update[
7090 "detailed-status"
7091 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7092 nslcmop_operation_state = "FAILED"
7093 if db_nsr:
7094 db_nsr_update["operational-status"] = old_operational_status
7095 db_nsr_update["config-status"] = old_config_status
7096 db_nsr_update["detailed-status"] = ""
7097 if scale_process:
7098 if "VCA" in scale_process:
7099 db_nsr_update["config-status"] = "failed"
7100 if "RO" in scale_process:
7101 db_nsr_update["operational-status"] = "failed"
7102 db_nsr_update[
7103 "detailed-status"
7104 ] = "FAILED scaling nslcmop={} {}: {}".format(
7105 nslcmop_id, step, exc
7106 )
7107 else:
7108 error_description_nslcmop = None
7109 nslcmop_operation_state = "COMPLETED"
7110 db_nslcmop_update["detailed-status"] = "Done"
7111
7112 self._write_op_status(
7113 op_id=nslcmop_id,
7114 stage="",
7115 error_message=error_description_nslcmop,
7116 operation_state=nslcmop_operation_state,
7117 other_update=db_nslcmop_update,
7118 )
7119 if db_nsr:
7120 self._write_ns_status(
7121 nsr_id=nsr_id,
7122 ns_state=None,
7123 current_operation="IDLE",
7124 current_operation_id=None,
7125 other_update=db_nsr_update,
7126 )
7127
7128 if nslcmop_operation_state:
7129 try:
7130 msg = {
7131 "nsr_id": nsr_id,
7132 "nslcmop_id": nslcmop_id,
7133 "operationState": nslcmop_operation_state,
7134 }
7135 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7136 except Exception as e:
7137 self.logger.error(
7138 logging_text + "kafka_write notification Exception {}".format(e)
7139 )
7140 self.logger.debug(logging_text + "Exit")
7141 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7142
7143 async def _scale_kdu(
7144 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7145 ):
7146 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7147 for kdu_name in _scaling_info:
7148 for kdu_scaling_info in _scaling_info[kdu_name]:
7149 deployed_kdu, index = get_deployed_kdu(
7150 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7151 )
7152 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7153 kdu_instance = deployed_kdu["kdu-instance"]
7154 kdu_model = deployed_kdu.get("kdu-model")
7155 scale = int(kdu_scaling_info["scale"])
7156 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7157
7158 db_dict = {
7159 "collection": "nsrs",
7160 "filter": {"_id": nsr_id},
7161 "path": "_admin.deployed.K8s.{}".format(index),
7162 }
7163
7164 step = "scaling application {}".format(
7165 kdu_scaling_info["resource-name"]
7166 )
7167 self.logger.debug(logging_text + step)
7168
7169 if kdu_scaling_info["type"] == "delete":
7170 kdu_config = get_configuration(db_vnfd, kdu_name)
7171 if (
7172 kdu_config
7173 and kdu_config.get("terminate-config-primitive")
7174 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7175 ):
7176 terminate_config_primitive_list = kdu_config.get(
7177 "terminate-config-primitive"
7178 )
7179 terminate_config_primitive_list.sort(
7180 key=lambda val: int(val["seq"])
7181 )
7182
7183 for (
7184 terminate_config_primitive
7185 ) in terminate_config_primitive_list:
7186 primitive_params_ = self._map_primitive_params(
7187 terminate_config_primitive, {}, {}
7188 )
7189 step = "execute terminate config primitive"
7190 self.logger.debug(logging_text + step)
7191 await asyncio.wait_for(
7192 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7193 cluster_uuid=cluster_uuid,
7194 kdu_instance=kdu_instance,
7195 primitive_name=terminate_config_primitive["name"],
7196 params=primitive_params_,
7197 db_dict=db_dict,
7198 total_timeout=self.timeout_primitive,
7199 vca_id=vca_id,
7200 ),
7201 timeout=self.timeout_primitive
7202 * self.timeout_primitive_outer_factor,
7203 )
7204
7205 await asyncio.wait_for(
7206 self.k8scluster_map[k8s_cluster_type].scale(
7207 kdu_instance=kdu_instance,
7208 scale=scale,
7209 resource_name=kdu_scaling_info["resource-name"],
7210 total_timeout=self.timeout_scale_on_error,
7211 vca_id=vca_id,
7212 cluster_uuid=cluster_uuid,
7213 kdu_model=kdu_model,
7214 atomic=True,
7215 db_dict=db_dict,
7216 ),
7217 timeout=self.timeout_scale_on_error
7218 * self.timeout_scale_on_error_outer_factor,
7219 )
7220
7221 if kdu_scaling_info["type"] == "create":
7222 kdu_config = get_configuration(db_vnfd, kdu_name)
7223 if (
7224 kdu_config
7225 and kdu_config.get("initial-config-primitive")
7226 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7227 ):
7228 initial_config_primitive_list = kdu_config.get(
7229 "initial-config-primitive"
7230 )
7231 initial_config_primitive_list.sort(
7232 key=lambda val: int(val["seq"])
7233 )
7234
7235 for initial_config_primitive in initial_config_primitive_list:
7236 primitive_params_ = self._map_primitive_params(
7237 initial_config_primitive, {}, {}
7238 )
7239 step = "execute initial config primitive"
7240 self.logger.debug(logging_text + step)
7241 await asyncio.wait_for(
7242 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7243 cluster_uuid=cluster_uuid,
7244 kdu_instance=kdu_instance,
7245 primitive_name=initial_config_primitive["name"],
7246 params=primitive_params_,
7247 db_dict=db_dict,
7248 vca_id=vca_id,
7249 ),
7250 timeout=600,
7251 )
7252
7253 async def _scale_ng_ro(
7254 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7255 ):
7256 nsr_id = db_nslcmop["nsInstanceId"]
7257 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7258 db_vnfrs = {}
7259
7260 # read from db: vnfd's for every vnf
7261 db_vnfds = []
7262
7263 # for each vnf in ns, read vnfd
7264 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7265 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7266 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7267 # if we haven't this vnfd, read it from db
7268 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7269 # read from db
7270 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7271 db_vnfds.append(vnfd)
7272 n2vc_key = self.n2vc.get_public_key()
7273 n2vc_key_list = [n2vc_key]
7274 self.scale_vnfr(
7275 db_vnfr,
7276 vdu_scaling_info.get("vdu-create"),
7277 vdu_scaling_info.get("vdu-delete"),
7278 mark_delete=True,
7279 )
7280 # db_vnfr has been updated, update db_vnfrs to use it
7281 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7282 await self._instantiate_ng_ro(
7283 logging_text,
7284 nsr_id,
7285 db_nsd,
7286 db_nsr,
7287 db_nslcmop,
7288 db_vnfrs,
7289 db_vnfds,
7290 n2vc_key_list,
7291 stage=stage,
7292 start_deploy=time(),
7293 timeout_ns_deploy=self.timeout_ns_deploy,
7294 )
7295 if vdu_scaling_info.get("vdu-delete"):
7296 self.scale_vnfr(
7297 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7298 )
7299
7300 async def extract_prometheus_scrape_jobs(
7301 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7302 ):
7303 # look if exist a file called 'prometheus*.j2' and
7304 artifact_content = self.fs.dir_ls(artifact_path)
7305 job_file = next(
7306 (
7307 f
7308 for f in artifact_content
7309 if f.startswith("prometheus") and f.endswith(".j2")
7310 ),
7311 None,
7312 )
7313 if not job_file:
7314 return
7315 with self.fs.file_open((artifact_path, job_file), "r") as f:
7316 job_data = f.read()
7317
7318 # TODO get_service
7319 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7320 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7321 host_port = "80"
7322 vnfr_id = vnfr_id.replace("-", "")
7323 variables = {
7324 "JOB_NAME": vnfr_id,
7325 "TARGET_IP": target_ip,
7326 "EXPORTER_POD_IP": host_name,
7327 "EXPORTER_POD_PORT": host_port,
7328 }
7329 job_list = parse_job(job_data, variables)
7330 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7331 for job in job_list:
7332 if (
7333 not isinstance(job.get("job_name"), str)
7334 or vnfr_id not in job["job_name"]
7335 ):
7336 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7337 job["nsr_id"] = nsr_id
7338 job["vnfr_id"] = vnfr_id
7339 return job_list
7340
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild one VDU instance of a VNF through RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmop that triggered this operation
        :param vnf_id: _id of the target vnfr record
        :param additional_param: dict with "vdu_id" and "count-index" of the
            target VDU instance
        :param operation_type: RO operate action; also written as the nsr
            operational-status while the action runs
        :return: ("COMPLETED", "Done") on success, or
            ("FAILED", <error text>) on any error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # locate the target VDU instance (vdu id + count-index) in the vnfr
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info is the target VIM identifier
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            # reflect the running action in the nsr operational-status
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached on error: every except branch above has set exc
        return "FAILED", "Error in operate VNF {}".format(exc)
7427
7428 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7429 """
7430 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7431
7432 :param: vim_account_id: VIM Account ID
7433
7434 :return: (cloud_name, cloud_credential)
7435 """
7436 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7437 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7438
7439 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7440 """
7441 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7442
7443 :param: vim_account_id: VIM Account ID
7444
7445 :return: (cloud_name, cloud_credential)
7446 """
7447 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7448 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7449
7450 async def migrate(self, nsr_id, nslcmop_id):
7451 """
7452 Migrate VNFs and VDUs instances in a NS
7453
7454 :param: nsr_id: NS Instance ID
7455 :param: nslcmop_id: nslcmop ID of migrate
7456
7457 """
7458 # Try to lock HA task here
7459 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7460 if not task_is_locked_by_me:
7461 return
7462 logging_text = "Task ns={} migrate ".format(nsr_id)
7463 self.logger.debug(logging_text + "Enter")
7464 # get all needed from database
7465 db_nslcmop = None
7466 db_nslcmop_update = {}
7467 nslcmop_operation_state = None
7468 db_nsr_update = {}
7469 target = {}
7470 exc = None
7471 # in case of error, indicates what part of scale was failed to put nsr at error status
7472 start_deploy = time()
7473
7474 try:
7475 # wait for any previous tasks in process
7476 step = "Waiting for previous operations to terminate"
7477 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7478
7479 self._write_ns_status(
7480 nsr_id=nsr_id,
7481 ns_state=None,
7482 current_operation="MIGRATING",
7483 current_operation_id=nslcmop_id,
7484 )
7485 step = "Getting nslcmop from database"
7486 self.logger.debug(
7487 step + " after having waited for previous tasks to be completed"
7488 )
7489 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7490 migrate_params = db_nslcmop.get("operationParams")
7491
7492 target = {}
7493 target.update(migrate_params)
7494 desc = await self.RO.migrate(nsr_id, target)
7495 self.logger.debug("RO return > {}".format(desc))
7496 action_id = desc["action_id"]
7497 await self._wait_ng_ro(
7498 nsr_id,
7499 action_id,
7500 nslcmop_id,
7501 start_deploy,
7502 self.timeout_migrate,
7503 operation="migrate",
7504 )
7505 except (ROclient.ROClientException, DbException, LcmException) as e:
7506 self.logger.error("Exit Exception {}".format(e))
7507 exc = e
7508 except asyncio.CancelledError:
7509 self.logger.error("Cancelled Exception while '{}'".format(step))
7510 exc = "Operation was cancelled"
7511 except Exception as e:
7512 exc = traceback.format_exc()
7513 self.logger.critical(
7514 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7515 )
7516 finally:
7517 self._write_ns_status(
7518 nsr_id=nsr_id,
7519 ns_state=None,
7520 current_operation="IDLE",
7521 current_operation_id=None,
7522 )
7523 if exc:
7524 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7525 nslcmop_operation_state = "FAILED"
7526 else:
7527 nslcmop_operation_state = "COMPLETED"
7528 db_nslcmop_update["detailed-status"] = "Done"
7529 db_nsr_update["detailed-status"] = "Done"
7530
7531 self._write_op_status(
7532 op_id=nslcmop_id,
7533 stage="",
7534 error_message="",
7535 operation_state=nslcmop_operation_state,
7536 other_update=db_nslcmop_update,
7537 )
7538 if nslcmop_operation_state:
7539 try:
7540 msg = {
7541 "nsr_id": nsr_id,
7542 "nslcmop_id": nslcmop_id,
7543 "operationState": nslcmop_operation_state,
7544 }
7545 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7546 except Exception as e:
7547 self.logger.error(
7548 logging_text + "kafka_write notification Exception {}".format(e)
7549 )
7550 self.logger.debug(logging_text + "Exit")
7551 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7552
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Launches the RO healing as a background task, redeploys the N2VC
        execution environments for every targeted VDU, then waits for all
        pending tasks and persists the final operation/NS status.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # keep the pre-heal statuses so they can be restored on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            # RO healing runs concurrently; it is awaited through
            # _wait_for_tasks in the finally block below
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # no VDU list given: build one entry per existing VDU
                        # so the whole VNF gets healed
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-index is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                # also collects the failure (if any) of the background tasks
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-heal statuses, then mark failed the part
                    # (VCA and/or RO) whose task did not complete
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    # notify the operation result through kafka
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7843
7844 async def heal_RO(
7845 self,
7846 logging_text,
7847 nsr_id,
7848 db_nslcmop,
7849 stage,
7850 ):
7851 """
7852 Heal at RO
7853 :param logging_text: preffix text to use at logging
7854 :param nsr_id: nsr identity
7855 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7856 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7857 :return: None or exception
7858 """
7859
7860 def get_vim_account(vim_account_id):
7861 nonlocal db_vims
7862 if vim_account_id in db_vims:
7863 return db_vims[vim_account_id]
7864 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7865 db_vims[vim_account_id] = db_vim
7866 return db_vim
7867
7868 try:
7869 start_heal = time()
7870 ns_params = db_nslcmop.get("operationParams")
7871 if ns_params and ns_params.get("timeout_ns_heal"):
7872 timeout_ns_heal = ns_params["timeout_ns_heal"]
7873 else:
7874 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
7875
7876 db_vims = {}
7877
7878 nslcmop_id = db_nslcmop["_id"]
7879 target = {
7880 "action_id": nslcmop_id,
7881 }
7882 self.logger.warning(
7883 "db_nslcmop={} and timeout_ns_heal={}".format(
7884 db_nslcmop, timeout_ns_heal
7885 )
7886 )
7887 target.update(db_nslcmop.get("operationParams", {}))
7888
7889 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7890 desc = await self.RO.recreate(nsr_id, target)
7891 self.logger.debug("RO return > {}".format(desc))
7892 action_id = desc["action_id"]
7893 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7894 await self._wait_ng_ro(
7895 nsr_id,
7896 action_id,
7897 nslcmop_id,
7898 start_heal,
7899 timeout_ns_heal,
7900 stage,
7901 operation="healing",
7902 )
7903
7904 # Updating NSR
7905 db_nsr_update = {
7906 "_admin.deployed.RO.operational-status": "running",
7907 "detailed-status": " ".join(stage),
7908 }
7909 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7910 self._write_op_status(nslcmop_id, stage)
7911 self.logger.debug(
7912 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7913 )
7914
7915 except Exception as e:
7916 stage[2] = "ERROR healing at VIM"
7917 # self.set_vnfr_at_error(db_vnfrs, str(e))
7918 self.logger.error(
7919 "Error healing at VIM {}".format(e),
7920 exc_info=not isinstance(
7921 e,
7922 (
7923 ROclient.ROClientException,
7924 LcmException,
7925 DbException,
7926 NgRoException,
7927 ),
7928 ),
7929 )
7930 raise
7931
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment.

        For each execution-environment item of descriptor_config (juju charm
        or helm chart; anything else is skipped), locate the matching entry in
        <nsrs>._admin.deployed.VCA — creating and persisting a new entry when
        none matches — then spawn self.heal_N2VC as an asyncio task and
        register it with self.lcm_tasks and task_instantiation_info.

        :param logging_text: prefix prepended to every log line
        :param db_nsr: nsr database record; its _admin.deployed.VCA list may
            be extended in place
        :param db_vnfr: vnfr database record (falsy for NS-level charms)
        :param task_instantiation_info: dict updated with the created task,
            keyed by the asyncio task object
        :return: None; the actual healing work runs in the spawned tasks
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Build the list of execution environments to process.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                # juju charm: decide among lxc proxy / k8s proxy / native
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                # helm chart: helm-version "v2" selects legacy helm, else helm-v3
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # Search for an already-deployed VCA entry for this element; the
            # for/else below creates a fresh entry when the loop ends without
            # a break (i.e. no match was found).
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory record aligned with what was just persisted
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8093
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach and re-configure one VCA execution environment after healing.

        Sequence (order matters — each step persists state before the next):
        for native charms, wait for the healed VM and register a new execution
        environment; install the configuration software; for proxy/helm types
        optionally obtain the EE ssh public key; wait for RO to finish the
        healing operation and get the management IP; finally, if the operation
        parameter "run-day1" is set, re-run the initial config primitives.

        :param vca_index: index of this VCA inside <nsrs>._admin.deployed.VCA
        :param config_descriptor: descriptor section holding config-access and
            initial/terminate config primitives
        :param ee_config_descriptor: execution-environment item being healed
        :raises LcmException: wrapping any failure, after writing BROKEN
            configuration status to the nsr
        """
        nsr_id = db_nsr["_id"]
        # dotted prefix for partial updates of this VCA entry in the nsr record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current stage for the error message raised on failure
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # pass the "config" primitive parameters (if any) at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

            self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log a stack trace for unexpected exception types; known
            # LCM/N2VC/DB errors and cancellations keep the log concise
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8503
8504 async def _wait_heal_ro(
8505 self,
8506 nsr_id,
8507 timeout=600,
8508 ):
8509 start_time = time()
8510 while time() <= start_time + timeout:
8511 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8512 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8513 "operational-status"
8514 ]
8515 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8516 if operational_status_ro != "healing":
8517 break
8518 await asyncio.sleep(15, loop=self.loop)
8519 else: # timeout_ns_deploy
8520 raise NgRoException("Timeout waiting ns to deploy")
8521
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Acquires the HA task lock, forwards the operation parameters to RO
        (self.RO.vertical_scale), waits for RO to complete, then always
        writes the final operation state and publishes a "verticalscaled"
        kafka notification.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation; nothing to do here
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO target is built straight from the operation parameters
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback in the operation status
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always release the "current operation" marker and report the result
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # notify subscribers (e.g. NBI) about the operation outcome;
                    # a kafka failure must not fail the operation itself
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")