Heal: Await heal operation in RO before N2VC part to avoid errors
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.nsr import (
38 get_deployed_kdu,
39 get_deployed_vca,
40 get_deployed_vca_list,
41 get_nsd,
42 )
43 from osm_lcm.data_utils.vca import (
44 DeployedComponent,
45 DeployedK8sResource,
46 DeployedVCA,
47 EELevel,
48 Relation,
49 EERelation,
50 safe_get_ee_relation,
51 )
52 from osm_lcm.ng_ro import NgRoClient, NgRoException
53 from osm_lcm.lcm_utils import (
54 LcmException,
55 LcmExceptionNoMgmtIP,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 get_ee_id_parts,
63 )
64 from osm_lcm.data_utils.nsd import (
65 get_ns_configuration_relation_list,
66 get_vnf_profile,
67 get_vnf_profiles,
68 )
69 from osm_lcm.data_utils.vnfd import (
70 get_kdu,
71 get_kdu_services,
72 get_relation_list,
73 get_vdu_list,
74 get_vdu_profile,
75 get_ee_sorted_initial_config_primitive_list,
76 get_ee_sorted_terminate_config_primitive_list,
77 get_kdu_list,
78 get_virtual_link_profiles,
79 get_vdu,
80 get_configuration,
81 get_vdu_index,
82 get_scaling_aspect,
83 get_number_of_instances,
84 get_juju_ee_ref,
85 get_kdu_resource_profile,
86 find_software_version,
87 )
88 from osm_lcm.data_utils.list_utils import find_in_list
89 from osm_lcm.data_utils.vnfr import (
90 get_osm_params,
91 get_vdur_index,
92 get_kdur,
93 get_volumes_from_instantiation_params,
94 )
95 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
96 from osm_lcm.data_utils.database.vim_account import VimAccountDB
97 from n2vc.definitions import RelationEndpoint
98 from n2vc.k8s_helm_conn import K8sHelmConnector
99 from n2vc.k8s_helm3_conn import K8sHelm3Connector
100 from n2vc.k8s_juju_conn import K8sJujuConnector
101
102 from osm_common.dbbase import DbException
103 from osm_common.fsbase import FsException
104
105 from osm_lcm.data_utils.database.database import Database
106 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
107 from osm_lcm.data_utils.wim import (
108 get_sdn_ports,
109 get_target_wim_attrs,
110 select_feasible_wim_account,
111 )
112
113 from n2vc.n2vc_juju_conn import N2VCJujuConnector
114 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
115
116 from osm_lcm.lcm_helm_conn import LCMHelmConn
117 from osm_lcm.osm_config import OsmConfigBuilder
118 from osm_lcm.prometheus import parse_job
119
120 from copy import copy, deepcopy
121 from time import time
122 from uuid import uuid4
123
124 from random import randint
125
# Module author attribution (used by packaging/introspection tools).
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
127
128
class NsLcm(LcmBase):
    # Class-level defaults (seconds unless stated otherwise) used by the NS
    # lifecycle operations below and overridable via configuration.
    timeout_scale_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_scale_on_error_outer_factor = 1.05  # Factor in relation to timeout_scale_on_error related to the timeout to be applied within the asyncio.wait_for coroutine
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm application
    timeout_primitive = 30 * 60  # Timeout for primitive execution
    timeout_primitive_outer_factor = 1.05  # Factor in relation to timeout_primitive related to the timeout to be applied within the asyncio.wait_for coroutine
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate (start/stop/rebuild) on vnfs
    timeout_verticalscale = 1800  # default global timeout for vertical scaling
    # Sentinel return codes for sub-operation lookups (negative so they cannot
    # collide with a real sub-operation index).
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Label for the VCA deployment task; presumably shown in operation status — confirm usage
    task_name_deploy_vca = "Deploying VCA"
151
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler (forwarded to LcmBase)
        :param lcm_tasks: registry of LCM asyncio tasks (stored for later use)
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all connectors created here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # Database and filesystem are singletons already initialized elsewhere
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local mutations do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju-based VCA); DB is refreshed on juju events
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # k8s connector for helm v2 charts
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # k8s connector for helm v3 charts
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # k8s connector for juju bundles; DB is refreshed on k8s status events
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # descriptor kdu-type string -> k8s connector ("chart" maps to helm v3)
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # vca-type string -> execution-environment connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # LCM operation type -> RO coroutine used to poll its status
        # (healing is a recreate in RO, hence the dedicated recreate_status)
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
240
241 @staticmethod
242 def increment_ip_mac(ip_mac, vm_index=1):
243 if not isinstance(ip_mac, str):
244 return ip_mac
245 try:
246 # try with ipv4 look for last dot
247 i = ip_mac.rfind(".")
248 if i > 0:
249 i += 1
250 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
251 # try with ipv6 or mac look for last colon. Operate in hex
252 i = ip_mac.rfind(":")
253 if i > 0:
254 i += 1
255 # format in hex, len can be 2 for mac or 4 for ipv6
256 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
257 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
258 )
259 except Exception:
260 pass
261 return None
262
263 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
264
265 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
266
267 try:
268 # TODO filter RO descriptor fields...
269
270 # write to database
271 db_dict = dict()
272 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
273 db_dict["deploymentStatus"] = ro_descriptor
274 self.update_db_2("nsrs", nsrs_id, db_dict)
275
276 except Exception as e:
277 self.logger.warn(
278 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
279 )
280
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback fired by N2VC on juju model changes: refresh vcaStatus and
        the READY/DEGRADED nsState of the NS record in the database.

        :param table: db table reported by N2VC (unused; "nsrs" is read directly)
        :param filter: db filter; its "_id" entry is the nsr id
        :param path: dotted db path that changed; the last component is the VCA index
        :param updated_data: changed data (unused; full status is re-queried from N2VC)
        :param vca_id: optional VCA id to address the right controller
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # VCA index is encoded as the last component of the dotted path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key here, so
                # these assignments raise KeyError and are swallowed by the except
                # below — the configurationStatus change never reaches the db.
                # Looks like dead code; confirm intended behavior before fixing.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
382
383 async def _on_update_k8s_db(
384 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
385 ):
386 """
387 Updating vca status in NSR record
388 :param cluster_uuid: UUID of a k8s cluster
389 :param kdu_instance: The unique name of the KDU instance
390 :param filter: To get nsr_id
391 :cluster_type: The cluster type (juju, k8s)
392 :return: none
393 """
394
395 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
396 # .format(cluster_uuid, kdu_instance, filter))
397
398 nsr_id = filter.get("_id")
399 try:
400 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
401 cluster_uuid=cluster_uuid,
402 kdu_instance=kdu_instance,
403 yaml_format=False,
404 complete_status=True,
405 vca_id=vca_id,
406 )
407
408 # vcaStatus
409 db_dict = dict()
410 db_dict["vcaStatus"] = {nsr_id: vca_status}
411
412 self.logger.debug(
413 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
414 )
415
416 # write to database
417 self.update_db_2("nsrs", nsr_id, db_dict)
418 except (asyncio.CancelledError, asyncio.TimeoutError):
419 raise
420 except Exception as e:
421 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
422
423 @staticmethod
424 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
425 try:
426 env = Environment(
427 undefined=StrictUndefined,
428 autoescape=select_autoescape(default_for_string=True, default=True),
429 )
430 template = env.from_string(cloud_init_text)
431 return template.render(additional_params or {})
432 except UndefinedError as e:
433 raise LcmException(
434 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
435 "file, must be provided in the instantiation parameters inside the "
436 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
437 )
438 except (TemplateError, TemplateNotFound) as e:
439 raise LcmException(
440 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
441 vnfd_id, vdu_id, e
442 )
443 )
444
445 def _get_vdu_cloud_init_content(self, vdu, vnfd):
446 cloud_init_content = cloud_init_file = None
447 try:
448 if vdu.get("cloud-init-file"):
449 base_folder = vnfd["_admin"]["storage"]
450 if base_folder["pkg-dir"]:
451 cloud_init_file = "{}/{}/cloud_init/{}".format(
452 base_folder["folder"],
453 base_folder["pkg-dir"],
454 vdu["cloud-init-file"],
455 )
456 else:
457 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
458 base_folder["folder"],
459 vdu["cloud-init-file"],
460 )
461 with self.fs.file_open(cloud_init_file, "r") as ci_file:
462 cloud_init_content = ci_file.read()
463 elif vdu.get("cloud-init"):
464 cloud_init_content = vdu["cloud-init"]
465
466 return cloud_init_content
467 except FsException as e:
468 raise LcmException(
469 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
470 vnfd["id"], vdu["id"], cloud_init_file, e
471 )
472 )
473
474 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
475 vdur = next(
476 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
477 )
478 additional_params = vdur.get("additionalParams")
479 return parse_yaml_strings(additional_params)
480
481 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
482 """
483 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
484 :param vnfd: input vnfd
485 :param new_id: overrides vnf id if provided
486 :param additionalParams: Instantiation params for VNFs provided
487 :param nsrId: Id of the NSR
488 :return: copy of vnfd
489 """
490 vnfd_RO = deepcopy(vnfd)
491 # remove unused by RO configuration, monitoring, scaling and internal keys
492 vnfd_RO.pop("_id", None)
493 vnfd_RO.pop("_admin", None)
494 vnfd_RO.pop("monitoring-param", None)
495 vnfd_RO.pop("scaling-group-descriptor", None)
496 vnfd_RO.pop("kdu", None)
497 vnfd_RO.pop("k8s-cluster", None)
498 if new_id:
499 vnfd_RO["id"] = new_id
500
501 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
502 for vdu in get_iterable(vnfd_RO, "vdu"):
503 vdu.pop("cloud-init-file", None)
504 vdu.pop("cloud-init", None)
505 return vnfd_RO
506
507 @staticmethod
508 def ip_profile_2_RO(ip_profile):
509 RO_ip_profile = deepcopy(ip_profile)
510 if "dns-server" in RO_ip_profile:
511 if isinstance(RO_ip_profile["dns-server"], list):
512 RO_ip_profile["dns-address"] = []
513 for ds in RO_ip_profile.pop("dns-server"):
514 RO_ip_profile["dns-address"].append(ds["address"])
515 else:
516 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
517 if RO_ip_profile.get("ip-version") == "ipv4":
518 RO_ip_profile["ip-version"] = "IPv4"
519 if RO_ip_profile.get("ip-version") == "ipv6":
520 RO_ip_profile["ip-version"] = "IPv6"
521 if "dhcp-params" in RO_ip_profile:
522 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
523 return RO_ip_profile
524
525 def _get_ro_vim_id_for_vim_account(self, vim_account):
526 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
527 if db_vim["_admin"]["operationalState"] != "ENABLED":
528 raise LcmException(
529 "VIM={} is not available. operationalState={}".format(
530 vim_account, db_vim["_admin"]["operationalState"]
531 )
532 )
533 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
534 return RO_vim_id
535
536 def get_ro_wim_id_for_wim_account(self, wim_account):
537 if isinstance(wim_account, str):
538 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
539 if db_wim["_admin"]["operationalState"] != "ENABLED":
540 raise LcmException(
541 "WIM={} is not available. operationalState={}".format(
542 wim_account, db_wim["_admin"]["operationalState"]
543 )
544 )
545 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
546 return RO_wim_id
547 else:
548 return wim_account
549
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale in/out operation to the vnfr's vdur list in the database.

        :param db_vnfr: vnfr content; its "vdur" list is refreshed in place at the end
        :param vdu_create: dict of vdu-id -> number of instances to add
        :param vdu_delete: dict of vdu-id -> number of instances to remove
        :param mark_delete: if True, only mark the vdurs with status DELETING
            (db removal deferred); if False, pull them from the database here
        :return: None
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone the newest existing vdur of this vdu as the base for the new ones
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each replica gets a fresh id, BUILD status and consecutive count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are derived from the template's by offsetting
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # newest replicas (highest indexes) are marked for deletion
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # push new vdurs (and template, when scaling to 0) in a single db write
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
661
662 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
663 """
664 Updates database nsr with the RO info for the created vld
665 :param ns_update_nsr: dictionary to be filled with the updated info
666 :param db_nsr: content of db_nsr. This is also modified
667 :param nsr_desc_RO: nsr descriptor from RO
668 :return: Nothing, LcmException is raised on errors
669 """
670
671 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
672 for net_RO in get_iterable(nsr_desc_RO, "nets"):
673 if vld["id"] != net_RO.get("ns_net_osm_id"):
674 continue
675 vld["vim-id"] = net_RO.get("vim_net_id")
676 vld["name"] = net_RO.get("vim_name")
677 vld["status"] = net_RO.get("status")
678 vld["status-detailed"] = net_RO.get("error_msg")
679 ns_update_nsr["vld.{}".format(vld_index)] = vld
680 break
681 else:
682 raise LcmException(
683 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
684 )
685
686 def set_vnfr_at_error(self, db_vnfrs, error_text):
687 try:
688 for db_vnfr in db_vnfrs.values():
689 vnfr_update = {"status": "ERROR"}
690 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
691 if "status" not in vdur:
692 vdur["status"] = "ERROR"
693 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
694 if error_text:
695 vdur["status-detailed"] = str(error_text)
696 vnfr_update[
697 "vdur.{}.status-detailed".format(vdu_index)
698 ] = "ERROR"
699 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
700 except DbException as e:
701 self.logger.error("Cannot update vnf. {}".format(e))
702
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO; nothing to update
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # replicas of the same vdu are matched by order of
                        # appearance: skip RO vms until the count-index matches
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy interface addresses, matched by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update vnf-level vlds with RO network info
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
799
800 def _get_ns_config_info(self, nsr_id):
801 """
802 Generates a mapping between vnf,vdu elements and the N2VC id
803 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
804 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
805 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
806 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
807 """
808 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
809 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
810 mapping = {}
811 ns_config_info = {"osm-config-mapping": mapping}
812 for vca in vca_deployed_list:
813 if not vca["member-vnf-index"]:
814 continue
815 if not vca["vdu_id"]:
816 mapping[vca["member-vnf-index"]] = vca["application"]
817 else:
818 mapping[
819 "{}.{}.{}".format(
820 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
821 )
822 ] = vca["application"]
823 return ns_config_info
824
825 async def _instantiate_ng_ro(
826 self,
827 logging_text,
828 nsr_id,
829 nsd,
830 db_nsr,
831 db_nslcmop,
832 db_vnfrs,
833 db_vnfds,
834 n2vc_key_list,
835 stage,
836 start_deploy,
837 timeout_ns_deploy,
838 ):
839
840 db_vims = {}
841
842 def get_vim_account(vim_account_id):
843 nonlocal db_vims
844 if vim_account_id in db_vims:
845 return db_vims[vim_account_id]
846 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
847 db_vims[vim_account_id] = db_vim
848 return db_vim
849
850 # modify target_vld info with instantiation parameters
851 def parse_vld_instantiation_params(
852 target_vim, target_vld, vld_params, target_sdn
853 ):
854 if vld_params.get("ip-profile"):
855 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
856 "ip-profile"
857 ]
858 if vld_params.get("provider-network"):
859 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
860 "provider-network"
861 ]
862 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
863 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
864 "provider-network"
865 ]["sdn-ports"]
866
867 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
868 # if wim_account_id is specified in vld_params, validate if it is feasible.
869 wim_account_id, db_wim = select_feasible_wim_account(
870 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
871 )
872
873 if wim_account_id:
874 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
875 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
876 # update vld_params with correct WIM account Id
877 vld_params["wimAccountId"] = wim_account_id
878
879 target_wim = "wim:{}".format(wim_account_id)
880 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
881 sdn_ports = get_sdn_ports(vld_params, db_wim)
882 if len(sdn_ports) > 0:
883 target_vld["vim_info"][target_wim] = target_wim_attrs
884 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
885
886 self.logger.debug(
887 "Target VLD with WIM data: {:s}".format(str(target_vld))
888 )
889
890 for param in ("vim-network-name", "vim-network-id"):
891 if vld_params.get(param):
892 if isinstance(vld_params[param], dict):
893 for vim, vim_net in vld_params[param].items():
894 other_target_vim = "vim:" + vim
895 populate_dict(
896 target_vld["vim_info"],
897 (other_target_vim, param.replace("-", "_")),
898 vim_net,
899 )
900 else: # isinstance str
901 target_vld["vim_info"][target_vim][
902 param.replace("-", "_")
903 ] = vld_params[param]
904 if vld_params.get("common_id"):
905 target_vld["common_id"] = vld_params.get("common_id")
906
907 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
908 def update_ns_vld_target(target, ns_params):
909 for vnf_params in ns_params.get("vnf", ()):
910 if vnf_params.get("vimAccountId"):
911 target_vnf = next(
912 (
913 vnfr
914 for vnfr in db_vnfrs.values()
915 if vnf_params["member-vnf-index"]
916 == vnfr["member-vnf-index-ref"]
917 ),
918 None,
919 )
920 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
921 if not vdur:
922 return
923 for a_index, a_vld in enumerate(target["ns"]["vld"]):
924 target_vld = find_in_list(
925 get_iterable(vdur, "interfaces"),
926 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
927 )
928
929 vld_params = find_in_list(
930 get_iterable(ns_params, "vld"),
931 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
932 )
933 if target_vld:
934
935 if vnf_params.get("vimAccountId") not in a_vld.get(
936 "vim_info", {}
937 ):
938 target_vim_network_list = [
939 v for _, v in a_vld.get("vim_info").items()
940 ]
941 target_vim_network_name = next(
942 (
943 item.get("vim_network_name", "")
944 for item in target_vim_network_list
945 ),
946 "",
947 )
948
949 target["ns"]["vld"][a_index].get("vim_info").update(
950 {
951 "vim:{}".format(vnf_params["vimAccountId"]): {
952 "vim_network_name": target_vim_network_name,
953 }
954 }
955 )
956
957 if vld_params:
958 for param in ("vim-network-name", "vim-network-id"):
959 if vld_params.get(param) and isinstance(
960 vld_params[param], dict
961 ):
962 for vim, vim_net in vld_params[
963 param
964 ].items():
965 other_target_vim = "vim:" + vim
966 populate_dict(
967 target["ns"]["vld"][a_index].get(
968 "vim_info"
969 ),
970 (
971 other_target_vim,
972 param.replace("-", "_"),
973 ),
974 vim_net,
975 )
976
977 nslcmop_id = db_nslcmop["_id"]
978 target = {
979 "name": db_nsr["name"],
980 "ns": {"vld": []},
981 "vnf": [],
982 "image": deepcopy(db_nsr["image"]),
983 "flavor": deepcopy(db_nsr["flavor"]),
984 "action_id": nslcmop_id,
985 "cloud_init_content": {},
986 }
987 for image in target["image"]:
988 image["vim_info"] = {}
989 for flavor in target["flavor"]:
990 flavor["vim_info"] = {}
991 if db_nsr.get("affinity-or-anti-affinity-group"):
992 target["affinity-or-anti-affinity-group"] = deepcopy(
993 db_nsr["affinity-or-anti-affinity-group"]
994 )
995 for affinity_or_anti_affinity_group in target[
996 "affinity-or-anti-affinity-group"
997 ]:
998 affinity_or_anti_affinity_group["vim_info"] = {}
999
1000 if db_nslcmop.get("lcmOperationType") != "instantiate":
1001 # get parameters of instantiation:
1002 db_nslcmop_instantiate = self.db.get_list(
1003 "nslcmops",
1004 {
1005 "nsInstanceId": db_nslcmop["nsInstanceId"],
1006 "lcmOperationType": "instantiate",
1007 },
1008 )[-1]
1009 ns_params = db_nslcmop_instantiate.get("operationParams")
1010 else:
1011 ns_params = db_nslcmop.get("operationParams")
1012 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
1013 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
1014
1015 cp2target = {}
1016 for vld_index, vld in enumerate(db_nsr.get("vld")):
1017 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1018 target_vld = {
1019 "id": vld["id"],
1020 "name": vld["name"],
1021 "mgmt-network": vld.get("mgmt-network", False),
1022 "type": vld.get("type"),
1023 "vim_info": {
1024 target_vim: {
1025 "vim_network_name": vld.get("vim-network-name"),
1026 "vim_account_id": ns_params["vimAccountId"],
1027 }
1028 },
1029 }
1030 # check if this network needs SDN assist
1031 if vld.get("pci-interfaces"):
1032 db_vim = get_vim_account(ns_params["vimAccountId"])
1033 sdnc_id = db_vim["config"].get("sdn-controller")
1034 if sdnc_id:
1035 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1036 target_sdn = "sdn:{}".format(sdnc_id)
1037 target_vld["vim_info"][target_sdn] = {
1038 "sdn": True,
1039 "target_vim": target_vim,
1040 "vlds": [sdn_vld],
1041 "type": vld.get("type"),
1042 }
1043
1044 nsd_vnf_profiles = get_vnf_profiles(nsd)
1045 for nsd_vnf_profile in nsd_vnf_profiles:
1046 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1047 if cp["virtual-link-profile-id"] == vld["id"]:
1048 cp2target[
1049 "member_vnf:{}.{}".format(
1050 cp["constituent-cpd-id"][0][
1051 "constituent-base-element-id"
1052 ],
1053 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1054 )
1055 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1056
1057 # check at nsd descriptor, if there is an ip-profile
1058 vld_params = {}
1059 nsd_vlp = find_in_list(
1060 get_virtual_link_profiles(nsd),
1061 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1062 == vld["id"],
1063 )
1064 if (
1065 nsd_vlp
1066 and nsd_vlp.get("virtual-link-protocol-data")
1067 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1068 ):
1069 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1070 "l3-protocol-data"
1071 ]
1072 ip_profile_dest_data = {}
1073 if "ip-version" in ip_profile_source_data:
1074 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1075 "ip-version"
1076 ]
1077 if "cidr" in ip_profile_source_data:
1078 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1079 "cidr"
1080 ]
1081 if "gateway-ip" in ip_profile_source_data:
1082 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1083 "gateway-ip"
1084 ]
1085 if "dhcp-enabled" in ip_profile_source_data:
1086 ip_profile_dest_data["dhcp-params"] = {
1087 "enabled": ip_profile_source_data["dhcp-enabled"]
1088 }
1089 vld_params["ip-profile"] = ip_profile_dest_data
1090
1091 # update vld_params with instantiation params
1092 vld_instantiation_params = find_in_list(
1093 get_iterable(ns_params, "vld"),
1094 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1095 )
1096 if vld_instantiation_params:
1097 vld_params.update(vld_instantiation_params)
1098 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1099 target["ns"]["vld"].append(target_vld)
1100 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1101 update_ns_vld_target(target, ns_params)
1102
1103 for vnfr in db_vnfrs.values():
1104 vnfd = find_in_list(
1105 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1106 )
1107 vnf_params = find_in_list(
1108 get_iterable(ns_params, "vnf"),
1109 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1110 )
1111 target_vnf = deepcopy(vnfr)
1112 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1113 for vld in target_vnf.get("vld", ()):
1114 # check if connected to a ns.vld, to fill target'
1115 vnf_cp = find_in_list(
1116 vnfd.get("int-virtual-link-desc", ()),
1117 lambda cpd: cpd.get("id") == vld["id"],
1118 )
1119 if vnf_cp:
1120 ns_cp = "member_vnf:{}.{}".format(
1121 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1122 )
1123 if cp2target.get(ns_cp):
1124 vld["target"] = cp2target[ns_cp]
1125
1126 vld["vim_info"] = {
1127 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1128 }
1129 # check if this network needs SDN assist
1130 target_sdn = None
1131 if vld.get("pci-interfaces"):
1132 db_vim = get_vim_account(vnfr["vim-account-id"])
1133 sdnc_id = db_vim["config"].get("sdn-controller")
1134 if sdnc_id:
1135 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1136 target_sdn = "sdn:{}".format(sdnc_id)
1137 vld["vim_info"][target_sdn] = {
1138 "sdn": True,
1139 "target_vim": target_vim,
1140 "vlds": [sdn_vld],
1141 "type": vld.get("type"),
1142 }
1143
1144 # check at vnfd descriptor, if there is an ip-profile
1145 vld_params = {}
1146 vnfd_vlp = find_in_list(
1147 get_virtual_link_profiles(vnfd),
1148 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1149 )
1150 if (
1151 vnfd_vlp
1152 and vnfd_vlp.get("virtual-link-protocol-data")
1153 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1154 ):
1155 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1156 "l3-protocol-data"
1157 ]
1158 ip_profile_dest_data = {}
1159 if "ip-version" in ip_profile_source_data:
1160 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1161 "ip-version"
1162 ]
1163 if "cidr" in ip_profile_source_data:
1164 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1165 "cidr"
1166 ]
1167 if "gateway-ip" in ip_profile_source_data:
1168 ip_profile_dest_data[
1169 "gateway-address"
1170 ] = ip_profile_source_data["gateway-ip"]
1171 if "dhcp-enabled" in ip_profile_source_data:
1172 ip_profile_dest_data["dhcp-params"] = {
1173 "enabled": ip_profile_source_data["dhcp-enabled"]
1174 }
1175
1176 vld_params["ip-profile"] = ip_profile_dest_data
1177 # update vld_params with instantiation params
1178 if vnf_params:
1179 vld_instantiation_params = find_in_list(
1180 get_iterable(vnf_params, "internal-vld"),
1181 lambda i_vld: i_vld["name"] == vld["id"],
1182 )
1183 if vld_instantiation_params:
1184 vld_params.update(vld_instantiation_params)
1185 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1186
1187 vdur_list = []
1188 for vdur in target_vnf.get("vdur", ()):
1189 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1190 continue # This vdu must not be created
1191 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1192
1193 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1194
1195 if ssh_keys_all:
1196 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1197 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1198 if (
1199 vdu_configuration
1200 and vdu_configuration.get("config-access")
1201 and vdu_configuration.get("config-access").get("ssh-access")
1202 ):
1203 vdur["ssh-keys"] = ssh_keys_all
1204 vdur["ssh-access-required"] = vdu_configuration[
1205 "config-access"
1206 ]["ssh-access"]["required"]
1207 elif (
1208 vnf_configuration
1209 and vnf_configuration.get("config-access")
1210 and vnf_configuration.get("config-access").get("ssh-access")
1211 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1212 ):
1213 vdur["ssh-keys"] = ssh_keys_all
1214 vdur["ssh-access-required"] = vnf_configuration[
1215 "config-access"
1216 ]["ssh-access"]["required"]
1217 elif ssh_keys_instantiation and find_in_list(
1218 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1219 ):
1220 vdur["ssh-keys"] = ssh_keys_instantiation
1221
1222 self.logger.debug("NS > vdur > {}".format(vdur))
1223
1224 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1225 # cloud-init
1226 if vdud.get("cloud-init-file"):
1227 vdur["cloud-init"] = "{}:file:{}".format(
1228 vnfd["_id"], vdud.get("cloud-init-file")
1229 )
1230 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1231 if vdur["cloud-init"] not in target["cloud_init_content"]:
1232 base_folder = vnfd["_admin"]["storage"]
1233 if base_folder["pkg-dir"]:
1234 cloud_init_file = "{}/{}/cloud_init/{}".format(
1235 base_folder["folder"],
1236 base_folder["pkg-dir"],
1237 vdud.get("cloud-init-file"),
1238 )
1239 else:
1240 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1241 base_folder["folder"],
1242 vdud.get("cloud-init-file"),
1243 )
1244 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1245 target["cloud_init_content"][
1246 vdur["cloud-init"]
1247 ] = ci_file.read()
1248 elif vdud.get("cloud-init"):
1249 vdur["cloud-init"] = "{}:vdu:{}".format(
1250 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1251 )
1252 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1253 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1254 "cloud-init"
1255 ]
1256 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1257 deploy_params_vdu = self._format_additional_params(
1258 vdur.get("additionalParams") or {}
1259 )
1260 deploy_params_vdu["OSM"] = get_osm_params(
1261 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1262 )
1263 vdur["additionalParams"] = deploy_params_vdu
1264
1265 # flavor
1266 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1267 if target_vim not in ns_flavor["vim_info"]:
1268 ns_flavor["vim_info"][target_vim] = {}
1269
1270 # deal with images
1271 # in case alternative images are provided we must check if they should be applied
1272 # for the vim_type, modify the vim_type taking into account
1273 ns_image_id = int(vdur["ns-image-id"])
1274 if vdur.get("alt-image-ids"):
1275 db_vim = get_vim_account(vnfr["vim-account-id"])
1276 vim_type = db_vim["vim_type"]
1277 for alt_image_id in vdur.get("alt-image-ids"):
1278 ns_alt_image = target["image"][int(alt_image_id)]
1279 if vim_type == ns_alt_image.get("vim-type"):
1280 # must use alternative image
1281 self.logger.debug(
1282 "use alternative image id: {}".format(alt_image_id)
1283 )
1284 ns_image_id = alt_image_id
1285 vdur["ns-image-id"] = ns_image_id
1286 break
1287 ns_image = target["image"][int(ns_image_id)]
1288 if target_vim not in ns_image["vim_info"]:
1289 ns_image["vim_info"][target_vim] = {}
1290
1291 # Affinity groups
1292 if vdur.get("affinity-or-anti-affinity-group-id"):
1293 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1294 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1295 if target_vim not in ns_ags["vim_info"]:
1296 ns_ags["vim_info"][target_vim] = {}
1297
1298 vdur["vim_info"] = {target_vim: {}}
1299 # instantiation parameters
1300 if vnf_params:
1301 vdu_instantiation_params = find_in_list(
1302 get_iterable(vnf_params, "vdu"),
1303 lambda i_vdu: i_vdu["id"] == vdud["id"],
1304 )
1305 if vdu_instantiation_params:
1306 # Parse the vdu_volumes from the instantiation params
1307 vdu_volumes = get_volumes_from_instantiation_params(
1308 vdu_instantiation_params, vdud
1309 )
1310 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1311 vdur_list.append(vdur)
1312 target_vnf["vdur"] = vdur_list
1313 target["vnf"].append(target_vnf)
1314
1315 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1316 desc = await self.RO.deploy(nsr_id, target)
1317 self.logger.debug("RO return > {}".format(desc))
1318 action_id = desc["action_id"]
1319 await self._wait_ng_ro(
1320 nsr_id,
1321 action_id,
1322 nslcmop_id,
1323 start_deploy,
1324 timeout_ns_deploy,
1325 stage,
1326 operation="instantiation",
1327 )
1328
1329 # Updating NSR
1330 db_nsr_update = {
1331 "_admin.deployed.RO.operational-status": "running",
1332 "detailed-status": " ".join(stage),
1333 }
1334 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1335 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1336 self._write_op_status(nslcmop_id, stage)
1337 self.logger.debug(
1338 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1339 )
1340 return
1341
1342 async def _wait_ng_ro(
1343 self,
1344 nsr_id,
1345 action_id,
1346 nslcmop_id=None,
1347 start_time=None,
1348 timeout=600,
1349 stage=None,
1350 operation=None,
1351 ):
1352 detailed_status_old = None
1353 db_nsr_update = {}
1354 start_time = start_time or time()
1355 while time() <= start_time + timeout:
1356 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1357 self.logger.debug("Wait NG RO > {}".format(desc_status))
1358 if desc_status["status"] == "FAILED":
1359 raise NgRoException(desc_status["details"])
1360 elif desc_status["status"] == "BUILD":
1361 if stage:
1362 stage[2] = "VIM: ({})".format(desc_status["details"])
1363 elif desc_status["status"] == "DONE":
1364 if stage:
1365 stage[2] = "Deployed at VIM"
1366 break
1367 else:
1368 assert False, "ROclient.check_ns_status returns unknown {}".format(
1369 desc_status["status"]
1370 )
1371 if stage and nslcmop_id and stage[2] != detailed_status_old:
1372 detailed_status_old = stage[2]
1373 db_nsr_update["detailed-status"] = " ".join(stage)
1374 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1375 self._write_op_status(nslcmop_id, stage)
1376 await asyncio.sleep(15, loop=self.loop)
1377 else: # timeout_ns_deploy
1378 raise NgRoException("Timeout waiting ns to deploy")
1379
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Undeploy the NS from the VIM through NG-RO and delete the RO record.

        Sends an "empty" deploy target (no vld/vnf/image/flavor) so RO removes every
        deployed item, waits for the action to finish and finally deletes the ns at RO.

        :param logging_text: prefix used for logging
        :param nsr_deployed: content of db_nsr "_admin.deployed" (not used directly here)
        :param nsr_id: ns record identity
        :param nslcmop_id: operation identity; also used as RO action_id of the target
        :param stage: list with 3 items [general stage, tasks, vim_specific]; item 2 is
            overwritten with the VIM deletion result
        :raises LcmException: if RO reports a conflict or an unexpected error
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target makes NG-RO undeploy everything belonging to this ns
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )

            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except Exception as e:
            if isinstance(e, NgRoException) and e.http_code == 404:  # not found
                # nothing left at RO: record the deletion as already done
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif isinstance(e, NgRoException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                # NOTE: action_id may still be None here if RO.deploy itself failed
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1452
1453 async def instantiate_RO(
1454 self,
1455 logging_text,
1456 nsr_id,
1457 nsd,
1458 db_nsr,
1459 db_nslcmop,
1460 db_vnfrs,
1461 db_vnfds,
1462 n2vc_key_list,
1463 stage,
1464 ):
1465 """
1466 Instantiate at RO
1467 :param logging_text: preffix text to use at logging
1468 :param nsr_id: nsr identity
1469 :param nsd: database content of ns descriptor
1470 :param db_nsr: database content of ns record
1471 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1472 :param db_vnfrs:
1473 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1474 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1475 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1476 :return: None or exception
1477 """
1478 try:
1479 start_deploy = time()
1480 ns_params = db_nslcmop.get("operationParams")
1481 if ns_params and ns_params.get("timeout_ns_deploy"):
1482 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1483 else:
1484 timeout_ns_deploy = self.timeout.get(
1485 "ns_deploy", self.timeout_ns_deploy
1486 )
1487
1488 # Check for and optionally request placement optimization. Database will be updated if placement activated
1489 stage[2] = "Waiting for Placement."
1490 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1491 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1492 for vnfr in db_vnfrs.values():
1493 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1494 break
1495 else:
1496 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1497
1498 return await self._instantiate_ng_ro(
1499 logging_text,
1500 nsr_id,
1501 nsd,
1502 db_nsr,
1503 db_nslcmop,
1504 db_vnfrs,
1505 db_vnfds,
1506 n2vc_key_list,
1507 stage,
1508 start_deploy,
1509 timeout_ns_deploy,
1510 )
1511 except Exception as e:
1512 stage[2] = "ERROR deploying at VIM"
1513 self.set_vnfr_at_error(db_vnfrs, str(e))
1514 self.logger.error(
1515 "Error deploying at VIM {}".format(e),
1516 exc_info=not isinstance(
1517 e,
1518 (
1519 ROclient.ROClientException,
1520 LcmException,
1521 DbException,
1522 NgRoException,
1523 ),
1524 ),
1525 )
1526 raise
1527
1528 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1529 """
1530 Wait for kdu to be up, get ip address
1531 :param logging_text: prefix use for logging
1532 :param nsr_id:
1533 :param vnfr_id:
1534 :param kdu_name:
1535 :return: IP address, K8s services
1536 """
1537
1538 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1539 nb_tries = 0
1540
1541 while nb_tries < 360:
1542 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1543 kdur = next(
1544 (
1545 x
1546 for x in get_iterable(db_vnfr, "kdur")
1547 if x.get("kdu-name") == kdu_name
1548 ),
1549 None,
1550 )
1551 if not kdur:
1552 raise LcmException(
1553 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1554 )
1555 if kdur.get("status"):
1556 if kdur["status"] in ("READY", "ENABLED"):
1557 return kdur.get("ip-address"), kdur.get("services")
1558 else:
1559 raise LcmException(
1560 "target KDU={} is in error state".format(kdu_name)
1561 )
1562
1563 await asyncio.sleep(10, loop=self.loop)
1564 nb_tries += 1
1565 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1566
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target vdu; None means the VNF mgmt address is used instead
        :param vdu_index: count-index of the target vdu (only used when vdu_id is set)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: VNF/VM in error state, vdur not found, or retries exhausted
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # key-injection retries (old RO path only)
        target_vdu_id = None
        ro_retries = 0  # polling iterations; each one sleeps 10 s

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue  # ip not assigned yet; poll again
                    # locate the vdur that owns the VNF mgmt ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up by definition; otherwise require ACTIVE
                # from either the legacy status field or the NG-RO vim_status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # NOTE(review): "ssh-ky" typo is in the runtime log message;
                    # left untouched here since it is program output
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: inject via a deploy action on the vdur
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(
                            nsr_id, action_id, timeout=600, operation="instantiation"
                        )
                        break
                    else:
                        # legacy RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        # succeed if ANY VM reports 200; otherwise raise with the
                        # description of the last inspected result
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # legacy RO may reject while the VM boots: retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # nothing to inject; the ip address is all that was requested
                break

        return ip_address
1745
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: ns record identity
        :param vca_deployed_list: "_admin.deployed.VCA" list; only the entry at
            vca_index is used (the list is re-read from the database inside the loop)
        :param vca_index: index of the VCA whose dependencies are awaited
        :raises LcmException: a dependent charm is BROKEN, or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): timeout decrements by 1 per 10-second sleep, so the
        # effective wait is ~300*10 s (≈50 min), not 300 s — confirm intent
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level VCA (no member-vnf-index) depends on everything;
                # a VNF-level VCA only on peers of the same member-vnf-index
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still configuring: stop scanning and poll again
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1783
1784 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1785 vca_id = None
1786 if db_vnfr:
1787 vca_id = deep_get(db_vnfr, ("vca-id",))
1788 elif db_nsr:
1789 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1790 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1791 return vca_id
1792
1793 async def instantiate_N2VC(
1794 self,
1795 logging_text,
1796 vca_index,
1797 nsi_id,
1798 db_nsr,
1799 db_vnfr,
1800 vdu_id,
1801 kdu_name,
1802 vdu_index,
1803 config_descriptor,
1804 deploy_params,
1805 base_folder,
1806 nslcmop_id,
1807 stage,
1808 vca_type,
1809 vca_name,
1810 ee_config_descriptor,
1811 ):
1812 nsr_id = db_nsr["_id"]
1813 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1814 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1815 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1816 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1817 db_dict = {
1818 "collection": "nsrs",
1819 "filter": {"_id": nsr_id},
1820 "path": db_update_entry,
1821 }
1822 step = ""
1823 try:
1824
1825 element_type = "NS"
1826 element_under_configuration = nsr_id
1827
1828 vnfr_id = None
1829 if db_vnfr:
1830 vnfr_id = db_vnfr["_id"]
1831 osm_config["osm"]["vnf_id"] = vnfr_id
1832
1833 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1834
1835 if vca_type == "native_charm":
1836 index_number = 0
1837 else:
1838 index_number = vdu_index or 0
1839
1840 if vnfr_id:
1841 element_type = "VNF"
1842 element_under_configuration = vnfr_id
1843 namespace += ".{}-{}".format(vnfr_id, index_number)
1844 if vdu_id:
1845 namespace += ".{}-{}".format(vdu_id, index_number)
1846 element_type = "VDU"
1847 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1848 osm_config["osm"]["vdu_id"] = vdu_id
1849 elif kdu_name:
1850 namespace += ".{}".format(kdu_name)
1851 element_type = "KDU"
1852 element_under_configuration = kdu_name
1853 osm_config["osm"]["kdu_name"] = kdu_name
1854
1855 # Get artifact path
1856 if base_folder["pkg-dir"]:
1857 artifact_path = "{}/{}/{}/{}".format(
1858 base_folder["folder"],
1859 base_folder["pkg-dir"],
1860 "charms"
1861 if vca_type
1862 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1863 else "helm-charts",
1864 vca_name,
1865 )
1866 else:
1867 artifact_path = "{}/Scripts/{}/{}/".format(
1868 base_folder["folder"],
1869 "charms"
1870 if vca_type
1871 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1872 else "helm-charts",
1873 vca_name,
1874 )
1875
1876 self.logger.debug("Artifact path > {}".format(artifact_path))
1877
1878 # get initial_config_primitive_list that applies to this element
1879 initial_config_primitive_list = config_descriptor.get(
1880 "initial-config-primitive"
1881 )
1882
1883 self.logger.debug(
1884 "Initial config primitive list > {}".format(
1885 initial_config_primitive_list
1886 )
1887 )
1888
1889 # add config if not present for NS charm
1890 ee_descriptor_id = ee_config_descriptor.get("id")
1891 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1892 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1893 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1894 )
1895
1896 self.logger.debug(
1897 "Initial config primitive list #2 > {}".format(
1898 initial_config_primitive_list
1899 )
1900 )
1901 # n2vc_redesign STEP 3.1
1902 # find old ee_id if exists
1903 ee_id = vca_deployed.get("ee_id")
1904
1905 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1906 # create or register execution environment in VCA
1907 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1908
1909 self._write_configuration_status(
1910 nsr_id=nsr_id,
1911 vca_index=vca_index,
1912 status="CREATING",
1913 element_under_configuration=element_under_configuration,
1914 element_type=element_type,
1915 )
1916
1917 step = "create execution environment"
1918 self.logger.debug(logging_text + step)
1919
1920 ee_id = None
1921 credentials = None
1922 if vca_type == "k8s_proxy_charm":
1923 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1924 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1925 namespace=namespace,
1926 artifact_path=artifact_path,
1927 db_dict=db_dict,
1928 vca_id=vca_id,
1929 )
1930 elif vca_type == "helm" or vca_type == "helm-v3":
1931 ee_id, credentials = await self.vca_map[
1932 vca_type
1933 ].create_execution_environment(
1934 namespace=namespace,
1935 reuse_ee_id=ee_id,
1936 db_dict=db_dict,
1937 config=osm_config,
1938 artifact_path=artifact_path,
1939 chart_model=vca_name,
1940 vca_type=vca_type,
1941 )
1942 else:
1943 ee_id, credentials = await self.vca_map[
1944 vca_type
1945 ].create_execution_environment(
1946 namespace=namespace,
1947 reuse_ee_id=ee_id,
1948 db_dict=db_dict,
1949 vca_id=vca_id,
1950 )
1951
1952 elif vca_type == "native_charm":
1953 step = "Waiting to VM being up and getting IP address"
1954 self.logger.debug(logging_text + step)
1955 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1956 logging_text,
1957 nsr_id,
1958 vnfr_id,
1959 vdu_id,
1960 vdu_index,
1961 user=None,
1962 pub_key=None,
1963 )
1964 credentials = {"hostname": rw_mgmt_ip}
1965 # get username
1966 username = deep_get(
1967 config_descriptor, ("config-access", "ssh-access", "default-user")
1968 )
1969 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1970 # merged. Meanwhile let's get username from initial-config-primitive
1971 if not username and initial_config_primitive_list:
1972 for config_primitive in initial_config_primitive_list:
1973 for param in config_primitive.get("parameter", ()):
1974 if param["name"] == "ssh-username":
1975 username = param["value"]
1976 break
1977 if not username:
1978 raise LcmException(
1979 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1980 "'config-access.ssh-access.default-user'"
1981 )
1982 credentials["username"] = username
1983 # n2vc_redesign STEP 3.2
1984
1985 self._write_configuration_status(
1986 nsr_id=nsr_id,
1987 vca_index=vca_index,
1988 status="REGISTERING",
1989 element_under_configuration=element_under_configuration,
1990 element_type=element_type,
1991 )
1992
1993 step = "register execution environment {}".format(credentials)
1994 self.logger.debug(logging_text + step)
1995 ee_id = await self.vca_map[vca_type].register_execution_environment(
1996 credentials=credentials,
1997 namespace=namespace,
1998 db_dict=db_dict,
1999 vca_id=vca_id,
2000 )
2001
2002 # for compatibility with MON/POL modules, the need model and application name at database
2003 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
2004 ee_id_parts = ee_id.split(".")
2005 db_nsr_update = {db_update_entry + "ee_id": ee_id}
2006 if len(ee_id_parts) >= 2:
2007 model_name = ee_id_parts[0]
2008 application_name = ee_id_parts[1]
2009 db_nsr_update[db_update_entry + "model"] = model_name
2010 db_nsr_update[db_update_entry + "application"] = application_name
2011
2012 # n2vc_redesign STEP 3.3
2013 step = "Install configuration Software"
2014
2015 self._write_configuration_status(
2016 nsr_id=nsr_id,
2017 vca_index=vca_index,
2018 status="INSTALLING SW",
2019 element_under_configuration=element_under_configuration,
2020 element_type=element_type,
2021 other_update=db_nsr_update,
2022 )
2023
2024 # TODO check if already done
2025 self.logger.debug(logging_text + step)
2026 config = None
2027 if vca_type == "native_charm":
2028 config_primitive = next(
2029 (p for p in initial_config_primitive_list if p["name"] == "config"),
2030 None,
2031 )
2032 if config_primitive:
2033 config = self._map_primitive_params(
2034 config_primitive, {}, deploy_params
2035 )
2036 num_units = 1
2037 if vca_type == "lxc_proxy_charm":
2038 if element_type == "NS":
2039 num_units = db_nsr.get("config-units") or 1
2040 elif element_type == "VNF":
2041 num_units = db_vnfr.get("config-units") or 1
2042 elif element_type == "VDU":
2043 for v in db_vnfr["vdur"]:
2044 if vdu_id == v["vdu-id-ref"]:
2045 num_units = v.get("config-units") or 1
2046 break
2047 if vca_type != "k8s_proxy_charm":
2048 await self.vca_map[vca_type].install_configuration_sw(
2049 ee_id=ee_id,
2050 artifact_path=artifact_path,
2051 db_dict=db_dict,
2052 config=config,
2053 num_units=num_units,
2054 vca_id=vca_id,
2055 vca_type=vca_type,
2056 )
2057
2058 # write in db flag of configuration_sw already installed
2059 self.update_db_2(
2060 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2061 )
2062
2063 # add relations for this VCA (wait for other peers related with this VCA)
2064 await self._add_vca_relations(
2065 logging_text=logging_text,
2066 nsr_id=nsr_id,
2067 vca_type=vca_type,
2068 vca_index=vca_index,
2069 )
2070
2071 # if SSH access is required, then get execution environment SSH public
2072 # if native charm we have waited already to VM be UP
2073 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2074 pub_key = None
2075 user = None
2076 # self.logger.debug("get ssh key block")
2077 if deep_get(
2078 config_descriptor, ("config-access", "ssh-access", "required")
2079 ):
2080 # self.logger.debug("ssh key needed")
2081 # Needed to inject a ssh key
2082 user = deep_get(
2083 config_descriptor,
2084 ("config-access", "ssh-access", "default-user"),
2085 )
2086 step = "Install configuration Software, getting public ssh key"
2087 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2088 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2089 )
2090
2091 step = "Insert public key into VM user={} ssh_key={}".format(
2092 user, pub_key
2093 )
2094 else:
2095 # self.logger.debug("no need to get ssh key")
2096 step = "Waiting to VM being up and getting IP address"
2097 self.logger.debug(logging_text + step)
2098
2099 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2100 rw_mgmt_ip = None
2101
2102 # n2vc_redesign STEP 5.1
2103 # wait for RO (ip-address) Insert pub_key into VM
2104 if vnfr_id:
2105 if kdu_name:
2106 rw_mgmt_ip, services = await self.wait_kdu_up(
2107 logging_text, nsr_id, vnfr_id, kdu_name
2108 )
2109 vnfd = self.db.get_one(
2110 "vnfds_revisions",
2111 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2112 )
2113 kdu = get_kdu(vnfd, kdu_name)
2114 kdu_services = [
2115 service["name"] for service in get_kdu_services(kdu)
2116 ]
2117 exposed_services = []
2118 for service in services:
2119 if any(s in service["name"] for s in kdu_services):
2120 exposed_services.append(service)
2121 await self.vca_map[vca_type].exec_primitive(
2122 ee_id=ee_id,
2123 primitive_name="config",
2124 params_dict={
2125 "osm-config": json.dumps(
2126 OsmConfigBuilder(
2127 k8s={"services": exposed_services}
2128 ).build()
2129 )
2130 },
2131 vca_id=vca_id,
2132 )
2133
2134 # This verification is needed in order to avoid trying to add a public key
2135 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2136 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2137 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2138 # or it is a KNF)
2139 elif db_vnfr.get("vdur"):
2140 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2141 logging_text,
2142 nsr_id,
2143 vnfr_id,
2144 vdu_id,
2145 vdu_index,
2146 user=user,
2147 pub_key=pub_key,
2148 )
2149
2150 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2151
2152 # store rw_mgmt_ip in deploy params for later replacement
2153 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2154
2155 # n2vc_redesign STEP 6 Execute initial config primitive
2156 step = "execute initial config primitive"
2157
2158 # wait for dependent primitives execution (NS -> VNF -> VDU)
2159 if initial_config_primitive_list:
2160 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2161
2162 # stage, in function of element type: vdu, kdu, vnf or ns
2163 my_vca = vca_deployed_list[vca_index]
2164 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2165 # VDU or KDU
2166 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2167 elif my_vca.get("member-vnf-index"):
2168 # VNF
2169 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2170 else:
2171 # NS
2172 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2173
2174 self._write_configuration_status(
2175 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2176 )
2177
2178 self._write_op_status(op_id=nslcmop_id, stage=stage)
2179
2180 check_if_terminated_needed = True
2181 for initial_config_primitive in initial_config_primitive_list:
2182 # adding information on the vca_deployed if it is a NS execution environment
2183 if not vca_deployed["member-vnf-index"]:
2184 deploy_params["ns_config_info"] = json.dumps(
2185 self._get_ns_config_info(nsr_id)
2186 )
2187 # TODO check if already done
2188 primitive_params_ = self._map_primitive_params(
2189 initial_config_primitive, {}, deploy_params
2190 )
2191
2192 step = "execute primitive '{}' params '{}'".format(
2193 initial_config_primitive["name"], primitive_params_
2194 )
2195 self.logger.debug(logging_text + step)
2196 await self.vca_map[vca_type].exec_primitive(
2197 ee_id=ee_id,
2198 primitive_name=initial_config_primitive["name"],
2199 params_dict=primitive_params_,
2200 db_dict=db_dict,
2201 vca_id=vca_id,
2202 vca_type=vca_type,
2203 )
2204 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2205 if check_if_terminated_needed:
2206 if config_descriptor.get("terminate-config-primitive"):
2207 self.update_db_2(
2208 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2209 )
2210 check_if_terminated_needed = False
2211
2212 # TODO register in database that primitive is done
2213
2214 # STEP 7 Configure metrics
2215 if vca_type == "helm" or vca_type == "helm-v3":
2216 # TODO: review for those cases where the helm chart is a reference and
2217 # is not part of the NF package
2218 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2219 ee_id=ee_id,
2220 artifact_path=artifact_path,
2221 ee_config_descriptor=ee_config_descriptor,
2222 vnfr_id=vnfr_id,
2223 nsr_id=nsr_id,
2224 target_ip=rw_mgmt_ip,
2225 )
2226 if prometheus_jobs:
2227 self.update_db_2(
2228 "nsrs",
2229 nsr_id,
2230 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2231 )
2232
2233 for job in prometheus_jobs:
2234 self.db.set_one(
2235 "prometheus_jobs",
2236 {"job_name": job["job_name"]},
2237 job,
2238 upsert=True,
2239 fail_on_empty=False,
2240 )
2241
2242 step = "instantiated at VCA"
2243 self.logger.debug(logging_text + step)
2244
2245 self._write_configuration_status(
2246 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2247 )
2248
2249 except Exception as e: # TODO not use Exception but N2VC exception
2250 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2251 if not isinstance(
2252 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2253 ):
2254 self.logger.error(
2255 "Exception while {} : {}".format(step, e), exc_info=True
2256 )
2257 self._write_configuration_status(
2258 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2259 )
2260 raise LcmException("{} {}".format(step, e)) from e
2261
2262 def _write_ns_status(
2263 self,
2264 nsr_id: str,
2265 ns_state: str,
2266 current_operation: str,
2267 current_operation_id: str,
2268 error_description: str = None,
2269 error_detail: str = None,
2270 other_update: dict = None,
2271 ):
2272 """
2273 Update db_nsr fields.
2274 :param nsr_id:
2275 :param ns_state:
2276 :param current_operation:
2277 :param current_operation_id:
2278 :param error_description:
2279 :param error_detail:
2280 :param other_update: Other required changes at database if provided, will be cleared
2281 :return:
2282 """
2283 try:
2284 db_dict = other_update or {}
2285 db_dict[
2286 "_admin.nslcmop"
2287 ] = current_operation_id # for backward compatibility
2288 db_dict["_admin.current-operation"] = current_operation_id
2289 db_dict["_admin.operation-type"] = (
2290 current_operation if current_operation != "IDLE" else None
2291 )
2292 db_dict["currentOperation"] = current_operation
2293 db_dict["currentOperationID"] = current_operation_id
2294 db_dict["errorDescription"] = error_description
2295 db_dict["errorDetail"] = error_detail
2296
2297 if ns_state:
2298 db_dict["nsState"] = ns_state
2299 self.update_db_2("nsrs", nsr_id, db_dict)
2300 except DbException as e:
2301 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2302
2303 def _write_op_status(
2304 self,
2305 op_id: str,
2306 stage: list = None,
2307 error_message: str = None,
2308 queuePosition: int = 0,
2309 operation_state: str = None,
2310 other_update: dict = None,
2311 ):
2312 try:
2313 db_dict = other_update or {}
2314 db_dict["queuePosition"] = queuePosition
2315 if isinstance(stage, list):
2316 db_dict["stage"] = stage[0]
2317 db_dict["detailed-status"] = " ".join(stage)
2318 elif stage is not None:
2319 db_dict["stage"] = str(stage)
2320
2321 if error_message is not None:
2322 db_dict["errorMessage"] = error_message
2323 if operation_state is not None:
2324 db_dict["operationState"] = operation_state
2325 db_dict["statusEnteredTime"] = time()
2326 self.update_db_2("nslcmops", op_id, db_dict)
2327 except DbException as e:
2328 self.logger.warn(
2329 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2330 )
2331
2332 def _write_all_config_status(self, db_nsr: dict, status: str):
2333 try:
2334 nsr_id = db_nsr["_id"]
2335 # configurationStatus
2336 config_status = db_nsr.get("configurationStatus")
2337 if config_status:
2338 db_nsr_update = {
2339 "configurationStatus.{}.status".format(index): status
2340 for index, v in enumerate(config_status)
2341 if v
2342 }
2343 # update status
2344 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2345
2346 except DbException as e:
2347 self.logger.warn(
2348 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2349 )
2350
2351 def _write_configuration_status(
2352 self,
2353 nsr_id: str,
2354 vca_index: int,
2355 status: str = None,
2356 element_under_configuration: str = None,
2357 element_type: str = None,
2358 other_update: dict = None,
2359 ):
2360
2361 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2362 # .format(vca_index, status))
2363
2364 try:
2365 db_path = "configurationStatus.{}.".format(vca_index)
2366 db_dict = other_update or {}
2367 if status:
2368 db_dict[db_path + "status"] = status
2369 if element_under_configuration:
2370 db_dict[
2371 db_path + "elementUnderConfiguration"
2372 ] = element_under_configuration
2373 if element_type:
2374 db_dict[db_path + "elementType"] = element_type
2375 self.update_db_2("nsrs", nsr_id, db_dict)
2376 except DbException as e:
2377 self.logger.warn(
2378 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2379 status, nsr_id, vca_index, e
2380 )
2381 )
2382
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and compute the placement (the vim account where to deploy each VNF). If it is decided by an
        external tool (PLA), it sends the request via kafka and waits until the result is written at database
        (nslcmops _admin.pla). Database is used because the result can be obtained from a different LCM
        worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfrs with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # ask the placement engine through kafka; the answer is written by PLA
            # (possibly handled by another LCM worker) into the nslcmop record
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database every db_poll_interval seconds, up to 10 polls,
            # until _admin.pla appears in the nslcmop record
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a placement decision or not present in this NS
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs so the caller sees the selected vim account
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2431
2432 def update_nsrs_with_pla_result(self, params):
2433 try:
2434 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2435 self.update_db_2(
2436 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2437 )
2438 except Exception as e:
2439 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2440
2441 async def instantiate(self, nsr_id, nslcmop_id):
2442 """
2443
2444 :param nsr_id: ns instance to deploy
2445 :param nslcmop_id: operation to run
2446 :return:
2447 """
2448
2449 # Try to lock HA task here
2450 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2451 if not task_is_locked_by_me:
2452 self.logger.debug(
2453 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2454 )
2455 return
2456
2457 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2458 self.logger.debug(logging_text + "Enter")
2459
2460 # get all needed from database
2461
2462 # database nsrs record
2463 db_nsr = None
2464
2465 # database nslcmops record
2466 db_nslcmop = None
2467
2468 # update operation on nsrs
2469 db_nsr_update = {}
2470 # update operation on nslcmops
2471 db_nslcmop_update = {}
2472
2473 nslcmop_operation_state = None
2474 db_vnfrs = {} # vnf's info indexed by member-index
2475 # n2vc_info = {}
2476 tasks_dict_info = {} # from task to info text
2477 exc = None
2478 error_list = []
2479 stage = [
2480 "Stage 1/5: preparation of the environment.",
2481 "Waiting for previous operations to terminate.",
2482 "",
2483 ]
2484 # ^ stage, step, VIM progress
2485 try:
2486 # wait for any previous tasks in process
2487 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2488
2489 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2490 stage[1] = "Reading from database."
2491 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2492 db_nsr_update["detailed-status"] = "creating"
2493 db_nsr_update["operational-status"] = "init"
2494 self._write_ns_status(
2495 nsr_id=nsr_id,
2496 ns_state="BUILDING",
2497 current_operation="INSTANTIATING",
2498 current_operation_id=nslcmop_id,
2499 other_update=db_nsr_update,
2500 )
2501 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2502
2503 # read from db: operation
2504 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2505 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2506 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2507 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2508 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2509 )
2510 ns_params = db_nslcmop.get("operationParams")
2511 if ns_params and ns_params.get("timeout_ns_deploy"):
2512 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2513 else:
2514 timeout_ns_deploy = self.timeout.get(
2515 "ns_deploy", self.timeout_ns_deploy
2516 )
2517
2518 # read from db: ns
2519 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2520 self.logger.debug(logging_text + stage[1])
2521 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2522 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2523 self.logger.debug(logging_text + stage[1])
2524 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2525 self.fs.sync(db_nsr["nsd-id"])
2526 db_nsr["nsd"] = nsd
2527 # nsr_name = db_nsr["name"] # TODO short-name??
2528
2529 # read from db: vnf's of this ns
2530 stage[1] = "Getting vnfrs from db."
2531 self.logger.debug(logging_text + stage[1])
2532 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2533
2534 # read from db: vnfd's for every vnf
2535 db_vnfds = [] # every vnfd data
2536
2537 # for each vnf in ns, read vnfd
2538 for vnfr in db_vnfrs_list:
2539 if vnfr.get("kdur"):
2540 kdur_list = []
2541 for kdur in vnfr["kdur"]:
2542 if kdur.get("additionalParams"):
2543 kdur["additionalParams"] = json.loads(
2544 kdur["additionalParams"]
2545 )
2546 kdur_list.append(kdur)
2547 vnfr["kdur"] = kdur_list
2548
2549 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2550 vnfd_id = vnfr["vnfd-id"]
2551 vnfd_ref = vnfr["vnfd-ref"]
2552 self.fs.sync(vnfd_id)
2553
2554 # if we haven't this vnfd, read it from db
2555 if vnfd_id not in db_vnfds:
2556 # read from db
2557 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2558 vnfd_id, vnfd_ref
2559 )
2560 self.logger.debug(logging_text + stage[1])
2561 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2562
2563 # store vnfd
2564 db_vnfds.append(vnfd)
2565
2566 # Get or generates the _admin.deployed.VCA list
2567 vca_deployed_list = None
2568 if db_nsr["_admin"].get("deployed"):
2569 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2570 if vca_deployed_list is None:
2571 vca_deployed_list = []
2572 configuration_status_list = []
2573 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2574 db_nsr_update["configurationStatus"] = configuration_status_list
2575 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2576 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2577 elif isinstance(vca_deployed_list, dict):
2578 # maintain backward compatibility. Change a dict to list at database
2579 vca_deployed_list = list(vca_deployed_list.values())
2580 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2581 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2582
2583 if not isinstance(
2584 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2585 ):
2586 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2587 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2588
2589 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2590 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2591 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2592 self.db.set_list(
2593 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2594 )
2595
2596 # n2vc_redesign STEP 2 Deploy Network Scenario
2597 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2598 self._write_op_status(op_id=nslcmop_id, stage=stage)
2599
2600 stage[1] = "Deploying KDUs."
2601 # self.logger.debug(logging_text + "Before deploy_kdus")
2602 # Call to deploy_kdus in case exists the "vdu:kdu" param
2603 await self.deploy_kdus(
2604 logging_text=logging_text,
2605 nsr_id=nsr_id,
2606 nslcmop_id=nslcmop_id,
2607 db_vnfrs=db_vnfrs,
2608 db_vnfds=db_vnfds,
2609 task_instantiation_info=tasks_dict_info,
2610 )
2611
2612 stage[1] = "Getting VCA public key."
2613 # n2vc_redesign STEP 1 Get VCA public ssh-key
2614 # feature 1429. Add n2vc public key to needed VMs
2615 n2vc_key = self.n2vc.get_public_key()
2616 n2vc_key_list = [n2vc_key]
2617 if self.vca_config.get("public_key"):
2618 n2vc_key_list.append(self.vca_config["public_key"])
2619
2620 stage[1] = "Deploying NS at VIM."
2621 task_ro = asyncio.ensure_future(
2622 self.instantiate_RO(
2623 logging_text=logging_text,
2624 nsr_id=nsr_id,
2625 nsd=nsd,
2626 db_nsr=db_nsr,
2627 db_nslcmop=db_nslcmop,
2628 db_vnfrs=db_vnfrs,
2629 db_vnfds=db_vnfds,
2630 n2vc_key_list=n2vc_key_list,
2631 stage=stage,
2632 )
2633 )
2634 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2635 tasks_dict_info[task_ro] = "Deploying at VIM"
2636
2637 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2638 stage[1] = "Deploying Execution Environments."
2639 self.logger.debug(logging_text + stage[1])
2640
2641 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2642 for vnf_profile in get_vnf_profiles(nsd):
2643 vnfd_id = vnf_profile["vnfd-id"]
2644 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2645 member_vnf_index = str(vnf_profile["id"])
2646 db_vnfr = db_vnfrs[member_vnf_index]
2647 base_folder = vnfd["_admin"]["storage"]
2648 vdu_id = None
2649 vdu_index = 0
2650 vdu_name = None
2651 kdu_name = None
2652
2653 # Get additional parameters
2654 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2655 if db_vnfr.get("additionalParamsForVnf"):
2656 deploy_params.update(
2657 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2658 )
2659
2660 descriptor_config = get_configuration(vnfd, vnfd["id"])
2661 if descriptor_config:
2662 self._deploy_n2vc(
2663 logging_text=logging_text
2664 + "member_vnf_index={} ".format(member_vnf_index),
2665 db_nsr=db_nsr,
2666 db_vnfr=db_vnfr,
2667 nslcmop_id=nslcmop_id,
2668 nsr_id=nsr_id,
2669 nsi_id=nsi_id,
2670 vnfd_id=vnfd_id,
2671 vdu_id=vdu_id,
2672 kdu_name=kdu_name,
2673 member_vnf_index=member_vnf_index,
2674 vdu_index=vdu_index,
2675 vdu_name=vdu_name,
2676 deploy_params=deploy_params,
2677 descriptor_config=descriptor_config,
2678 base_folder=base_folder,
2679 task_instantiation_info=tasks_dict_info,
2680 stage=stage,
2681 )
2682
2683 # Deploy charms for each VDU that supports one.
2684 for vdud in get_vdu_list(vnfd):
2685 vdu_id = vdud["id"]
2686 descriptor_config = get_configuration(vnfd, vdu_id)
2687 vdur = find_in_list(
2688 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2689 )
2690
2691 if vdur.get("additionalParams"):
2692 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2693 else:
2694 deploy_params_vdu = deploy_params
2695 deploy_params_vdu["OSM"] = get_osm_params(
2696 db_vnfr, vdu_id, vdu_count_index=0
2697 )
2698 vdud_count = get_number_of_instances(vnfd, vdu_id)
2699
2700 self.logger.debug("VDUD > {}".format(vdud))
2701 self.logger.debug(
2702 "Descriptor config > {}".format(descriptor_config)
2703 )
2704 if descriptor_config:
2705 vdu_name = None
2706 kdu_name = None
2707 for vdu_index in range(vdud_count):
2708 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2709 self._deploy_n2vc(
2710 logging_text=logging_text
2711 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2712 member_vnf_index, vdu_id, vdu_index
2713 ),
2714 db_nsr=db_nsr,
2715 db_vnfr=db_vnfr,
2716 nslcmop_id=nslcmop_id,
2717 nsr_id=nsr_id,
2718 nsi_id=nsi_id,
2719 vnfd_id=vnfd_id,
2720 vdu_id=vdu_id,
2721 kdu_name=kdu_name,
2722 member_vnf_index=member_vnf_index,
2723 vdu_index=vdu_index,
2724 vdu_name=vdu_name,
2725 deploy_params=deploy_params_vdu,
2726 descriptor_config=descriptor_config,
2727 base_folder=base_folder,
2728 task_instantiation_info=tasks_dict_info,
2729 stage=stage,
2730 )
2731 for kdud in get_kdu_list(vnfd):
2732 kdu_name = kdud["name"]
2733 descriptor_config = get_configuration(vnfd, kdu_name)
2734 if descriptor_config:
2735 vdu_id = None
2736 vdu_index = 0
2737 vdu_name = None
2738 kdur = next(
2739 x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
2740 )
2741 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2742 if kdur.get("additionalParams"):
2743 deploy_params_kdu.update(
2744 parse_yaml_strings(kdur["additionalParams"].copy())
2745 )
2746
2747 self._deploy_n2vc(
2748 logging_text=logging_text,
2749 db_nsr=db_nsr,
2750 db_vnfr=db_vnfr,
2751 nslcmop_id=nslcmop_id,
2752 nsr_id=nsr_id,
2753 nsi_id=nsi_id,
2754 vnfd_id=vnfd_id,
2755 vdu_id=vdu_id,
2756 kdu_name=kdu_name,
2757 member_vnf_index=member_vnf_index,
2758 vdu_index=vdu_index,
2759 vdu_name=vdu_name,
2760 deploy_params=deploy_params_kdu,
2761 descriptor_config=descriptor_config,
2762 base_folder=base_folder,
2763 task_instantiation_info=tasks_dict_info,
2764 stage=stage,
2765 )
2766
2767 # Check if this NS has a charm configuration
2768 descriptor_config = nsd.get("ns-configuration")
2769 if descriptor_config and descriptor_config.get("juju"):
2770 vnfd_id = None
2771 db_vnfr = None
2772 member_vnf_index = None
2773 vdu_id = None
2774 kdu_name = None
2775 vdu_index = 0
2776 vdu_name = None
2777
2778 # Get additional parameters
2779 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2780 if db_nsr.get("additionalParamsForNs"):
2781 deploy_params.update(
2782 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2783 )
2784 base_folder = nsd["_admin"]["storage"]
2785 self._deploy_n2vc(
2786 logging_text=logging_text,
2787 db_nsr=db_nsr,
2788 db_vnfr=db_vnfr,
2789 nslcmop_id=nslcmop_id,
2790 nsr_id=nsr_id,
2791 nsi_id=nsi_id,
2792 vnfd_id=vnfd_id,
2793 vdu_id=vdu_id,
2794 kdu_name=kdu_name,
2795 member_vnf_index=member_vnf_index,
2796 vdu_index=vdu_index,
2797 vdu_name=vdu_name,
2798 deploy_params=deploy_params,
2799 descriptor_config=descriptor_config,
2800 base_folder=base_folder,
2801 task_instantiation_info=tasks_dict_info,
2802 stage=stage,
2803 )
2804
2805 # rest of staff will be done at finally
2806
2807 except (
2808 ROclient.ROClientException,
2809 DbException,
2810 LcmException,
2811 N2VCException,
2812 ) as e:
2813 self.logger.error(
2814 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2815 )
2816 exc = e
2817 except asyncio.CancelledError:
2818 self.logger.error(
2819 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2820 )
2821 exc = "Operation was cancelled"
2822 except Exception as e:
2823 exc = traceback.format_exc()
2824 self.logger.critical(
2825 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2826 exc_info=True,
2827 )
2828 finally:
2829 if exc:
2830 error_list.append(str(exc))
2831 try:
2832 # wait for pending tasks
2833 if tasks_dict_info:
2834 stage[1] = "Waiting for instantiate pending tasks."
2835 self.logger.debug(logging_text + stage[1])
2836 error_list += await self._wait_for_tasks(
2837 logging_text,
2838 tasks_dict_info,
2839 timeout_ns_deploy,
2840 stage,
2841 nslcmop_id,
2842 nsr_id=nsr_id,
2843 )
2844 stage[1] = stage[2] = ""
2845 except asyncio.CancelledError:
2846 error_list.append("Cancelled")
2847 # TODO cancel all tasks
2848 except Exception as exc:
2849 error_list.append(str(exc))
2850
2851 # update operation-status
2852 db_nsr_update["operational-status"] = "running"
2853 # let's begin with VCA 'configured' status (later we can change it)
2854 db_nsr_update["config-status"] = "configured"
2855 for task, task_name in tasks_dict_info.items():
2856 if not task.done() or task.cancelled() or task.exception():
2857 if task_name.startswith(self.task_name_deploy_vca):
2858 # A N2VC task is pending
2859 db_nsr_update["config-status"] = "failed"
2860 else:
2861 # RO or KDU task is pending
2862 db_nsr_update["operational-status"] = "failed"
2863
2864 # update status at database
2865 if error_list:
2866 error_detail = ". ".join(error_list)
2867 self.logger.error(logging_text + error_detail)
2868 error_description_nslcmop = "{} Detail: {}".format(
2869 stage[0], error_detail
2870 )
2871 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2872 nslcmop_id, stage[0]
2873 )
2874
2875 db_nsr_update["detailed-status"] = (
2876 error_description_nsr + " Detail: " + error_detail
2877 )
2878 db_nslcmop_update["detailed-status"] = error_detail
2879 nslcmop_operation_state = "FAILED"
2880 ns_state = "BROKEN"
2881 else:
2882 error_detail = None
2883 error_description_nsr = error_description_nslcmop = None
2884 ns_state = "READY"
2885 db_nsr_update["detailed-status"] = "Done"
2886 db_nslcmop_update["detailed-status"] = "Done"
2887 nslcmop_operation_state = "COMPLETED"
2888
2889 if db_nsr:
2890 self._write_ns_status(
2891 nsr_id=nsr_id,
2892 ns_state=ns_state,
2893 current_operation="IDLE",
2894 current_operation_id=None,
2895 error_description=error_description_nsr,
2896 error_detail=error_detail,
2897 other_update=db_nsr_update,
2898 )
2899 self._write_op_status(
2900 op_id=nslcmop_id,
2901 stage="",
2902 error_message=error_description_nslcmop,
2903 operation_state=nslcmop_operation_state,
2904 other_update=db_nslcmop_update,
2905 )
2906
2907 if nslcmop_operation_state:
2908 try:
2909 await self.msg.aiowrite(
2910 "ns",
2911 "instantiated",
2912 {
2913 "nsr_id": nsr_id,
2914 "nslcmop_id": nslcmop_id,
2915 "operationState": nslcmop_operation_state,
2916 },
2917 loop=self.loop,
2918 )
2919 except Exception as e:
2920 self.logger.error(
2921 logging_text + "kafka_write notification Exception {}".format(e)
2922 )
2923
2924 self.logger.debug(logging_text + "Exit")
2925 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2926
2927 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2928 if vnfd_id not in cached_vnfds:
2929 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2930 return cached_vnfds[vnfd_id]
2931
2932 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2933 if vnf_profile_id not in cached_vnfrs:
2934 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2935 "vnfrs",
2936 {
2937 "member-vnf-index-ref": vnf_profile_id,
2938 "nsr-id-ref": nsr_id,
2939 },
2940 )
2941 return cached_vnfrs[vnf_profile_id]
2942
2943 def _is_deployed_vca_in_relation(
2944 self, vca: DeployedVCA, relation: Relation
2945 ) -> bool:
2946 found = False
2947 for endpoint in (relation.provider, relation.requirer):
2948 if endpoint["kdu-resource-profile-id"]:
2949 continue
2950 found = (
2951 vca.vnf_profile_id == endpoint.vnf_profile_id
2952 and vca.vdu_profile_id == endpoint.vdu_profile_id
2953 and vca.execution_environment_ref == endpoint.execution_environment_ref
2954 )
2955 if found:
2956 break
2957 return found
2958
2959 def _update_ee_relation_data_with_implicit_data(
2960 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2961 ):
2962 ee_relation_data = safe_get_ee_relation(
2963 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2964 )
2965 ee_relation_level = EELevel.get_level(ee_relation_data)
2966 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2967 "execution-environment-ref"
2968 ]:
2969 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2970 vnfd_id = vnf_profile["vnfd-id"]
2971 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2972 entity_id = (
2973 vnfd_id
2974 if ee_relation_level == EELevel.VNF
2975 else ee_relation_data["vdu-profile-id"]
2976 )
2977 ee = get_juju_ee_ref(db_vnfd, entity_id)
2978 if not ee:
2979 raise Exception(
2980 f"not execution environments found for ee_relation {ee_relation_data}"
2981 )
2982 ee_relation_data["execution-environment-ref"] = ee["id"]
2983 return ee_relation_data
2984
2985 def _get_ns_relations(
2986 self,
2987 nsr_id: str,
2988 nsd: Dict[str, Any],
2989 vca: DeployedVCA,
2990 cached_vnfds: Dict[str, Any],
2991 ) -> List[Relation]:
2992 relations = []
2993 db_ns_relations = get_ns_configuration_relation_list(nsd)
2994 for r in db_ns_relations:
2995 provider_dict = None
2996 requirer_dict = None
2997 if all(key in r for key in ("provider", "requirer")):
2998 provider_dict = r["provider"]
2999 requirer_dict = r["requirer"]
3000 elif "entities" in r:
3001 provider_id = r["entities"][0]["id"]
3002 provider_dict = {
3003 "nsr-id": nsr_id,
3004 "endpoint": r["entities"][0]["endpoint"],
3005 }
3006 if provider_id != nsd["id"]:
3007 provider_dict["vnf-profile-id"] = provider_id
3008 requirer_id = r["entities"][1]["id"]
3009 requirer_dict = {
3010 "nsr-id": nsr_id,
3011 "endpoint": r["entities"][1]["endpoint"],
3012 }
3013 if requirer_id != nsd["id"]:
3014 requirer_dict["vnf-profile-id"] = requirer_id
3015 else:
3016 raise Exception(
3017 "provider/requirer or entities must be included in the relation."
3018 )
3019 relation_provider = self._update_ee_relation_data_with_implicit_data(
3020 nsr_id, nsd, provider_dict, cached_vnfds
3021 )
3022 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3023 nsr_id, nsd, requirer_dict, cached_vnfds
3024 )
3025 provider = EERelation(relation_provider)
3026 requirer = EERelation(relation_requirer)
3027 relation = Relation(r["name"], provider, requirer)
3028 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3029 if vca_in_relation:
3030 relations.append(relation)
3031 return relations
3032
3033 def _get_vnf_relations(
3034 self,
3035 nsr_id: str,
3036 nsd: Dict[str, Any],
3037 vca: DeployedVCA,
3038 cached_vnfds: Dict[str, Any],
3039 ) -> List[Relation]:
3040 relations = []
3041 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3042 vnf_profile_id = vnf_profile["id"]
3043 vnfd_id = vnf_profile["vnfd-id"]
3044 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3045 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3046 for r in db_vnf_relations:
3047 provider_dict = None
3048 requirer_dict = None
3049 if all(key in r for key in ("provider", "requirer")):
3050 provider_dict = r["provider"]
3051 requirer_dict = r["requirer"]
3052 elif "entities" in r:
3053 provider_id = r["entities"][0]["id"]
3054 provider_dict = {
3055 "nsr-id": nsr_id,
3056 "vnf-profile-id": vnf_profile_id,
3057 "endpoint": r["entities"][0]["endpoint"],
3058 }
3059 if provider_id != vnfd_id:
3060 provider_dict["vdu-profile-id"] = provider_id
3061 requirer_id = r["entities"][1]["id"]
3062 requirer_dict = {
3063 "nsr-id": nsr_id,
3064 "vnf-profile-id": vnf_profile_id,
3065 "endpoint": r["entities"][1]["endpoint"],
3066 }
3067 if requirer_id != vnfd_id:
3068 requirer_dict["vdu-profile-id"] = requirer_id
3069 else:
3070 raise Exception(
3071 "provider/requirer or entities must be included in the relation."
3072 )
3073 relation_provider = self._update_ee_relation_data_with_implicit_data(
3074 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3075 )
3076 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3077 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3078 )
3079 provider = EERelation(relation_provider)
3080 requirer = EERelation(relation_requirer)
3081 relation = Relation(r["name"], provider, requirer)
3082 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3083 if vca_in_relation:
3084 relations.append(relation)
3085 return relations
3086
3087 def _get_kdu_resource_data(
3088 self,
3089 ee_relation: EERelation,
3090 db_nsr: Dict[str, Any],
3091 cached_vnfds: Dict[str, Any],
3092 ) -> DeployedK8sResource:
3093 nsd = get_nsd(db_nsr)
3094 vnf_profiles = get_vnf_profiles(nsd)
3095 vnfd_id = find_in_list(
3096 vnf_profiles,
3097 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3098 )["vnfd-id"]
3099 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3100 kdu_resource_profile = get_kdu_resource_profile(
3101 db_vnfd, ee_relation.kdu_resource_profile_id
3102 )
3103 kdu_name = kdu_resource_profile["kdu-name"]
3104 deployed_kdu, _ = get_deployed_kdu(
3105 db_nsr.get("_admin", ()).get("deployed", ()),
3106 kdu_name,
3107 ee_relation.vnf_profile_id,
3108 )
3109 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3110 return deployed_kdu
3111
3112 def _get_deployed_component(
3113 self,
3114 ee_relation: EERelation,
3115 db_nsr: Dict[str, Any],
3116 cached_vnfds: Dict[str, Any],
3117 ) -> DeployedComponent:
3118 nsr_id = db_nsr["_id"]
3119 deployed_component = None
3120 ee_level = EELevel.get_level(ee_relation)
3121 if ee_level == EELevel.NS:
3122 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3123 if vca:
3124 deployed_component = DeployedVCA(nsr_id, vca)
3125 elif ee_level == EELevel.VNF:
3126 vca = get_deployed_vca(
3127 db_nsr,
3128 {
3129 "vdu_id": None,
3130 "member-vnf-index": ee_relation.vnf_profile_id,
3131 "ee_descriptor_id": ee_relation.execution_environment_ref,
3132 },
3133 )
3134 if vca:
3135 deployed_component = DeployedVCA(nsr_id, vca)
3136 elif ee_level == EELevel.VDU:
3137 vca = get_deployed_vca(
3138 db_nsr,
3139 {
3140 "vdu_id": ee_relation.vdu_profile_id,
3141 "member-vnf-index": ee_relation.vnf_profile_id,
3142 "ee_descriptor_id": ee_relation.execution_environment_ref,
3143 },
3144 )
3145 if vca:
3146 deployed_component = DeployedVCA(nsr_id, vca)
3147 elif ee_level == EELevel.KDU:
3148 kdu_resource_data = self._get_kdu_resource_data(
3149 ee_relation, db_nsr, cached_vnfds
3150 )
3151 if kdu_resource_data:
3152 deployed_component = DeployedK8sResource(kdu_resource_data)
3153 return deployed_component
3154
3155 async def _add_relation(
3156 self,
3157 relation: Relation,
3158 vca_type: str,
3159 db_nsr: Dict[str, Any],
3160 cached_vnfds: Dict[str, Any],
3161 cached_vnfrs: Dict[str, Any],
3162 ) -> bool:
3163 deployed_provider = self._get_deployed_component(
3164 relation.provider, db_nsr, cached_vnfds
3165 )
3166 deployed_requirer = self._get_deployed_component(
3167 relation.requirer, db_nsr, cached_vnfds
3168 )
3169 if (
3170 deployed_provider
3171 and deployed_requirer
3172 and deployed_provider.config_sw_installed
3173 and deployed_requirer.config_sw_installed
3174 ):
3175 provider_db_vnfr = (
3176 self._get_vnfr(
3177 relation.provider.nsr_id,
3178 relation.provider.vnf_profile_id,
3179 cached_vnfrs,
3180 )
3181 if relation.provider.vnf_profile_id
3182 else None
3183 )
3184 requirer_db_vnfr = (
3185 self._get_vnfr(
3186 relation.requirer.nsr_id,
3187 relation.requirer.vnf_profile_id,
3188 cached_vnfrs,
3189 )
3190 if relation.requirer.vnf_profile_id
3191 else None
3192 )
3193 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3194 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3195 provider_relation_endpoint = RelationEndpoint(
3196 deployed_provider.ee_id,
3197 provider_vca_id,
3198 relation.provider.endpoint,
3199 )
3200 requirer_relation_endpoint = RelationEndpoint(
3201 deployed_requirer.ee_id,
3202 requirer_vca_id,
3203 relation.requirer.endpoint,
3204 )
3205 await self.vca_map[vca_type].add_relation(
3206 provider=provider_relation_endpoint,
3207 requirer=requirer_relation_endpoint,
3208 )
3209 # remove entry from relations list
3210 return True
3211 return False
3212
3213 async def _add_vca_relations(
3214 self,
3215 logging_text,
3216 nsr_id,
3217 vca_type: str,
3218 vca_index: int,
3219 timeout: int = 3600,
3220 ) -> bool:
3221
3222 # steps:
3223 # 1. find all relations for this VCA
3224 # 2. wait for other peers related
3225 # 3. add relations
3226
3227 try:
3228 # STEP 1: find all relations for this VCA
3229
3230 # read nsr record
3231 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3232 nsd = get_nsd(db_nsr)
3233
3234 # this VCA data
3235 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3236 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3237
3238 cached_vnfds = {}
3239 cached_vnfrs = {}
3240 relations = []
3241 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3242 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3243
3244 # if no relations, terminate
3245 if not relations:
3246 self.logger.debug(logging_text + " No relations")
3247 return True
3248
3249 self.logger.debug(logging_text + " adding relations {}".format(relations))
3250
3251 # add all relations
3252 start = time()
3253 while True:
3254 # check timeout
3255 now = time()
3256 if now - start >= timeout:
3257 self.logger.error(logging_text + " : timeout adding relations")
3258 return False
3259
3260 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3261 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3262
3263 # for each relation, find the VCA's related
3264 for relation in relations.copy():
3265 added = await self._add_relation(
3266 relation,
3267 vca_type,
3268 db_nsr,
3269 cached_vnfds,
3270 cached_vnfrs,
3271 )
3272 if added:
3273 relations.remove(relation)
3274
3275 if not relations:
3276 self.logger.debug("Relations added")
3277 break
3278 await asyncio.sleep(5.0)
3279
3280 return True
3281
3282 except Exception as e:
3283 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3284 return False
3285
3286 async def _install_kdu(
3287 self,
3288 nsr_id: str,
3289 nsr_db_path: str,
3290 vnfr_data: dict,
3291 kdu_index: int,
3292 kdud: dict,
3293 vnfd: dict,
3294 k8s_instance_info: dict,
3295 k8params: dict = None,
3296 timeout: int = 600,
3297 vca_id: str = None,
3298 ):
3299
3300 try:
3301 k8sclustertype = k8s_instance_info["k8scluster-type"]
3302 # Instantiate kdu
3303 db_dict_install = {
3304 "collection": "nsrs",
3305 "filter": {"_id": nsr_id},
3306 "path": nsr_db_path,
3307 }
3308
3309 if k8s_instance_info.get("kdu-deployment-name"):
3310 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3311 else:
3312 kdu_instance = self.k8scluster_map[
3313 k8sclustertype
3314 ].generate_kdu_instance_name(
3315 db_dict=db_dict_install,
3316 kdu_model=k8s_instance_info["kdu-model"],
3317 kdu_name=k8s_instance_info["kdu-name"],
3318 )
3319
3320 # Update the nsrs table with the kdu-instance value
3321 self.update_db_2(
3322 item="nsrs",
3323 _id=nsr_id,
3324 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3325 )
3326
3327 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3328 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3329 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3330 # namespace, this first verification could be removed, and the next step would be done for any kind
3331 # of KNF.
3332 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3333 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3334 if k8sclustertype in ("juju", "juju-bundle"):
3335 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3336 # that the user passed a namespace which he wants its KDU to be deployed in)
3337 if (
3338 self.db.count(
3339 table="nsrs",
3340 q_filter={
3341 "_id": nsr_id,
3342 "_admin.projects_write": k8s_instance_info["namespace"],
3343 "_admin.projects_read": k8s_instance_info["namespace"],
3344 },
3345 )
3346 > 0
3347 ):
3348 self.logger.debug(
3349 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3350 )
3351 self.update_db_2(
3352 item="nsrs",
3353 _id=nsr_id,
3354 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3355 )
3356 k8s_instance_info["namespace"] = kdu_instance
3357
3358 await self.k8scluster_map[k8sclustertype].install(
3359 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3360 kdu_model=k8s_instance_info["kdu-model"],
3361 atomic=True,
3362 params=k8params,
3363 db_dict=db_dict_install,
3364 timeout=timeout,
3365 kdu_name=k8s_instance_info["kdu-name"],
3366 namespace=k8s_instance_info["namespace"],
3367 kdu_instance=kdu_instance,
3368 vca_id=vca_id,
3369 )
3370
3371 # Obtain services to obtain management service ip
3372 services = await self.k8scluster_map[k8sclustertype].get_services(
3373 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3374 kdu_instance=kdu_instance,
3375 namespace=k8s_instance_info["namespace"],
3376 )
3377
3378 # Obtain management service info (if exists)
3379 vnfr_update_dict = {}
3380 kdu_config = get_configuration(vnfd, kdud["name"])
3381 if kdu_config:
3382 target_ee_list = kdu_config.get("execution-environment-list", [])
3383 else:
3384 target_ee_list = []
3385
3386 if services:
3387 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3388 mgmt_services = [
3389 service
3390 for service in kdud.get("service", [])
3391 if service.get("mgmt-service")
3392 ]
3393 for mgmt_service in mgmt_services:
3394 for service in services:
3395 if service["name"].startswith(mgmt_service["name"]):
3396 # Mgmt service found, Obtain service ip
3397 ip = service.get("external_ip", service.get("cluster_ip"))
3398 if isinstance(ip, list) and len(ip) == 1:
3399 ip = ip[0]
3400
3401 vnfr_update_dict[
3402 "kdur.{}.ip-address".format(kdu_index)
3403 ] = ip
3404
3405 # Check if must update also mgmt ip at the vnf
3406 service_external_cp = mgmt_service.get(
3407 "external-connection-point-ref"
3408 )
3409 if service_external_cp:
3410 if (
3411 deep_get(vnfd, ("mgmt-interface", "cp"))
3412 == service_external_cp
3413 ):
3414 vnfr_update_dict["ip-address"] = ip
3415
3416 if find_in_list(
3417 target_ee_list,
3418 lambda ee: ee.get(
3419 "external-connection-point-ref", ""
3420 )
3421 == service_external_cp,
3422 ):
3423 vnfr_update_dict[
3424 "kdur.{}.ip-address".format(kdu_index)
3425 ] = ip
3426 break
3427 else:
3428 self.logger.warn(
3429 "Mgmt service name: {} not found".format(
3430 mgmt_service["name"]
3431 )
3432 )
3433
3434 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3435 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3436
3437 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3438 if (
3439 kdu_config
3440 and kdu_config.get("initial-config-primitive")
3441 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3442 ):
3443 initial_config_primitive_list = kdu_config.get(
3444 "initial-config-primitive"
3445 )
3446 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3447
3448 for initial_config_primitive in initial_config_primitive_list:
3449 primitive_params_ = self._map_primitive_params(
3450 initial_config_primitive, {}, {}
3451 )
3452
3453 await asyncio.wait_for(
3454 self.k8scluster_map[k8sclustertype].exec_primitive(
3455 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3456 kdu_instance=kdu_instance,
3457 primitive_name=initial_config_primitive["name"],
3458 params=primitive_params_,
3459 db_dict=db_dict_install,
3460 vca_id=vca_id,
3461 ),
3462 timeout=timeout,
3463 )
3464
3465 except Exception as e:
3466 # Prepare update db with error and raise exception
3467 try:
3468 self.update_db_2(
3469 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3470 )
3471 self.update_db_2(
3472 "vnfrs",
3473 vnfr_data.get("_id"),
3474 {"kdur.{}.status".format(kdu_index): "ERROR"},
3475 )
3476 except Exception:
3477 # ignore to keep original exception
3478 pass
3479 # reraise original error
3480 raise
3481
3482 return kdu_instance
3483
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU present in the VNF records.

        For each kdur of each VNFR: resolves the target k8s cluster,
        synchronizes its helm repos once per cluster, records the KDU in
        _admin.deployed.K8s of the NSR, and spawns an _install_kdu task
        registered in task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id (for task registration)
        :param db_vnfrs: map of member-vnf-index -> VNFR content
        :param db_vnfds: list of VNFD contents
        :param task_instantiation_info: dict filled with task -> description
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # cache of cluster-id -> cluster uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the internal uuid of a k8s cluster,
            # initializing helm-v3 on it for backward compatibility if needed.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per helm cluster)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        # NOTE(review): awaiting asyncio.ensure_future(coro) is
                        # equivalent to awaiting the coroutine directly — the
                        # task wrapper looks unnecessary; confirm and simplify.
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # install runs in the background; the task is registered so
                    # the caller can await/cancel it
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever was recorded even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3755
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of the config.

        For each ee of descriptor_config, finds (or creates) the matching
        entry in <nsrs>._admin.deployed.VCA, then spawns instantiate_N2VC as
        an asyncio task registered in task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                # derive the VCA type from the juju section: proxy charm by
                # default, k8s proxy when cloud is k8s, native when proxy is False
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # for/else: look for an existing VCA slot matching this ee; the
            # else branch (no break) creates and persists a new slot
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3917
3918 @staticmethod
3919 def _create_nslcmop(nsr_id, operation, params):
3920 """
3921 Creates a ns-lcm-opp content to be stored at database.
3922 :param nsr_id: internal id of the instance
3923 :param operation: instantiate, terminate, scale, action, ...
3924 :param params: user parameters for the operation
3925 :return: dictionary following SOL005 format
3926 """
3927 # Raise exception if invalid arguments
3928 if not (nsr_id and operation and params):
3929 raise LcmException(
3930 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3931 )
3932 now = time()
3933 _id = str(uuid4())
3934 nslcmop = {
3935 "id": _id,
3936 "_id": _id,
3937 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3938 "operationState": "PROCESSING",
3939 "statusEnteredTime": now,
3940 "nsInstanceId": nsr_id,
3941 "lcmOperationType": operation,
3942 "startTime": now,
3943 "isAutomaticInvocation": False,
3944 "operationParams": params,
3945 "isCancelPending": False,
3946 "links": {
3947 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3948 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3949 },
3950 }
3951 return nslcmop
3952
3953 def _format_additional_params(self, params):
3954 params = params or {}
3955 for key, value in params.items():
3956 if str(value).startswith("!!yaml "):
3957 params[key] = yaml.safe_load(value[7:])
3958 return params
3959
3960 def _get_terminate_primitive_params(self, seq, vnf_index):
3961 primitive = seq.get("name")
3962 primitive_params = {}
3963 params = {
3964 "member_vnf_index": vnf_index,
3965 "primitive": primitive,
3966 "primitive_params": primitive_params,
3967 }
3968 desc_params = {}
3969 return self._map_primitive_params(seq, params, desc_params)
3970
3971 # sub-operations
3972
3973 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3974 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3975 if op.get("operationState") == "COMPLETED":
3976 # b. Skip sub-operation
3977 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3978 return self.SUBOPERATION_STATUS_SKIP
3979 else:
3980 # c. retry executing sub-operation
3981 # The sub-operation exists, and operationState != 'COMPLETED'
3982 # Update operationState = 'PROCESSING' to indicate a retry.
3983 operationState = "PROCESSING"
3984 detailed_status = "In progress"
3985 self._update_suboperation_status(
3986 db_nslcmop, op_index, operationState, detailed_status
3987 )
3988 # Return the sub-operation index
3989 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3990 # with arguments extracted from the sub-operation
3991 return op_index
3992
3993 # Find a sub-operation where all keys in a matching dictionary must match
3994 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3995 def _find_suboperation(self, db_nslcmop, match):
3996 if db_nslcmop and match:
3997 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3998 for i, op in enumerate(op_list):
3999 if all(op.get(k) == match[k] for k in match):
4000 return i
4001 return self.SUBOPERATION_STATUS_NOT_FOUND
4002
4003 # Update status for a sub-operation given its index
4004 def _update_suboperation_status(
4005 self, db_nslcmop, op_index, operationState, detailed_status
4006 ):
4007 # Update DB for HA tasks
4008 q_filter = {"_id": db_nslcmop["_id"]}
4009 update_dict = {
4010 "_admin.operations.{}.operationState".format(op_index): operationState,
4011 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4012 }
4013 self.db.set_one(
4014 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4015 )
4016
4017 # Add sub-operation, return the index of the added sub-operation
4018 # Optionally, set operationState, detailed-status, and operationType
4019 # Status and type are currently set for 'scale' sub-operations:
4020 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4021 # 'detailed-status' : status message
4022 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4023 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4024 def _add_suboperation(
4025 self,
4026 db_nslcmop,
4027 vnf_index,
4028 vdu_id,
4029 vdu_count_index,
4030 vdu_name,
4031 primitive,
4032 mapped_primitive_params,
4033 operationState=None,
4034 detailed_status=None,
4035 operationType=None,
4036 RO_nsr_id=None,
4037 RO_scaling_info=None,
4038 ):
4039 if not db_nslcmop:
4040 return self.SUBOPERATION_STATUS_NOT_FOUND
4041 # Get the "_admin.operations" list, if it exists
4042 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4043 op_list = db_nslcmop_admin.get("operations")
4044 # Create or append to the "_admin.operations" list
4045 new_op = {
4046 "member_vnf_index": vnf_index,
4047 "vdu_id": vdu_id,
4048 "vdu_count_index": vdu_count_index,
4049 "primitive": primitive,
4050 "primitive_params": mapped_primitive_params,
4051 }
4052 if operationState:
4053 new_op["operationState"] = operationState
4054 if detailed_status:
4055 new_op["detailed-status"] = detailed_status
4056 if operationType:
4057 new_op["lcmOperationType"] = operationType
4058 if RO_nsr_id:
4059 new_op["RO_nsr_id"] = RO_nsr_id
4060 if RO_scaling_info:
4061 new_op["RO_scaling_info"] = RO_scaling_info
4062 if not op_list:
4063 # No existing operations, create key 'operations' with current operation as first list element
4064 db_nslcmop_admin.update({"operations": [new_op]})
4065 op_list = db_nslcmop_admin.get("operations")
4066 else:
4067 # Existing operations, append operation to list
4068 op_list.append(new_op)
4069
4070 db_nslcmop_update = {"_admin.operations": op_list}
4071 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4072 op_index = len(op_list) - 1
4073 return op_index
4074
4075 # Helper methods for scale() sub-operations
4076
4077 # pre-scale/post-scale:
4078 # Check for 3 different cases:
4079 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4080 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4081 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4082 def _check_or_add_scale_suboperation(
4083 self,
4084 db_nslcmop,
4085 vnf_index,
4086 vnf_config_primitive,
4087 primitive_params,
4088 operationType,
4089 RO_nsr_id=None,
4090 RO_scaling_info=None,
4091 ):
4092 # Find this sub-operation
4093 if RO_nsr_id and RO_scaling_info:
4094 operationType = "SCALE-RO"
4095 match = {
4096 "member_vnf_index": vnf_index,
4097 "RO_nsr_id": RO_nsr_id,
4098 "RO_scaling_info": RO_scaling_info,
4099 }
4100 else:
4101 match = {
4102 "member_vnf_index": vnf_index,
4103 "primitive": vnf_config_primitive,
4104 "primitive_params": primitive_params,
4105 "lcmOperationType": operationType,
4106 }
4107 op_index = self._find_suboperation(db_nslcmop, match)
4108 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4109 # a. New sub-operation
4110 # The sub-operation does not exist, add it.
4111 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4112 # The following parameters are set to None for all kind of scaling:
4113 vdu_id = None
4114 vdu_count_index = None
4115 vdu_name = None
4116 if RO_nsr_id and RO_scaling_info:
4117 vnf_config_primitive = None
4118 primitive_params = None
4119 else:
4120 RO_nsr_id = None
4121 RO_scaling_info = None
4122 # Initial status for sub-operation
4123 operationState = "PROCESSING"
4124 detailed_status = "In progress"
4125 # Add sub-operation for pre/post-scaling (zero or more operations)
4126 self._add_suboperation(
4127 db_nslcmop,
4128 vnf_index,
4129 vdu_id,
4130 vdu_count_index,
4131 vdu_name,
4132 vnf_config_primitive,
4133 primitive_params,
4134 operationState,
4135 detailed_status,
4136 operationType,
4137 RO_nsr_id,
4138 RO_scaling_info,
4139 )
4140 return self.SUBOPERATION_STATUS_NEW
4141 else:
4142 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4143 # or op_index (operationState != 'COMPLETED')
4144 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4145
4146 # Function to return execution_environment id
4147
4148 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4149 # TODO vdu_index_count
4150 for vca in vca_deployed_list:
4151 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4152 return vca["ee_id"]
4153
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for every log message of this operation
        :param db_nslcmop: nslcmop database record; terminate sub-operations are registered on it
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the EE here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier; None to use the default VCA
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default to proxy charm when the deployment record does not state a type
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so a retried operation can skip completed steps
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # mark in the nsr that this VCA does not need terminate primitives anymore
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4259
    async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
        """Delete the whole juju namespace (model) holding every EE of this NS.

        :param db_nsr: nsr database record; its _id names the namespace
        :param vca_id: VCA identifier; None to use the default VCA
        """
        self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
        # namespace is "." + nsr_id by convention
        namespace = "." + db_nsr["_id"]
        try:
            await self.n2vc.delete_namespace(
                namespace=namespace,
                total_timeout=self.timeout_charm_delete,
                vca_id=vca_id,
            )
        except N2VCNotFound:  # already deleted. Skip
            pass
        self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4272
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (classic, non-NG): deletes the ns from the
        VIM (polling until done), then the nsd and the vnfds registered at RO.
        :param logging_text: prefix for every log message of this operation
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: ns instance id
        :param nslcmop_id: operation id, used to persist progress
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None; raises LcmException when any deletion failed
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a delete action id may be left over from a previous, interrupted terminate
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                # record the action id so an interrupted terminate can resume polling
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus: reflect the RO view in the database
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # deletion still in progress at VIM
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # persist progress only when the status text changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only attempted when the ns deletion above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete each vnfd registered at RO (only when nothing failed so far)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4472
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a NS instance.

        Stage 1 prepares the task; stage 2 executes terminate primitives and
        destroys individual execution environments; stage 3 deletes remaining
        EEs, KDUs and the VIM deployment (RO). Final status is written to the
        database and notified through kafka in the `finally` section.

        :param nsr_id: ns instance id
        :param nslcmop_id: operation id driving this termination
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            # nothing was deployed: the final status is written at `finally`
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each distinct vnfd by id and by member index
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # locate the config descriptor: ns-level, vdu, kdu or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            # notify the result through kafka so NBI can react (e.g. autoremove)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4801
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, reporting progress and collecting errors.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping task -> human readable description
        :param timeout: global timeout in seconds for all tasks together
        :param stage: 3-element status list; index 1 is updated with "<done>/<total>."
        :param nslcmop_id: operation id used to persist progress
        :param nsr_id: when provided, errors are also written to the nsr record
        :return: list of detailed error strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout: flag every still-pending task and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # expected exception types are logged plainly; anything
                    # else gets its full traceback for debugging
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4878
4879 @staticmethod
4880 def _map_primitive_params(primitive_desc, params, instantiation_params):
4881 """
4882 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4883 The default-value is used. If it is between < > it look for a value at instantiation_params
4884 :param primitive_desc: portion of VNFD/NSD that describes primitive
4885 :param params: Params provided by user
4886 :param instantiation_params: Instantiation params provided by user
4887 :return: a dictionary with the calculated params
4888 """
4889 calculated_params = {}
4890 for parameter in primitive_desc.get("parameter", ()):
4891 param_name = parameter["name"]
4892 if param_name in params:
4893 calculated_params[param_name] = params[param_name]
4894 elif "default-value" in parameter or "value" in parameter:
4895 if "value" in parameter:
4896 calculated_params[param_name] = parameter["value"]
4897 else:
4898 calculated_params[param_name] = parameter["default-value"]
4899 if (
4900 isinstance(calculated_params[param_name], str)
4901 and calculated_params[param_name].startswith("<")
4902 and calculated_params[param_name].endswith(">")
4903 ):
4904 if calculated_params[param_name][1:-1] in instantiation_params:
4905 calculated_params[param_name] = instantiation_params[
4906 calculated_params[param_name][1:-1]
4907 ]
4908 else:
4909 raise LcmException(
4910 "Parameter {} needed to execute primitive {} not provided".format(
4911 calculated_params[param_name], primitive_desc["name"]
4912 )
4913 )
4914 else:
4915 raise LcmException(
4916 "Parameter {} needed to execute primitive {} not provided".format(
4917 param_name, primitive_desc["name"]
4918 )
4919 )
4920
4921 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4922 calculated_params[param_name] = yaml.safe_dump(
4923 calculated_params[param_name], default_flow_style=True, width=256
4924 )
4925 elif isinstance(calculated_params[param_name], str) and calculated_params[
4926 param_name
4927 ].startswith("!!yaml "):
4928 calculated_params[param_name] = calculated_params[param_name][7:]
4929 if parameter.get("data-type") == "INTEGER":
4930 try:
4931 calculated_params[param_name] = int(calculated_params[param_name])
4932 except ValueError: # error converting string to int
4933 raise LcmException(
4934 "Parameter {} of primitive {} must be integer".format(
4935 param_name, primitive_desc["name"]
4936 )
4937 )
4938 elif parameter.get("data-type") == "BOOLEAN":
4939 calculated_params[param_name] = not (
4940 (str(calculated_params[param_name])).lower() == "false"
4941 )
4942
4943 # add always ns_config_info if primitive name is config
4944 if primitive_desc["name"] == "config":
4945 if "ns_config_info" in instantiation_params:
4946 calculated_params["ns_config_info"] = instantiation_params[
4947 "ns_config_info"
4948 ]
4949 return calculated_params
4950
4951 def _look_for_deployed_vca(
4952 self,
4953 deployed_vca,
4954 member_vnf_index,
4955 vdu_id,
4956 vdu_count_index,
4957 kdu_name=None,
4958 ee_descriptor_id=None,
4959 ):
4960 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4961 for vca in deployed_vca:
4962 if not vca:
4963 continue
4964 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4965 continue
4966 if (
4967 vdu_count_index is not None
4968 and vdu_count_index != vca["vdu_count_index"]
4969 ):
4970 continue
4971 if kdu_name and kdu_name != vca["kdu_name"]:
4972 continue
4973 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4974 continue
4975 break
4976 else:
4977 # vca_deployed not found
4978 raise LcmException(
4979 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4980 " is not deployed".format(
4981 member_vnf_index,
4982 vdu_id,
4983 vdu_count_index,
4984 kdu_name,
4985 ee_descriptor_id,
4986 )
4987 )
4988 # get ee_id
4989 ee_id = vca.get("ee_id")
4990 vca_type = vca.get(
4991 "type", "lxc_proxy_charm"
4992 ) # default value for backward compatibility - proxy charm
4993 if not ee_id:
4994 raise LcmException(
4995 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4996 "execution environment".format(
4997 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4998 )
4999 )
5000 return ee_id, vca_type
5001
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on a VCA execution environment, with optional retries.

        :param ee_id: id of the execution environment where the primitive runs
        :param primitive: primitive name; "config" gets its params nested under "params"
        :param primitive_params: dict of parameters passed to the charm
        :param retries: number of extra attempts after a failed execution
        :param retries_interval: seconds to wait between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: where to write execution progress in the database
        :param vca_id: id of the VCA credentials to use
        :return: ("COMPLETED", output) on success; ("FAILED", message) after
            exhausting retries; ("FAIL", message) on unexpected errors
        """
        try:
            # the "config" primitive expects its parameters wrapped under "params"
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            # retry loop: one initial attempt plus `retries` extra attempts
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        # NOTE(review): the explicit loop= argument was removed from
                        # asyncio.sleep in Python 3.10 — confirm target interpreter
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # retries exhausted: map a timeout to a friendlier message
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5062
5063 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5064 """
5065 Updating the vca_status with latest juju information in nsrs record
5066 :param: nsr_id: Id of the nsr
5067 :param: nslcmop_id: Id of the nslcmop
5068 :return: None
5069 """
5070
5071 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5072 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5073 vca_id = self.get_vca_id({}, db_nsr)
5074 if db_nsr["_admin"]["deployed"]["K8s"]:
5075 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5076 cluster_uuid, kdu_instance, cluster_type = (
5077 k8s["k8scluster-uuid"],
5078 k8s["kdu-instance"],
5079 k8s["k8scluster-type"],
5080 )
5081 await self._on_update_k8s_db(
5082 cluster_uuid=cluster_uuid,
5083 kdu_instance=kdu_instance,
5084 filter={"_id": nsr_id},
5085 vca_id=vca_id,
5086 cluster_type=cluster_type,
5087 )
5088 else:
5089 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5090 table, filter = "nsrs", {"_id": nsr_id}
5091 path = "_admin.deployed.VCA.{}.".format(vca_index)
5092 await self._on_update_n2vc_db(table, filter, path, {})
5093
5094 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5095 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5096
    async def action(self, nsr_id, nslcmop_id):
        """Execute a primitive (day-2 action) on a NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmop record, locates the
        primitive in the corresponding descriptor, then dispatches it either to
        the K8s client (KDU primitives: upgrade/rollback/status or non-helm KDU
        actions) or to the deployed VCA execution environment (charm actions).
        The result is persisted to the nslcmops/nsrs records in the finally
        block and an "actioned" kafka message is emitted.
        :param nsr_id: NS instance id
        :param nslcmop_id: id of the operation record carrying the parameters
        :return: (operation_state, detailed_status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params are stored JSON-encoded in the operation record
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # decode each kdur's JSON-encoded additionalParams in place
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch above;
            # for NS-level actions this call would raise UnboundLocalError — confirm
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU built-in operations need no descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops rebind the name "primitive"; from here on
                # the requested primitive survives only in primitive_name
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # KDU path: run the primitive through the K8s cluster client
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # generic KDU action declared in the descriptor
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # Charm path: resolve the deployed execution environment and run there
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # Persist operation result and notify kafka regardless of outcome
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
        return nslcmop_operation_state, detailed_status
5467
5468 async def terminate_vdus(
5469 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5470 ):
5471 """This method terminates VDUs
5472
5473 Args:
5474 db_vnfr: VNF instance record
5475 member_vnf_index: VNF index to identify the VDUs to be removed
5476 db_nsr: NS instance record
5477 update_db_nslcmops: Nslcmop update record
5478 """
5479 vca_scaling_info = []
5480 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5481 scaling_info["scaling_direction"] = "IN"
5482 scaling_info["vdu-delete"] = {}
5483 scaling_info["kdu-delete"] = {}
5484 db_vdur = db_vnfr.get("vdur")
5485 vdur_list = copy(db_vdur)
5486 count_index = 0
5487 for index, vdu in enumerate(vdur_list):
5488 vca_scaling_info.append(
5489 {
5490 "osm_vdu_id": vdu["vdu-id-ref"],
5491 "member-vnf-index": member_vnf_index,
5492 "type": "delete",
5493 "vdu_index": count_index,
5494 }
5495 )
5496 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5497 scaling_info["vdu"].append(
5498 {
5499 "name": vdu.get("name") or vdu.get("vdu-name"),
5500 "vdu_id": vdu["vdu-id-ref"],
5501 "interface": [],
5502 }
5503 )
5504 for interface in vdu["interfaces"]:
5505 scaling_info["vdu"][index]["interface"].append(
5506 {
5507 "name": interface["name"],
5508 "ip_address": interface["ip-address"],
5509 "mac_address": interface.get("mac-address"),
5510 }
5511 )
5512 self.logger.info("NS update scaling info{}".format(scaling_info))
5513 stage[2] = "Terminating VDUs"
5514 if scaling_info.get("vdu-delete"):
5515 # scale_process = "RO"
5516 if self.ro_config.get("ng"):
5517 await self._scale_ng_ro(
5518 logging_text,
5519 db_nsr,
5520 update_db_nslcmops,
5521 db_vnfr,
5522 scaling_info,
5523 stage,
5524 )
5525
5526 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5527 """This method is to Remove VNF instances from NS.
5528
5529 Args:
5530 nsr_id: NS instance id
5531 nslcmop_id: nslcmop id of update
5532 vnf_instance_id: id of the VNF instance to be removed
5533
5534 Returns:
5535 result: (str, str) COMPLETED/FAILED, details
5536 """
5537 try:
5538 db_nsr_update = {}
5539 logging_text = "Task ns={} update ".format(nsr_id)
5540 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5541 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5542 if check_vnfr_count > 1:
5543 stage = ["", "", ""]
5544 step = "Getting nslcmop from database"
5545 self.logger.debug(
5546 step + " after having waited for previous tasks to be completed"
5547 )
5548 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5549 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5550 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5551 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5552 """ db_vnfr = self.db.get_one(
5553 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5554
5555 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5556 await self.terminate_vdus(
5557 db_vnfr,
5558 member_vnf_index,
5559 db_nsr,
5560 update_db_nslcmops,
5561 stage,
5562 logging_text,
5563 )
5564
5565 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5566 constituent_vnfr.remove(db_vnfr.get("_id"))
5567 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5568 "constituent-vnfr-ref"
5569 )
5570 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5571 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5572 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5573 return "COMPLETED", "Done"
5574 else:
5575 step = "Terminate VNF Failed with"
5576 raise LcmException(
5577 "{} Cannot terminate the last VNF in this NS.".format(
5578 vnf_instance_id
5579 )
5580 )
5581 except (LcmException, asyncio.CancelledError):
5582 raise
5583 except Exception as e:
5584 self.logger.debug("Error removing VNF {}".format(e))
5585 return "FAILED", "Error removing VNF {}".format(e)
5586
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDU resources, rewrites the vnfr record
        (connection points, vdur, revision) from the latest descriptor, then
        requests instantiation of the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index is never incremented; every VDU is created
            # with vdu_index 0 — confirm intended
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor's ext-cpd list
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is precomputed by the caller in the operation params
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the vnfr so the scaling step sees the updated record
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            # NOTE(review): the enumerate index is unused in this loop
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5712
5713 async def _ns_charm_upgrade(
5714 self,
5715 ee_id,
5716 charm_id,
5717 charm_type,
5718 path,
5719 timeout: float = None,
5720 ) -> (str, str):
5721 """This method upgrade charms in VNF instances
5722
5723 Args:
5724 ee_id: Execution environment id
5725 path: Local path to the charm
5726 charm_id: charm-id
5727 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5728 timeout: (Float) Timeout for the ns update operation
5729
5730 Returns:
5731 result: (str, str) COMPLETED/FAILED, details
5732 """
5733 try:
5734 charm_type = charm_type or "lxc_proxy_charm"
5735 output = await self.vca_map[charm_type].upgrade_charm(
5736 ee_id=ee_id,
5737 path=path,
5738 charm_id=charm_id,
5739 charm_type=charm_type,
5740 timeout=timeout or self.timeout_ns_update,
5741 )
5742
5743 if output:
5744 return "COMPLETED", output
5745
5746 except (LcmException, asyncio.CancelledError):
5747 raise
5748
5749 except Exception as e:
5750
5751 self.logger.debug("Error upgrading charm {}".format(path))
5752
5753 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5754
5755 async def update(self, nsr_id, nslcmop_id):
5756 """Update NS according to different update types
5757
5758 This method performs upgrade of VNF instances then updates the revision
5759 number in VNF record
5760
5761 Args:
5762 nsr_id: Network service will be updated
5763 nslcmop_id: ns lcm operation id
5764
5765 Returns:
5766 It may raise DbException, LcmException, N2VCException, K8sException
5767
5768 """
5769 # Try to lock HA task here
5770 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5771 if not task_is_locked_by_me:
5772 return
5773
5774 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5775 self.logger.debug(logging_text + "Enter")
5776
5777 # Set the required variables to be filled up later
5778 db_nsr = None
5779 db_nslcmop_update = {}
5780 vnfr_update = {}
5781 nslcmop_operation_state = None
5782 db_nsr_update = {}
5783 error_description_nslcmop = ""
5784 exc = None
5785 change_type = "updated"
5786 detailed_status = ""
5787
5788 try:
5789 # wait for any previous tasks in process
5790 step = "Waiting for previous operations to terminate"
5791 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5792 self._write_ns_status(
5793 nsr_id=nsr_id,
5794 ns_state=None,
5795 current_operation="UPDATING",
5796 current_operation_id=nslcmop_id,
5797 )
5798
5799 step = "Getting nslcmop from database"
5800 db_nslcmop = self.db.get_one(
5801 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5802 )
5803 update_type = db_nslcmop["operationParams"]["updateType"]
5804
5805 step = "Getting nsr from database"
5806 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5807 old_operational_status = db_nsr["operational-status"]
5808 db_nsr_update["operational-status"] = "updating"
5809 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5810 nsr_deployed = db_nsr["_admin"].get("deployed")
5811
5812 if update_type == "CHANGE_VNFPKG":
5813
5814 # Get the input parameters given through update request
5815 vnf_instance_id = db_nslcmop["operationParams"][
5816 "changeVnfPackageData"
5817 ].get("vnfInstanceId")
5818
5819 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5820 "vnfdId"
5821 )
5822 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5823
5824 step = "Getting vnfr from database"
5825 db_vnfr = self.db.get_one(
5826 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5827 )
5828
5829 step = "Getting vnfds from database"
5830 # Latest VNFD
5831 latest_vnfd = self.db.get_one(
5832 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5833 )
5834 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5835
5836 # Current VNFD
5837 current_vnf_revision = db_vnfr.get("revision", 1)
5838 current_vnfd = self.db.get_one(
5839 "vnfds_revisions",
5840 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5841 fail_on_empty=False,
5842 )
5843 # Charm artifact paths will be filled up later
5844 (
5845 current_charm_artifact_path,
5846 target_charm_artifact_path,
5847 charm_artifact_paths,
5848 helm_artifacts,
5849 ) = ([], [], [], [])
5850
5851 step = "Checking if revision has changed in VNFD"
5852 if current_vnf_revision != latest_vnfd_revision:
5853
5854 change_type = "policy_updated"
5855
5856 # There is new revision of VNFD, update operation is required
5857 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5858 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5859
5860 step = "Removing the VNFD packages if they exist in the local path"
5861 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5862 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5863
5864 step = "Get the VNFD packages from FSMongo"
5865 self.fs.sync(from_path=latest_vnfd_path)
5866 self.fs.sync(from_path=current_vnfd_path)
5867
5868 step = (
5869 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5870 )
5871 current_base_folder = current_vnfd["_admin"]["storage"]
5872 latest_base_folder = latest_vnfd["_admin"]["storage"]
5873
5874 for vca_index, vca_deployed in enumerate(
5875 get_iterable(nsr_deployed, "VCA")
5876 ):
5877 vnf_index = db_vnfr.get("member-vnf-index-ref")
5878
5879 # Getting charm-id and charm-type
5880 if vca_deployed.get("member-vnf-index") == vnf_index:
5881 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5882 vca_type = vca_deployed.get("type")
5883 vdu_count_index = vca_deployed.get("vdu_count_index")
5884
5885 # Getting ee-id
5886 ee_id = vca_deployed.get("ee_id")
5887
5888 step = "Getting descriptor config"
5889 descriptor_config = get_configuration(
5890 current_vnfd, current_vnfd["id"]
5891 )
5892
5893 if "execution-environment-list" in descriptor_config:
5894 ee_list = descriptor_config.get(
5895 "execution-environment-list", []
5896 )
5897 else:
5898 ee_list = []
5899
5900 # There could be several charm used in the same VNF
5901 for ee_item in ee_list:
5902 if ee_item.get("juju"):
5903
5904 step = "Getting charm name"
5905 charm_name = ee_item["juju"].get("charm")
5906
5907 step = "Setting Charm artifact paths"
5908 current_charm_artifact_path.append(
5909 get_charm_artifact_path(
5910 current_base_folder,
5911 charm_name,
5912 vca_type,
5913 current_vnf_revision,
5914 )
5915 )
5916 target_charm_artifact_path.append(
5917 get_charm_artifact_path(
5918 latest_base_folder,
5919 charm_name,
5920 vca_type,
5921 latest_vnfd_revision,
5922 )
5923 )
5924 elif ee_item.get("helm-chart"):
5925 # add chart to list and all parameters
5926 step = "Getting helm chart name"
5927 chart_name = ee_item.get("helm-chart")
5928 if (
5929 ee_item.get("helm-version")
5930 and ee_item.get("helm-version") == "v2"
5931 ):
5932 vca_type = "helm"
5933 else:
5934 vca_type = "helm-v3"
5935 step = "Setting Helm chart artifact paths"
5936
5937 helm_artifacts.append(
5938 {
5939 "current_artifact_path": get_charm_artifact_path(
5940 current_base_folder,
5941 chart_name,
5942 vca_type,
5943 current_vnf_revision,
5944 ),
5945 "target_artifact_path": get_charm_artifact_path(
5946 latest_base_folder,
5947 chart_name,
5948 vca_type,
5949 latest_vnfd_revision,
5950 ),
5951 "ee_id": ee_id,
5952 "vca_index": vca_index,
5953 "vdu_index": vdu_count_index,
5954 }
5955 )
5956
5957 charm_artifact_paths = zip(
5958 current_charm_artifact_path, target_charm_artifact_path
5959 )
5960
5961 step = "Checking if software version has changed in VNFD"
5962 if find_software_version(current_vnfd) != find_software_version(
5963 latest_vnfd
5964 ):
5965
5966 step = "Checking if existing VNF has charm"
5967 for current_charm_path, target_charm_path in list(
5968 charm_artifact_paths
5969 ):
5970 if current_charm_path:
5971 raise LcmException(
5972 "Software version change is not supported as VNF instance {} has charm.".format(
5973 vnf_instance_id
5974 )
5975 )
5976
5977 # There is no change in the charm package, then redeploy the VNF
5978 # based on new descriptor
5979 step = "Redeploying VNF"
5980 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5981 (result, detailed_status) = await self._ns_redeploy_vnf(
5982 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5983 )
5984 if result == "FAILED":
5985 nslcmop_operation_state = result
5986 error_description_nslcmop = detailed_status
5987 db_nslcmop_update["detailed-status"] = detailed_status
5988 self.logger.debug(
5989 logging_text
5990 + " step {} Done with result {} {}".format(
5991 step, nslcmop_operation_state, detailed_status
5992 )
5993 )
5994
5995 else:
5996 step = "Checking if any charm package has changed or not"
5997 for current_charm_path, target_charm_path in list(
5998 charm_artifact_paths
5999 ):
6000 if (
6001 current_charm_path
6002 and target_charm_path
6003 and self.check_charm_hash_changed(
6004 current_charm_path, target_charm_path
6005 )
6006 ):
6007
6008 step = "Checking whether VNF uses juju bundle"
6009 if check_juju_bundle_existence(current_vnfd):
6010
6011 raise LcmException(
6012 "Charm upgrade is not supported for the instance which"
6013 " uses juju-bundle: {}".format(
6014 check_juju_bundle_existence(current_vnfd)
6015 )
6016 )
6017
6018 step = "Upgrading Charm"
6019 (
6020 result,
6021 detailed_status,
6022 ) = await self._ns_charm_upgrade(
6023 ee_id=ee_id,
6024 charm_id=vca_id,
6025 charm_type=vca_type,
6026 path=self.fs.path + target_charm_path,
6027 timeout=timeout_seconds,
6028 )
6029
6030 if result == "FAILED":
6031 nslcmop_operation_state = result
6032 error_description_nslcmop = detailed_status
6033
6034 db_nslcmop_update["detailed-status"] = detailed_status
6035 self.logger.debug(
6036 logging_text
6037 + " step {} Done with result {} {}".format(
6038 step, nslcmop_operation_state, detailed_status
6039 )
6040 )
6041
6042 step = "Updating policies"
6043 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6044 result = "COMPLETED"
6045 detailed_status = "Done"
6046 db_nslcmop_update["detailed-status"] = "Done"
6047
6048 # helm base EE
6049 for item in helm_artifacts:
6050 if not (
6051 item["current_artifact_path"]
6052 and item["target_artifact_path"]
6053 and self.check_charm_hash_changed(
6054 item["current_artifact_path"],
6055 item["target_artifact_path"],
6056 )
6057 ):
6058 continue
6059 db_update_entry = "_admin.deployed.VCA.{}.".format(
6060 item["vca_index"]
6061 )
6062 vnfr_id = db_vnfr["_id"]
6063 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6064 db_dict = {
6065 "collection": "nsrs",
6066 "filter": {"_id": nsr_id},
6067 "path": db_update_entry,
6068 }
6069 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6070 await self.vca_map[vca_type].upgrade_execution_environment(
6071 namespace=namespace,
6072 helm_id=helm_id,
6073 db_dict=db_dict,
6074 config=osm_config,
6075 artifact_path=item["target_artifact_path"],
6076 vca_type=vca_type,
6077 )
6078 vnf_id = db_vnfr.get("vnfd-ref")
6079 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6080 self.logger.debug("get ssh key block")
6081 rw_mgmt_ip = None
6082 if deep_get(
6083 config_descriptor,
6084 ("config-access", "ssh-access", "required"),
6085 ):
6086 # Needed to inject a ssh key
6087 user = deep_get(
6088 config_descriptor,
6089 ("config-access", "ssh-access", "default-user"),
6090 )
6091 step = (
6092 "Install configuration Software, getting public ssh key"
6093 )
6094 pub_key = await self.vca_map[
6095 vca_type
6096 ].get_ee_ssh_public__key(
6097 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6098 )
6099
6100 step = (
6101 "Insert public key into VM user={} ssh_key={}".format(
6102 user, pub_key
6103 )
6104 )
6105 self.logger.debug(logging_text + step)
6106
6107 # wait for RO (ip-address) Insert pub_key into VM
6108 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6109 logging_text,
6110 nsr_id,
6111 vnfr_id,
6112 None,
6113 item["vdu_index"],
6114 user=user,
6115 pub_key=pub_key,
6116 )
6117
6118 initial_config_primitive_list = config_descriptor.get(
6119 "initial-config-primitive"
6120 )
6121 config_primitive = next(
6122 (
6123 p
6124 for p in initial_config_primitive_list
6125 if p["name"] == "config"
6126 ),
6127 None,
6128 )
6129 if not config_primitive:
6130 continue
6131
6132 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6133 if rw_mgmt_ip:
6134 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6135 if db_vnfr.get("additionalParamsForVnf"):
6136 deploy_params.update(
6137 parse_yaml_strings(
6138 db_vnfr["additionalParamsForVnf"].copy()
6139 )
6140 )
6141 primitive_params_ = self._map_primitive_params(
6142 config_primitive, {}, deploy_params
6143 )
6144
6145 step = "execute primitive '{}' params '{}'".format(
6146 config_primitive["name"], primitive_params_
6147 )
6148 self.logger.debug(logging_text + step)
6149 await self.vca_map[vca_type].exec_primitive(
6150 ee_id=ee_id,
6151 primitive_name=config_primitive["name"],
6152 params_dict=primitive_params_,
6153 db_dict=db_dict,
6154 vca_id=vca_id,
6155 vca_type=vca_type,
6156 )
6157
6158 step = "Updating policies"
6159 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6160 detailed_status = "Done"
6161 db_nslcmop_update["detailed-status"] = "Done"
6162
6163 # If nslcmop_operation_state is None, so any operation is not failed.
6164 if not nslcmop_operation_state:
6165 nslcmop_operation_state = "COMPLETED"
6166
6167 # If update CHANGE_VNFPKG nslcmop_operation is successful
6168 # vnf revision need to be updated
6169 vnfr_update["revision"] = latest_vnfd_revision
6170 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6171
6172 self.logger.debug(
6173 logging_text
6174 + " task Done with result {} {}".format(
6175 nslcmop_operation_state, detailed_status
6176 )
6177 )
6178 elif update_type == "REMOVE_VNF":
6179 # This part is included in https://osm.etsi.org/gerrit/11876
6180 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6181 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6182 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6183 step = "Removing VNF"
6184 (result, detailed_status) = await self.remove_vnf(
6185 nsr_id, nslcmop_id, vnf_instance_id
6186 )
6187 if result == "FAILED":
6188 nslcmop_operation_state = result
6189 error_description_nslcmop = detailed_status
6190 db_nslcmop_update["detailed-status"] = detailed_status
6191 change_type = "vnf_terminated"
6192 if not nslcmop_operation_state:
6193 nslcmop_operation_state = "COMPLETED"
6194 self.logger.debug(
6195 logging_text
6196 + " task Done with result {} {}".format(
6197 nslcmop_operation_state, detailed_status
6198 )
6199 )
6200
6201 elif update_type == "OPERATE_VNF":
6202 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6203 "vnfInstanceId"
6204 ]
6205 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6206 "changeStateTo"
6207 ]
6208 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6209 "additionalParam"
6210 ]
6211 (result, detailed_status) = await self.rebuild_start_stop(
6212 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6213 )
6214 if result == "FAILED":
6215 nslcmop_operation_state = result
6216 error_description_nslcmop = detailed_status
6217 db_nslcmop_update["detailed-status"] = detailed_status
6218 if not nslcmop_operation_state:
6219 nslcmop_operation_state = "COMPLETED"
6220 self.logger.debug(
6221 logging_text
6222 + " task Done with result {} {}".format(
6223 nslcmop_operation_state, detailed_status
6224 )
6225 )
6226
6227 # If nslcmop_operation_state is None, so any operation is not failed.
6228 # All operations are executed in overall.
6229 if not nslcmop_operation_state:
6230 nslcmop_operation_state = "COMPLETED"
6231 db_nsr_update["operational-status"] = old_operational_status
6232
6233 except (DbException, LcmException, N2VCException, K8sException) as e:
6234 self.logger.error(logging_text + "Exit Exception {}".format(e))
6235 exc = e
6236 except asyncio.CancelledError:
6237 self.logger.error(
6238 logging_text + "Cancelled Exception while '{}'".format(step)
6239 )
6240 exc = "Operation was cancelled"
6241 except asyncio.TimeoutError:
6242 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6243 exc = "Timeout"
6244 except Exception as e:
6245 exc = traceback.format_exc()
6246 self.logger.critical(
6247 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6248 exc_info=True,
6249 )
6250 finally:
6251 if exc:
6252 db_nslcmop_update[
6253 "detailed-status"
6254 ] = (
6255 detailed_status
6256 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6257 nslcmop_operation_state = "FAILED"
6258 db_nsr_update["operational-status"] = old_operational_status
6259 if db_nsr:
6260 self._write_ns_status(
6261 nsr_id=nsr_id,
6262 ns_state=db_nsr["nsState"],
6263 current_operation="IDLE",
6264 current_operation_id=None,
6265 other_update=db_nsr_update,
6266 )
6267
6268 self._write_op_status(
6269 op_id=nslcmop_id,
6270 stage="",
6271 error_message=error_description_nslcmop,
6272 operation_state=nslcmop_operation_state,
6273 other_update=db_nslcmop_update,
6274 )
6275
6276 if nslcmop_operation_state:
6277 try:
6278 msg = {
6279 "nsr_id": nsr_id,
6280 "nslcmop_id": nslcmop_id,
6281 "operationState": nslcmop_operation_state,
6282 }
6283 if change_type in ("vnf_terminated", "policy_updated"):
6284 msg.update({"vnf_member_index": member_vnf_index})
6285 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6286 except Exception as e:
6287 self.logger.error(
6288 logging_text + "kafka_write notification Exception {}".format(e)
6289 )
6290 self.logger.debug(logging_text + "Exit")
6291 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6292 return nslcmop_operation_state, detailed_status
6293
6294 async def scale(self, nsr_id, nslcmop_id):
6295 # Try to lock HA task here
6296 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6297 if not task_is_locked_by_me:
6298 return
6299
6300 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6301 stage = ["", "", ""]
6302 tasks_dict_info = {}
6303 # ^ stage, step, VIM progress
6304 self.logger.debug(logging_text + "Enter")
6305 # get all needed from database
6306 db_nsr = None
6307 db_nslcmop_update = {}
6308 db_nsr_update = {}
6309 exc = None
6310 # in case of error, indicates what part of scale was failed to put nsr at error status
6311 scale_process = None
6312 old_operational_status = ""
6313 old_config_status = ""
6314 nsi_id = None
6315 try:
6316 # wait for any previous tasks in process
6317 step = "Waiting for previous operations to terminate"
6318 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6319 self._write_ns_status(
6320 nsr_id=nsr_id,
6321 ns_state=None,
6322 current_operation="SCALING",
6323 current_operation_id=nslcmop_id,
6324 )
6325
6326 step = "Getting nslcmop from database"
6327 self.logger.debug(
6328 step + " after having waited for previous tasks to be completed"
6329 )
6330 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6331
6332 step = "Getting nsr from database"
6333 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6334 old_operational_status = db_nsr["operational-status"]
6335 old_config_status = db_nsr["config-status"]
6336
6337 step = "Parsing scaling parameters"
6338 db_nsr_update["operational-status"] = "scaling"
6339 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6340 nsr_deployed = db_nsr["_admin"].get("deployed")
6341
6342 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6343 "scaleByStepData"
6344 ]["member-vnf-index"]
6345 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6346 "scaleByStepData"
6347 ]["scaling-group-descriptor"]
6348 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6349 # for backward compatibility
6350 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6351 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6352 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6353 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6354
6355 step = "Getting vnfr from database"
6356 db_vnfr = self.db.get_one(
6357 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6358 )
6359
6360 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6361
6362 step = "Getting vnfd from database"
6363 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6364
6365 base_folder = db_vnfd["_admin"]["storage"]
6366
6367 step = "Getting scaling-group-descriptor"
6368 scaling_descriptor = find_in_list(
6369 get_scaling_aspect(db_vnfd),
6370 lambda scale_desc: scale_desc["name"] == scaling_group,
6371 )
6372 if not scaling_descriptor:
6373 raise LcmException(
6374 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6375 "at vnfd:scaling-group-descriptor".format(scaling_group)
6376 )
6377
6378 step = "Sending scale order to VIM"
6379 # TODO check if ns is in a proper status
6380 nb_scale_op = 0
6381 if not db_nsr["_admin"].get("scaling-group"):
6382 self.update_db_2(
6383 "nsrs",
6384 nsr_id,
6385 {
6386 "_admin.scaling-group": [
6387 {"name": scaling_group, "nb-scale-op": 0}
6388 ]
6389 },
6390 )
6391 admin_scale_index = 0
6392 else:
6393 for admin_scale_index, admin_scale_info in enumerate(
6394 db_nsr["_admin"]["scaling-group"]
6395 ):
6396 if admin_scale_info["name"] == scaling_group:
6397 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6398 break
6399 else: # not found, set index one plus last element and add new entry with the name
6400 admin_scale_index += 1
6401 db_nsr_update[
6402 "_admin.scaling-group.{}.name".format(admin_scale_index)
6403 ] = scaling_group
6404
6405 vca_scaling_info = []
6406 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6407 if scaling_type == "SCALE_OUT":
6408 if "aspect-delta-details" not in scaling_descriptor:
6409 raise LcmException(
6410 "Aspect delta details not fount in scaling descriptor {}".format(
6411 scaling_descriptor["name"]
6412 )
6413 )
6414 # count if max-instance-count is reached
6415 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6416
6417 scaling_info["scaling_direction"] = "OUT"
6418 scaling_info["vdu-create"] = {}
6419 scaling_info["kdu-create"] = {}
6420 for delta in deltas:
6421 for vdu_delta in delta.get("vdu-delta", {}):
6422 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6423 # vdu_index also provides the number of instance of the targeted vdu
6424 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6425 cloud_init_text = self._get_vdu_cloud_init_content(
6426 vdud, db_vnfd
6427 )
6428 if cloud_init_text:
6429 additional_params = (
6430 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6431 or {}
6432 )
6433 cloud_init_list = []
6434
6435 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6436 max_instance_count = 10
6437 if vdu_profile and "max-number-of-instances" in vdu_profile:
6438 max_instance_count = vdu_profile.get(
6439 "max-number-of-instances", 10
6440 )
6441
6442 default_instance_num = get_number_of_instances(
6443 db_vnfd, vdud["id"]
6444 )
6445 instances_number = vdu_delta.get("number-of-instances", 1)
6446 nb_scale_op += instances_number
6447
6448 new_instance_count = nb_scale_op + default_instance_num
6449 # Control if new count is over max and vdu count is less than max.
6450 # Then assign new instance count
6451 if new_instance_count > max_instance_count > vdu_count:
6452 instances_number = new_instance_count - max_instance_count
6453 else:
6454 instances_number = instances_number
6455
6456 if new_instance_count > max_instance_count:
6457 raise LcmException(
6458 "reached the limit of {} (max-instance-count) "
6459 "scaling-out operations for the "
6460 "scaling-group-descriptor '{}'".format(
6461 nb_scale_op, scaling_group
6462 )
6463 )
6464 for x in range(vdu_delta.get("number-of-instances", 1)):
6465 if cloud_init_text:
6466 # TODO Information of its own ip is not available because db_vnfr is not updated.
6467 additional_params["OSM"] = get_osm_params(
6468 db_vnfr, vdu_delta["id"], vdu_index + x
6469 )
6470 cloud_init_list.append(
6471 self._parse_cloud_init(
6472 cloud_init_text,
6473 additional_params,
6474 db_vnfd["id"],
6475 vdud["id"],
6476 )
6477 )
6478 vca_scaling_info.append(
6479 {
6480 "osm_vdu_id": vdu_delta["id"],
6481 "member-vnf-index": vnf_index,
6482 "type": "create",
6483 "vdu_index": vdu_index + x,
6484 }
6485 )
6486 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6487 for kdu_delta in delta.get("kdu-resource-delta", {}):
6488 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6489 kdu_name = kdu_profile["kdu-name"]
6490 resource_name = kdu_profile.get("resource-name", "")
6491
6492 # Might have different kdus in the same delta
6493 # Should have list for each kdu
6494 if not scaling_info["kdu-create"].get(kdu_name, None):
6495 scaling_info["kdu-create"][kdu_name] = []
6496
6497 kdur = get_kdur(db_vnfr, kdu_name)
6498 if kdur.get("helm-chart"):
6499 k8s_cluster_type = "helm-chart-v3"
6500 self.logger.debug("kdur: {}".format(kdur))
6501 if (
6502 kdur.get("helm-version")
6503 and kdur.get("helm-version") == "v2"
6504 ):
6505 k8s_cluster_type = "helm-chart"
6506 elif kdur.get("juju-bundle"):
6507 k8s_cluster_type = "juju-bundle"
6508 else:
6509 raise LcmException(
6510 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6511 "juju-bundle. Maybe an old NBI version is running".format(
6512 db_vnfr["member-vnf-index-ref"], kdu_name
6513 )
6514 )
6515
6516 max_instance_count = 10
6517 if kdu_profile and "max-number-of-instances" in kdu_profile:
6518 max_instance_count = kdu_profile.get(
6519 "max-number-of-instances", 10
6520 )
6521
6522 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6523 deployed_kdu, _ = get_deployed_kdu(
6524 nsr_deployed, kdu_name, vnf_index
6525 )
6526 if deployed_kdu is None:
6527 raise LcmException(
6528 "KDU '{}' for vnf '{}' not deployed".format(
6529 kdu_name, vnf_index
6530 )
6531 )
6532 kdu_instance = deployed_kdu.get("kdu-instance")
6533 instance_num = await self.k8scluster_map[
6534 k8s_cluster_type
6535 ].get_scale_count(
6536 resource_name,
6537 kdu_instance,
6538 vca_id=vca_id,
6539 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6540 kdu_model=deployed_kdu.get("kdu-model"),
6541 )
6542 kdu_replica_count = instance_num + kdu_delta.get(
6543 "number-of-instances", 1
6544 )
6545
6546 # Control if new count is over max and instance_num is less than max.
6547 # Then assign max instance number to kdu replica count
6548 if kdu_replica_count > max_instance_count > instance_num:
6549 kdu_replica_count = max_instance_count
6550 if kdu_replica_count > max_instance_count:
6551 raise LcmException(
6552 "reached the limit of {} (max-instance-count) "
6553 "scaling-out operations for the "
6554 "scaling-group-descriptor '{}'".format(
6555 instance_num, scaling_group
6556 )
6557 )
6558
6559 for x in range(kdu_delta.get("number-of-instances", 1)):
6560 vca_scaling_info.append(
6561 {
6562 "osm_kdu_id": kdu_name,
6563 "member-vnf-index": vnf_index,
6564 "type": "create",
6565 "kdu_index": instance_num + x - 1,
6566 }
6567 )
6568 scaling_info["kdu-create"][kdu_name].append(
6569 {
6570 "member-vnf-index": vnf_index,
6571 "type": "create",
6572 "k8s-cluster-type": k8s_cluster_type,
6573 "resource-name": resource_name,
6574 "scale": kdu_replica_count,
6575 }
6576 )
6577 elif scaling_type == "SCALE_IN":
6578 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6579
6580 scaling_info["scaling_direction"] = "IN"
6581 scaling_info["vdu-delete"] = {}
6582 scaling_info["kdu-delete"] = {}
6583
6584 for delta in deltas:
6585 for vdu_delta in delta.get("vdu-delta", {}):
6586 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6587 min_instance_count = 0
6588 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6589 if vdu_profile and "min-number-of-instances" in vdu_profile:
6590 min_instance_count = vdu_profile["min-number-of-instances"]
6591
6592 default_instance_num = get_number_of_instances(
6593 db_vnfd, vdu_delta["id"]
6594 )
6595 instance_num = vdu_delta.get("number-of-instances", 1)
6596 nb_scale_op -= instance_num
6597
6598 new_instance_count = nb_scale_op + default_instance_num
6599
6600 if new_instance_count < min_instance_count < vdu_count:
6601 instances_number = min_instance_count - new_instance_count
6602 else:
6603 instances_number = instance_num
6604
6605 if new_instance_count < min_instance_count:
6606 raise LcmException(
6607 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6608 "scaling-group-descriptor '{}'".format(
6609 nb_scale_op, scaling_group
6610 )
6611 )
6612 for x in range(vdu_delta.get("number-of-instances", 1)):
6613 vca_scaling_info.append(
6614 {
6615 "osm_vdu_id": vdu_delta["id"],
6616 "member-vnf-index": vnf_index,
6617 "type": "delete",
6618 "vdu_index": vdu_index - 1 - x,
6619 }
6620 )
6621 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6622 for kdu_delta in delta.get("kdu-resource-delta", {}):
6623 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6624 kdu_name = kdu_profile["kdu-name"]
6625 resource_name = kdu_profile.get("resource-name", "")
6626
6627 if not scaling_info["kdu-delete"].get(kdu_name, None):
6628 scaling_info["kdu-delete"][kdu_name] = []
6629
6630 kdur = get_kdur(db_vnfr, kdu_name)
6631 if kdur.get("helm-chart"):
6632 k8s_cluster_type = "helm-chart-v3"
6633 self.logger.debug("kdur: {}".format(kdur))
6634 if (
6635 kdur.get("helm-version")
6636 and kdur.get("helm-version") == "v2"
6637 ):
6638 k8s_cluster_type = "helm-chart"
6639 elif kdur.get("juju-bundle"):
6640 k8s_cluster_type = "juju-bundle"
6641 else:
6642 raise LcmException(
6643 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6644 "juju-bundle. Maybe an old NBI version is running".format(
6645 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6646 )
6647 )
6648
6649 min_instance_count = 0
6650 if kdu_profile and "min-number-of-instances" in kdu_profile:
6651 min_instance_count = kdu_profile["min-number-of-instances"]
6652
6653 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6654 deployed_kdu, _ = get_deployed_kdu(
6655 nsr_deployed, kdu_name, vnf_index
6656 )
6657 if deployed_kdu is None:
6658 raise LcmException(
6659 "KDU '{}' for vnf '{}' not deployed".format(
6660 kdu_name, vnf_index
6661 )
6662 )
6663 kdu_instance = deployed_kdu.get("kdu-instance")
6664 instance_num = await self.k8scluster_map[
6665 k8s_cluster_type
6666 ].get_scale_count(
6667 resource_name,
6668 kdu_instance,
6669 vca_id=vca_id,
6670 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6671 kdu_model=deployed_kdu.get("kdu-model"),
6672 )
6673 kdu_replica_count = instance_num - kdu_delta.get(
6674 "number-of-instances", 1
6675 )
6676
6677 if kdu_replica_count < min_instance_count < instance_num:
6678 kdu_replica_count = min_instance_count
6679 if kdu_replica_count < min_instance_count:
6680 raise LcmException(
6681 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6682 "scaling-group-descriptor '{}'".format(
6683 instance_num, scaling_group
6684 )
6685 )
6686
6687 for x in range(kdu_delta.get("number-of-instances", 1)):
6688 vca_scaling_info.append(
6689 {
6690 "osm_kdu_id": kdu_name,
6691 "member-vnf-index": vnf_index,
6692 "type": "delete",
6693 "kdu_index": instance_num - x - 1,
6694 }
6695 )
6696 scaling_info["kdu-delete"][kdu_name].append(
6697 {
6698 "member-vnf-index": vnf_index,
6699 "type": "delete",
6700 "k8s-cluster-type": k8s_cluster_type,
6701 "resource-name": resource_name,
6702 "scale": kdu_replica_count,
6703 }
6704 )
6705
6706 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6707 vdu_delete = copy(scaling_info.get("vdu-delete"))
6708 if scaling_info["scaling_direction"] == "IN":
6709 for vdur in reversed(db_vnfr["vdur"]):
6710 if vdu_delete.get(vdur["vdu-id-ref"]):
6711 vdu_delete[vdur["vdu-id-ref"]] -= 1
6712 scaling_info["vdu"].append(
6713 {
6714 "name": vdur.get("name") or vdur.get("vdu-name"),
6715 "vdu_id": vdur["vdu-id-ref"],
6716 "interface": [],
6717 }
6718 )
6719 for interface in vdur["interfaces"]:
6720 scaling_info["vdu"][-1]["interface"].append(
6721 {
6722 "name": interface["name"],
6723 "ip_address": interface["ip-address"],
6724 "mac_address": interface.get("mac-address"),
6725 }
6726 )
6727 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6728
6729 # PRE-SCALE BEGIN
6730 step = "Executing pre-scale vnf-config-primitive"
6731 if scaling_descriptor.get("scaling-config-action"):
6732 for scaling_config_action in scaling_descriptor[
6733 "scaling-config-action"
6734 ]:
6735 if (
6736 scaling_config_action.get("trigger") == "pre-scale-in"
6737 and scaling_type == "SCALE_IN"
6738 ) or (
6739 scaling_config_action.get("trigger") == "pre-scale-out"
6740 and scaling_type == "SCALE_OUT"
6741 ):
6742 vnf_config_primitive = scaling_config_action[
6743 "vnf-config-primitive-name-ref"
6744 ]
6745 step = db_nslcmop_update[
6746 "detailed-status"
6747 ] = "executing pre-scale scaling-config-action '{}'".format(
6748 vnf_config_primitive
6749 )
6750
6751 # look for primitive
6752 for config_primitive in (
6753 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6754 ).get("config-primitive", ()):
6755 if config_primitive["name"] == vnf_config_primitive:
6756 break
6757 else:
6758 raise LcmException(
6759 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6760 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6761 "primitive".format(scaling_group, vnf_config_primitive)
6762 )
6763
6764 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6765 if db_vnfr.get("additionalParamsForVnf"):
6766 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6767
6768 scale_process = "VCA"
6769 db_nsr_update["config-status"] = "configuring pre-scaling"
6770 primitive_params = self._map_primitive_params(
6771 config_primitive, {}, vnfr_params
6772 )
6773
6774 # Pre-scale retry check: Check if this sub-operation has been executed before
6775 op_index = self._check_or_add_scale_suboperation(
6776 db_nslcmop,
6777 vnf_index,
6778 vnf_config_primitive,
6779 primitive_params,
6780 "PRE-SCALE",
6781 )
6782 if op_index == self.SUBOPERATION_STATUS_SKIP:
6783 # Skip sub-operation
6784 result = "COMPLETED"
6785 result_detail = "Done"
6786 self.logger.debug(
6787 logging_text
6788 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6789 vnf_config_primitive, result, result_detail
6790 )
6791 )
6792 else:
6793 if op_index == self.SUBOPERATION_STATUS_NEW:
6794 # New sub-operation: Get index of this sub-operation
6795 op_index = (
6796 len(db_nslcmop.get("_admin", {}).get("operations"))
6797 - 1
6798 )
6799 self.logger.debug(
6800 logging_text
6801 + "vnf_config_primitive={} New sub-operation".format(
6802 vnf_config_primitive
6803 )
6804 )
6805 else:
6806 # retry: Get registered params for this existing sub-operation
6807 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6808 op_index
6809 ]
6810 vnf_index = op.get("member_vnf_index")
6811 vnf_config_primitive = op.get("primitive")
6812 primitive_params = op.get("primitive_params")
6813 self.logger.debug(
6814 logging_text
6815 + "vnf_config_primitive={} Sub-operation retry".format(
6816 vnf_config_primitive
6817 )
6818 )
6819 # Execute the primitive, either with new (first-time) or registered (reintent) args
6820 ee_descriptor_id = config_primitive.get(
6821 "execution-environment-ref"
6822 )
6823 primitive_name = config_primitive.get(
6824 "execution-environment-primitive", vnf_config_primitive
6825 )
6826 ee_id, vca_type = self._look_for_deployed_vca(
6827 nsr_deployed["VCA"],
6828 member_vnf_index=vnf_index,
6829 vdu_id=None,
6830 vdu_count_index=None,
6831 ee_descriptor_id=ee_descriptor_id,
6832 )
6833 result, result_detail = await self._ns_execute_primitive(
6834 ee_id,
6835 primitive_name,
6836 primitive_params,
6837 vca_type=vca_type,
6838 vca_id=vca_id,
6839 )
6840 self.logger.debug(
6841 logging_text
6842 + "vnf_config_primitive={} Done with result {} {}".format(
6843 vnf_config_primitive, result, result_detail
6844 )
6845 )
6846 # Update operationState = COMPLETED | FAILED
6847 self._update_suboperation_status(
6848 db_nslcmop, op_index, result, result_detail
6849 )
6850
6851 if result == "FAILED":
6852 raise LcmException(result_detail)
6853 db_nsr_update["config-status"] = old_config_status
6854 scale_process = None
6855 # PRE-SCALE END
6856
6857 db_nsr_update[
6858 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6859 ] = nb_scale_op
6860 db_nsr_update[
6861 "_admin.scaling-group.{}.time".format(admin_scale_index)
6862 ] = time()
6863
6864 # SCALE-IN VCA - BEGIN
6865 if vca_scaling_info:
6866 step = db_nslcmop_update[
6867 "detailed-status"
6868 ] = "Deleting the execution environments"
6869 scale_process = "VCA"
6870 for vca_info in vca_scaling_info:
6871 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6872 member_vnf_index = str(vca_info["member-vnf-index"])
6873 self.logger.debug(
6874 logging_text + "vdu info: {}".format(vca_info)
6875 )
6876 if vca_info.get("osm_vdu_id"):
6877 vdu_id = vca_info["osm_vdu_id"]
6878 vdu_index = int(vca_info["vdu_index"])
6879 stage[
6880 1
6881 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6882 member_vnf_index, vdu_id, vdu_index
6883 )
6884 stage[2] = step = "Scaling in VCA"
6885 self._write_op_status(op_id=nslcmop_id, stage=stage)
6886 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6887 config_update = db_nsr["configurationStatus"]
6888 for vca_index, vca in enumerate(vca_update):
6889 if (
6890 (vca or vca.get("ee_id"))
6891 and vca["member-vnf-index"] == member_vnf_index
6892 and vca["vdu_count_index"] == vdu_index
6893 ):
6894 if vca.get("vdu_id"):
6895 config_descriptor = get_configuration(
6896 db_vnfd, vca.get("vdu_id")
6897 )
6898 elif vca.get("kdu_name"):
6899 config_descriptor = get_configuration(
6900 db_vnfd, vca.get("kdu_name")
6901 )
6902 else:
6903 config_descriptor = get_configuration(
6904 db_vnfd, db_vnfd["id"]
6905 )
6906 operation_params = (
6907 db_nslcmop.get("operationParams") or {}
6908 )
6909 exec_terminate_primitives = not operation_params.get(
6910 "skip_terminate_primitives"
6911 ) and vca.get("needed_terminate")
6912 task = asyncio.ensure_future(
6913 asyncio.wait_for(
6914 self.destroy_N2VC(
6915 logging_text,
6916 db_nslcmop,
6917 vca,
6918 config_descriptor,
6919 vca_index,
6920 destroy_ee=True,
6921 exec_primitives=exec_terminate_primitives,
6922 scaling_in=True,
6923 vca_id=vca_id,
6924 ),
6925 timeout=self.timeout_charm_delete,
6926 )
6927 )
6928 tasks_dict_info[task] = "Terminating VCA {}".format(
6929 vca.get("ee_id")
6930 )
6931 del vca_update[vca_index]
6932 del config_update[vca_index]
6933 # wait for pending tasks of terminate primitives
6934 if tasks_dict_info:
6935 self.logger.debug(
6936 logging_text
6937 + "Waiting for tasks {}".format(
6938 list(tasks_dict_info.keys())
6939 )
6940 )
6941 error_list = await self._wait_for_tasks(
6942 logging_text,
6943 tasks_dict_info,
6944 min(
6945 self.timeout_charm_delete, self.timeout_ns_terminate
6946 ),
6947 stage,
6948 nslcmop_id,
6949 )
6950 tasks_dict_info.clear()
6951 if error_list:
6952 raise LcmException("; ".join(error_list))
6953
6954 db_vca_and_config_update = {
6955 "_admin.deployed.VCA": vca_update,
6956 "configurationStatus": config_update,
6957 }
6958 self.update_db_2(
6959 "nsrs", db_nsr["_id"], db_vca_and_config_update
6960 )
6961 scale_process = None
6962 # SCALE-IN VCA - END
6963
6964 # SCALE RO - BEGIN
6965 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6966 scale_process = "RO"
6967 if self.ro_config.get("ng"):
6968 await self._scale_ng_ro(
6969 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6970 )
6971 scaling_info.pop("vdu-create", None)
6972 scaling_info.pop("vdu-delete", None)
6973
6974 scale_process = None
6975 # SCALE RO - END
6976
6977 # SCALE KDU - BEGIN
6978 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6979 scale_process = "KDU"
6980 await self._scale_kdu(
6981 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6982 )
6983 scaling_info.pop("kdu-create", None)
6984 scaling_info.pop("kdu-delete", None)
6985
6986 scale_process = None
6987 # SCALE KDU - END
6988
6989 if db_nsr_update:
6990 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6991
6992 # SCALE-UP VCA - BEGIN
6993 if vca_scaling_info:
6994 step = db_nslcmop_update[
6995 "detailed-status"
6996 ] = "Creating new execution environments"
6997 scale_process = "VCA"
6998 for vca_info in vca_scaling_info:
6999 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7000 member_vnf_index = str(vca_info["member-vnf-index"])
7001 self.logger.debug(
7002 logging_text + "vdu info: {}".format(vca_info)
7003 )
7004 vnfd_id = db_vnfr["vnfd-ref"]
7005 if vca_info.get("osm_vdu_id"):
7006 vdu_index = int(vca_info["vdu_index"])
7007 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7008 if db_vnfr.get("additionalParamsForVnf"):
7009 deploy_params.update(
7010 parse_yaml_strings(
7011 db_vnfr["additionalParamsForVnf"].copy()
7012 )
7013 )
7014 descriptor_config = get_configuration(
7015 db_vnfd, db_vnfd["id"]
7016 )
7017 if descriptor_config:
7018 vdu_id = None
7019 vdu_name = None
7020 kdu_name = None
7021 self._deploy_n2vc(
7022 logging_text=logging_text
7023 + "member_vnf_index={} ".format(member_vnf_index),
7024 db_nsr=db_nsr,
7025 db_vnfr=db_vnfr,
7026 nslcmop_id=nslcmop_id,
7027 nsr_id=nsr_id,
7028 nsi_id=nsi_id,
7029 vnfd_id=vnfd_id,
7030 vdu_id=vdu_id,
7031 kdu_name=kdu_name,
7032 member_vnf_index=member_vnf_index,
7033 vdu_index=vdu_index,
7034 vdu_name=vdu_name,
7035 deploy_params=deploy_params,
7036 descriptor_config=descriptor_config,
7037 base_folder=base_folder,
7038 task_instantiation_info=tasks_dict_info,
7039 stage=stage,
7040 )
7041 vdu_id = vca_info["osm_vdu_id"]
7042 vdur = find_in_list(
7043 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7044 )
7045 descriptor_config = get_configuration(db_vnfd, vdu_id)
7046 if vdur.get("additionalParams"):
7047 deploy_params_vdu = parse_yaml_strings(
7048 vdur["additionalParams"]
7049 )
7050 else:
7051 deploy_params_vdu = deploy_params
7052 deploy_params_vdu["OSM"] = get_osm_params(
7053 db_vnfr, vdu_id, vdu_count_index=vdu_index
7054 )
7055 if descriptor_config:
7056 vdu_name = None
7057 kdu_name = None
7058 stage[
7059 1
7060 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7061 member_vnf_index, vdu_id, vdu_index
7062 )
7063 stage[2] = step = "Scaling out VCA"
7064 self._write_op_status(op_id=nslcmop_id, stage=stage)
7065 self._deploy_n2vc(
7066 logging_text=logging_text
7067 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7068 member_vnf_index, vdu_id, vdu_index
7069 ),
7070 db_nsr=db_nsr,
7071 db_vnfr=db_vnfr,
7072 nslcmop_id=nslcmop_id,
7073 nsr_id=nsr_id,
7074 nsi_id=nsi_id,
7075 vnfd_id=vnfd_id,
7076 vdu_id=vdu_id,
7077 kdu_name=kdu_name,
7078 member_vnf_index=member_vnf_index,
7079 vdu_index=vdu_index,
7080 vdu_name=vdu_name,
7081 deploy_params=deploy_params_vdu,
7082 descriptor_config=descriptor_config,
7083 base_folder=base_folder,
7084 task_instantiation_info=tasks_dict_info,
7085 stage=stage,
7086 )
7087 # SCALE-UP VCA - END
7088 scale_process = None
7089
7090 # POST-SCALE BEGIN
7091 # execute primitive service POST-SCALING
7092 step = "Executing post-scale vnf-config-primitive"
7093 if scaling_descriptor.get("scaling-config-action"):
7094 for scaling_config_action in scaling_descriptor[
7095 "scaling-config-action"
7096 ]:
7097 if (
7098 scaling_config_action.get("trigger") == "post-scale-in"
7099 and scaling_type == "SCALE_IN"
7100 ) or (
7101 scaling_config_action.get("trigger") == "post-scale-out"
7102 and scaling_type == "SCALE_OUT"
7103 ):
7104 vnf_config_primitive = scaling_config_action[
7105 "vnf-config-primitive-name-ref"
7106 ]
7107 step = db_nslcmop_update[
7108 "detailed-status"
7109 ] = "executing post-scale scaling-config-action '{}'".format(
7110 vnf_config_primitive
7111 )
7112
7113 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7114 if db_vnfr.get("additionalParamsForVnf"):
7115 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7116
7117 # look for primitive
7118 for config_primitive in (
7119 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7120 ).get("config-primitive", ()):
7121 if config_primitive["name"] == vnf_config_primitive:
7122 break
7123 else:
7124 raise LcmException(
7125 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7126 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7127 "config-primitive".format(
7128 scaling_group, vnf_config_primitive
7129 )
7130 )
7131 scale_process = "VCA"
7132 db_nsr_update["config-status"] = "configuring post-scaling"
7133 primitive_params = self._map_primitive_params(
7134 config_primitive, {}, vnfr_params
7135 )
7136
7137 # Post-scale retry check: Check if this sub-operation has been executed before
7138 op_index = self._check_or_add_scale_suboperation(
7139 db_nslcmop,
7140 vnf_index,
7141 vnf_config_primitive,
7142 primitive_params,
7143 "POST-SCALE",
7144 )
7145 if op_index == self.SUBOPERATION_STATUS_SKIP:
7146 # Skip sub-operation
7147 result = "COMPLETED"
7148 result_detail = "Done"
7149 self.logger.debug(
7150 logging_text
7151 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7152 vnf_config_primitive, result, result_detail
7153 )
7154 )
7155 else:
7156 if op_index == self.SUBOPERATION_STATUS_NEW:
7157 # New sub-operation: Get index of this sub-operation
7158 op_index = (
7159 len(db_nslcmop.get("_admin", {}).get("operations"))
7160 - 1
7161 )
7162 self.logger.debug(
7163 logging_text
7164 + "vnf_config_primitive={} New sub-operation".format(
7165 vnf_config_primitive
7166 )
7167 )
7168 else:
7169 # retry: Get registered params for this existing sub-operation
7170 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7171 op_index
7172 ]
7173 vnf_index = op.get("member_vnf_index")
7174 vnf_config_primitive = op.get("primitive")
7175 primitive_params = op.get("primitive_params")
7176 self.logger.debug(
7177 logging_text
7178 + "vnf_config_primitive={} Sub-operation retry".format(
7179 vnf_config_primitive
7180 )
7181 )
7182 # Execute the primitive, either with new (first-time) or registered (reintent) args
7183 ee_descriptor_id = config_primitive.get(
7184 "execution-environment-ref"
7185 )
7186 primitive_name = config_primitive.get(
7187 "execution-environment-primitive", vnf_config_primitive
7188 )
7189 ee_id, vca_type = self._look_for_deployed_vca(
7190 nsr_deployed["VCA"],
7191 member_vnf_index=vnf_index,
7192 vdu_id=None,
7193 vdu_count_index=None,
7194 ee_descriptor_id=ee_descriptor_id,
7195 )
7196 result, result_detail = await self._ns_execute_primitive(
7197 ee_id,
7198 primitive_name,
7199 primitive_params,
7200 vca_type=vca_type,
7201 vca_id=vca_id,
7202 )
7203 self.logger.debug(
7204 logging_text
7205 + "vnf_config_primitive={} Done with result {} {}".format(
7206 vnf_config_primitive, result, result_detail
7207 )
7208 )
7209 # Update operationState = COMPLETED | FAILED
7210 self._update_suboperation_status(
7211 db_nslcmop, op_index, result, result_detail
7212 )
7213
7214 if result == "FAILED":
7215 raise LcmException(result_detail)
7216 db_nsr_update["config-status"] = old_config_status
7217 scale_process = None
7218 # POST-SCALE END
7219
7220 db_nsr_update[
7221 "detailed-status"
7222 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7223 db_nsr_update["operational-status"] = (
7224 "running"
7225 if old_operational_status == "failed"
7226 else old_operational_status
7227 )
7228 db_nsr_update["config-status"] = old_config_status
7229 return
7230 except (
7231 ROclient.ROClientException,
7232 DbException,
7233 LcmException,
7234 NgRoException,
7235 ) as e:
7236 self.logger.error(logging_text + "Exit Exception {}".format(e))
7237 exc = e
7238 except asyncio.CancelledError:
7239 self.logger.error(
7240 logging_text + "Cancelled Exception while '{}'".format(step)
7241 )
7242 exc = "Operation was cancelled"
7243 except Exception as e:
7244 exc = traceback.format_exc()
7245 self.logger.critical(
7246 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7247 exc_info=True,
7248 )
7249 finally:
7250 self._write_ns_status(
7251 nsr_id=nsr_id,
7252 ns_state=None,
7253 current_operation="IDLE",
7254 current_operation_id=None,
7255 )
7256 if tasks_dict_info:
7257 stage[1] = "Waiting for instantiate pending tasks."
7258 self.logger.debug(logging_text + stage[1])
7259 exc = await self._wait_for_tasks(
7260 logging_text,
7261 tasks_dict_info,
7262 self.timeout_ns_deploy,
7263 stage,
7264 nslcmop_id,
7265 nsr_id=nsr_id,
7266 )
7267 if exc:
7268 db_nslcmop_update[
7269 "detailed-status"
7270 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7271 nslcmop_operation_state = "FAILED"
7272 if db_nsr:
7273 db_nsr_update["operational-status"] = old_operational_status
7274 db_nsr_update["config-status"] = old_config_status
7275 db_nsr_update["detailed-status"] = ""
7276 if scale_process:
7277 if "VCA" in scale_process:
7278 db_nsr_update["config-status"] = "failed"
7279 if "RO" in scale_process:
7280 db_nsr_update["operational-status"] = "failed"
7281 db_nsr_update[
7282 "detailed-status"
7283 ] = "FAILED scaling nslcmop={} {}: {}".format(
7284 nslcmop_id, step, exc
7285 )
7286 else:
7287 error_description_nslcmop = None
7288 nslcmop_operation_state = "COMPLETED"
7289 db_nslcmop_update["detailed-status"] = "Done"
7290
7291 self._write_op_status(
7292 op_id=nslcmop_id,
7293 stage="",
7294 error_message=error_description_nslcmop,
7295 operation_state=nslcmop_operation_state,
7296 other_update=db_nslcmop_update,
7297 )
7298 if db_nsr:
7299 self._write_ns_status(
7300 nsr_id=nsr_id,
7301 ns_state=None,
7302 current_operation="IDLE",
7303 current_operation_id=None,
7304 other_update=db_nsr_update,
7305 )
7306
7307 if nslcmop_operation_state:
7308 try:
7309 msg = {
7310 "nsr_id": nsr_id,
7311 "nslcmop_id": nslcmop_id,
7312 "operationState": nslcmop_operation_state,
7313 }
7314 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7315 except Exception as e:
7316 self.logger.error(
7317 logging_text + "kafka_write notification Exception {}".format(e)
7318 )
7319 self.logger.debug(logging_text + "Exit")
7320 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7321
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale KDU applications of a NS up or down.

        For each KDU scaling record it runs, in order:
        1. the KDU terminate config primitives (only for "delete" records,
           and only when there is no juju execution environment for the KDU),
        2. the scale operation itself on the proper k8s cluster connector,
        3. the KDU initial config primitives (only for "create" records,
           same juju-EE restriction).

        :param logging_text: prefix for log messages
        :param nsr_id: NS record _id (used to address the nsrs DB document)
        :param nsr_deployed: "_admin.deployed" section of the nsr
        :param db_vnfd: VNF descriptor holding the KDU configuration
        :param vca_id: VCA id passed through to the k8s connector calls
        :param scaling_info: dict with "kdu-create" / "kdu-delete" entries,
            each mapping kdu_name -> list of per-instance scaling records
        """
        # NOTE(review): with "or", if both "kdu-create" and "kdu-delete" were
        # present only the "kdu-create" entries would be processed — confirm
        # the caller never fills both at once.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU record (and its index inside
                # _admin.deployed.K8s) for this kdu/member-vnf-index
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # DB location where the k8s connector writes operation status
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # terminate primitives are only executed here when the KDU
                    # has no juju execution environment of its own
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung connector;
                            # its timeout is deliberately larger than the
                            # connector's own total_timeout
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout_primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout_primitive
                                * self.timeout_primitive_outer_factor,
                            )

                # the actual scale operation, common to create and delete
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout_scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_scale_on_error
                    * self.timeout_scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # initial primitives run after scale-out, same juju-EE
                    # restriction as for terminate primitives above
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): hard-coded 600s outer timeout here,
                            # unlike the terminate branch which derives it from
                            # self.timeout_primitive — confirm this is intended
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7431
7432 async def _scale_ng_ro(
7433 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7434 ):
7435 nsr_id = db_nslcmop["nsInstanceId"]
7436 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7437 db_vnfrs = {}
7438
7439 # read from db: vnfd's for every vnf
7440 db_vnfds = []
7441
7442 # for each vnf in ns, read vnfd
7443 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7444 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7445 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7446 # if we haven't this vnfd, read it from db
7447 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7448 # read from db
7449 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7450 db_vnfds.append(vnfd)
7451 n2vc_key = self.n2vc.get_public_key()
7452 n2vc_key_list = [n2vc_key]
7453 self.scale_vnfr(
7454 db_vnfr,
7455 vdu_scaling_info.get("vdu-create"),
7456 vdu_scaling_info.get("vdu-delete"),
7457 mark_delete=True,
7458 )
7459 # db_vnfr has been updated, update db_vnfrs to use it
7460 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7461 await self._instantiate_ng_ro(
7462 logging_text,
7463 nsr_id,
7464 db_nsd,
7465 db_nsr,
7466 db_nslcmop,
7467 db_vnfrs,
7468 db_vnfds,
7469 n2vc_key_list,
7470 stage=stage,
7471 start_deploy=time(),
7472 timeout_ns_deploy=self.timeout_ns_deploy,
7473 )
7474 if vdu_scaling_info.get("vdu-delete"):
7475 self.scale_vnfr(
7476 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7477 )
7478
7479 async def extract_prometheus_scrape_jobs(
7480 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7481 ):
7482 # look if exist a file called 'prometheus*.j2' and
7483 artifact_content = self.fs.dir_ls(artifact_path)
7484 job_file = next(
7485 (
7486 f
7487 for f in artifact_content
7488 if f.startswith("prometheus") and f.endswith(".j2")
7489 ),
7490 None,
7491 )
7492 if not job_file:
7493 return
7494 with self.fs.file_open((artifact_path, job_file), "r") as f:
7495 job_data = f.read()
7496
7497 # TODO get_service
7498 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7499 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7500 host_port = "80"
7501 vnfr_id = vnfr_id.replace("-", "")
7502 variables = {
7503 "JOB_NAME": vnfr_id,
7504 "TARGET_IP": target_ip,
7505 "EXPORTER_POD_IP": host_name,
7506 "EXPORTER_POD_PORT": host_port,
7507 }
7508 job_list = parse_job(job_data, variables)
7509 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7510 for job in job_list:
7511 if (
7512 not isinstance(job.get("job_name"), str)
7513 or vnfr_id not in job["job_name"]
7514 ):
7515 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7516 job["nsr_id"] = nsr_id
7517 job["vnfr_id"] = vnfr_id
7518 return job_list
7519
7520 async def rebuild_start_stop(
7521 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7522 ):
7523 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7524 self.logger.info(logging_text + "Enter")
7525 stage = ["Preparing the environment", ""]
7526 # database nsrs record
7527 db_nsr_update = {}
7528 vdu_vim_name = None
7529 vim_vm_id = None
7530 # in case of error, indicates what part of scale was failed to put nsr at error status
7531 start_deploy = time()
7532 try:
7533 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7534 vim_account_id = db_vnfr.get("vim-account-id")
7535 vim_info_key = "vim:" + vim_account_id
7536 vdu_id = additional_param["vdu_id"]
7537 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7538 vdur = find_in_list(
7539 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7540 )
7541 if vdur:
7542 vdu_vim_name = vdur["name"]
7543 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7544 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7545 else:
7546 raise LcmException("Target vdu is not found")
7547 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7548 # wait for any previous tasks in process
7549 stage[1] = "Waiting for previous operations to terminate"
7550 self.logger.info(stage[1])
7551 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7552
7553 stage[1] = "Reading from database."
7554 self.logger.info(stage[1])
7555 self._write_ns_status(
7556 nsr_id=nsr_id,
7557 ns_state=None,
7558 current_operation=operation_type.upper(),
7559 current_operation_id=nslcmop_id,
7560 )
7561 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7562
7563 # read from db: ns
7564 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7565 db_nsr_update["operational-status"] = operation_type
7566 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7567 # Payload for RO
7568 desc = {
7569 operation_type: {
7570 "vim_vm_id": vim_vm_id,
7571 "vnf_id": vnf_id,
7572 "vdu_index": additional_param["count-index"],
7573 "vdu_id": vdur["id"],
7574 "target_vim": target_vim,
7575 "vim_account_id": vim_account_id,
7576 }
7577 }
7578 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7579 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7580 self.logger.info("ro nsr id: {}".format(nsr_id))
7581 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7582 self.logger.info("response from RO: {}".format(result_dict))
7583 action_id = result_dict["action_id"]
7584 await self._wait_ng_ro(
7585 nsr_id,
7586 action_id,
7587 nslcmop_id,
7588 start_deploy,
7589 self.timeout_operate,
7590 None,
7591 "start_stop_rebuild",
7592 )
7593 return "COMPLETED", "Done"
7594 except (ROclient.ROClientException, DbException, LcmException) as e:
7595 self.logger.error("Exit Exception {}".format(e))
7596 exc = e
7597 except asyncio.CancelledError:
7598 self.logger.error("Cancelled Exception while '{}'".format(stage))
7599 exc = "Operation was cancelled"
7600 except Exception as e:
7601 exc = traceback.format_exc()
7602 self.logger.critical(
7603 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7604 )
7605 return "FAILED", "Error in operate VNF {}".format(exc)
7606
7607 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7608 """
7609 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7610
7611 :param: vim_account_id: VIM Account ID
7612
7613 :return: (cloud_name, cloud_credential)
7614 """
7615 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7616 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7617
7618 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7619 """
7620 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7621
7622 :param: vim_account_id: VIM Account ID
7623
7624 :return: (cloud_name, cloud_credential)
7625 """
7626 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7627 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7628
7629 async def migrate(self, nsr_id, nslcmop_id):
7630 """
7631 Migrate VNFs and VDUs instances in a NS
7632
7633 :param: nsr_id: NS Instance ID
7634 :param: nslcmop_id: nslcmop ID of migrate
7635
7636 """
7637 # Try to lock HA task here
7638 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7639 if not task_is_locked_by_me:
7640 return
7641 logging_text = "Task ns={} migrate ".format(nsr_id)
7642 self.logger.debug(logging_text + "Enter")
7643 # get all needed from database
7644 db_nslcmop = None
7645 db_nslcmop_update = {}
7646 nslcmop_operation_state = None
7647 db_nsr_update = {}
7648 target = {}
7649 exc = None
7650 # in case of error, indicates what part of scale was failed to put nsr at error status
7651 start_deploy = time()
7652
7653 try:
7654 # wait for any previous tasks in process
7655 step = "Waiting for previous operations to terminate"
7656 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7657
7658 self._write_ns_status(
7659 nsr_id=nsr_id,
7660 ns_state=None,
7661 current_operation="MIGRATING",
7662 current_operation_id=nslcmop_id,
7663 )
7664 step = "Getting nslcmop from database"
7665 self.logger.debug(
7666 step + " after having waited for previous tasks to be completed"
7667 )
7668 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7669 migrate_params = db_nslcmop.get("operationParams")
7670
7671 target = {}
7672 target.update(migrate_params)
7673 desc = await self.RO.migrate(nsr_id, target)
7674 self.logger.debug("RO return > {}".format(desc))
7675 action_id = desc["action_id"]
7676 await self._wait_ng_ro(
7677 nsr_id,
7678 action_id,
7679 nslcmop_id,
7680 start_deploy,
7681 self.timeout_migrate,
7682 operation="migrate",
7683 )
7684 except (ROclient.ROClientException, DbException, LcmException) as e:
7685 self.logger.error("Exit Exception {}".format(e))
7686 exc = e
7687 except asyncio.CancelledError:
7688 self.logger.error("Cancelled Exception while '{}'".format(step))
7689 exc = "Operation was cancelled"
7690 except Exception as e:
7691 exc = traceback.format_exc()
7692 self.logger.critical(
7693 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7694 )
7695 finally:
7696 self._write_ns_status(
7697 nsr_id=nsr_id,
7698 ns_state=None,
7699 current_operation="IDLE",
7700 current_operation_id=None,
7701 )
7702 if exc:
7703 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7704 nslcmop_operation_state = "FAILED"
7705 else:
7706 nslcmop_operation_state = "COMPLETED"
7707 db_nslcmop_update["detailed-status"] = "Done"
7708 db_nsr_update["detailed-status"] = "Done"
7709
7710 self._write_op_status(
7711 op_id=nslcmop_id,
7712 stage="",
7713 error_message="",
7714 operation_state=nslcmop_operation_state,
7715 other_update=db_nslcmop_update,
7716 )
7717 if nslcmop_operation_state:
7718 try:
7719 msg = {
7720 "nsr_id": nsr_id,
7721 "nslcmop_id": nslcmop_id,
7722 "operationState": nslcmop_operation_state,
7723 }
7724 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7725 except Exception as e:
7726 self.logger.error(
7727 logging_text + "kafka_write notification Exception {}".format(e)
7728 )
7729 self.logger.debug(logging_text + "Exit")
7730 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7731
7732 async def heal(self, nsr_id, nslcmop_id):
7733 """
7734 Heal NS
7735
7736 :param nsr_id: ns instance to heal
7737 :param nslcmop_id: operation to run
7738 :return:
7739 """
7740
7741 # Try to lock HA task here
7742 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7743 if not task_is_locked_by_me:
7744 return
7745
7746 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7747 stage = ["", "", ""]
7748 tasks_dict_info = {}
7749 # ^ stage, step, VIM progress
7750 self.logger.debug(logging_text + "Enter")
7751 # get all needed from database
7752 db_nsr = None
7753 db_nslcmop_update = {}
7754 db_nsr_update = {}
7755 db_vnfrs = {} # vnf's info indexed by _id
7756 exc = None
7757 old_operational_status = ""
7758 old_config_status = ""
7759 nsi_id = None
7760 try:
7761 # wait for any previous tasks in process
7762 step = "Waiting for previous operations to terminate"
7763 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7764 self._write_ns_status(
7765 nsr_id=nsr_id,
7766 ns_state=None,
7767 current_operation="HEALING",
7768 current_operation_id=nslcmop_id,
7769 )
7770
7771 step = "Getting nslcmop from database"
7772 self.logger.debug(
7773 step + " after having waited for previous tasks to be completed"
7774 )
7775 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7776
7777 step = "Getting nsr from database"
7778 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7779 old_operational_status = db_nsr["operational-status"]
7780 old_config_status = db_nsr["config-status"]
7781
7782 db_nsr_update = {
7783 "_admin.deployed.RO.operational-status": "healing",
7784 }
7785 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7786
7787 step = "Sending heal order to VIM"
7788 await self.heal_RO(
7789 logging_text=logging_text,
7790 nsr_id=nsr_id,
7791 db_nslcmop=db_nslcmop,
7792 stage=stage,
7793 )
7794 # VCA tasks
7795 # read from db: nsd
7796 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7797 self.logger.debug(logging_text + stage[1])
7798 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7799 self.fs.sync(db_nsr["nsd-id"])
7800 db_nsr["nsd"] = nsd
7801 # read from db: vnfr's of this ns
7802 step = "Getting vnfrs from db"
7803 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7804 for vnfr in db_vnfrs_list:
7805 db_vnfrs[vnfr["_id"]] = vnfr
7806 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7807
7808 # Check for each target VNF
7809 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7810 for target_vnf in target_list:
7811 # Find this VNF in the list from DB
7812 vnfr_id = target_vnf.get("vnfInstanceId", None)
7813 if vnfr_id:
7814 db_vnfr = db_vnfrs[vnfr_id]
7815 vnfd_id = db_vnfr.get("vnfd-id")
7816 vnfd_ref = db_vnfr.get("vnfd-ref")
7817 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7818 base_folder = vnfd["_admin"]["storage"]
7819 vdu_id = None
7820 vdu_index = 0
7821 vdu_name = None
7822 kdu_name = None
7823 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7824 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7825
7826 # Check each target VDU and deploy N2VC
7827 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7828 "vdu", []
7829 )
7830 if not target_vdu_list:
7831 # Codigo nuevo para crear diccionario
7832 target_vdu_list = []
7833 for existing_vdu in db_vnfr.get("vdur"):
7834 vdu_name = existing_vdu.get("vdu-name", None)
7835 vdu_index = existing_vdu.get("count-index", 0)
7836 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7837 "run-day1", False
7838 )
7839 vdu_to_be_healed = {
7840 "vdu-id": vdu_name,
7841 "count-index": vdu_index,
7842 "run-day1": vdu_run_day1,
7843 }
7844 target_vdu_list.append(vdu_to_be_healed)
7845 for target_vdu in target_vdu_list:
7846 deploy_params_vdu = target_vdu
7847 # Set run-day1 vnf level value if not vdu level value exists
7848 if not deploy_params_vdu.get("run-day1") and target_vnf[
7849 "additionalParams"
7850 ].get("run-day1"):
7851 deploy_params_vdu["run-day1"] = target_vnf[
7852 "additionalParams"
7853 ].get("run-day1")
7854 vdu_name = target_vdu.get("vdu-id", None)
7855 # TODO: Get vdu_id from vdud.
7856 vdu_id = vdu_name
7857 # For multi instance VDU count-index is mandatory
7858 # For single session VDU count-indes is 0
7859 vdu_index = target_vdu.get("count-index", 0)
7860
7861 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7862 stage[1] = "Deploying Execution Environments."
7863 self.logger.debug(logging_text + stage[1])
7864
7865 # VNF Level charm. Normal case when proxy charms.
7866 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7867 descriptor_config = get_configuration(vnfd, vnfd_ref)
7868 if descriptor_config:
7869 # Continue if healed machine is management machine
7870 vnf_ip_address = db_vnfr.get("ip-address")
7871 target_instance = None
7872 for instance in db_vnfr.get("vdur", None):
7873 if (
7874 instance["vdu-name"] == vdu_name
7875 and instance["count-index"] == vdu_index
7876 ):
7877 target_instance = instance
7878 break
7879 if vnf_ip_address == target_instance.get("ip-address"):
7880 self._heal_n2vc(
7881 logging_text=logging_text
7882 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7883 member_vnf_index, vdu_name, vdu_index
7884 ),
7885 db_nsr=db_nsr,
7886 db_vnfr=db_vnfr,
7887 nslcmop_id=nslcmop_id,
7888 nsr_id=nsr_id,
7889 nsi_id=nsi_id,
7890 vnfd_id=vnfd_ref,
7891 vdu_id=None,
7892 kdu_name=None,
7893 member_vnf_index=member_vnf_index,
7894 vdu_index=0,
7895 vdu_name=None,
7896 deploy_params=deploy_params_vdu,
7897 descriptor_config=descriptor_config,
7898 base_folder=base_folder,
7899 task_instantiation_info=tasks_dict_info,
7900 stage=stage,
7901 )
7902
7903 # VDU Level charm. Normal case with native charms.
7904 descriptor_config = get_configuration(vnfd, vdu_name)
7905 if descriptor_config:
7906 self._heal_n2vc(
7907 logging_text=logging_text
7908 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7909 member_vnf_index, vdu_name, vdu_index
7910 ),
7911 db_nsr=db_nsr,
7912 db_vnfr=db_vnfr,
7913 nslcmop_id=nslcmop_id,
7914 nsr_id=nsr_id,
7915 nsi_id=nsi_id,
7916 vnfd_id=vnfd_ref,
7917 vdu_id=vdu_id,
7918 kdu_name=kdu_name,
7919 member_vnf_index=member_vnf_index,
7920 vdu_index=vdu_index,
7921 vdu_name=vdu_name,
7922 deploy_params=deploy_params_vdu,
7923 descriptor_config=descriptor_config,
7924 base_folder=base_folder,
7925 task_instantiation_info=tasks_dict_info,
7926 stage=stage,
7927 )
7928
7929 except (
7930 ROclient.ROClientException,
7931 DbException,
7932 LcmException,
7933 NgRoException,
7934 ) as e:
7935 self.logger.error(logging_text + "Exit Exception {}".format(e))
7936 exc = e
7937 except asyncio.CancelledError:
7938 self.logger.error(
7939 logging_text + "Cancelled Exception while '{}'".format(step)
7940 )
7941 exc = "Operation was cancelled"
7942 except Exception as e:
7943 exc = traceback.format_exc()
7944 self.logger.critical(
7945 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7946 exc_info=True,
7947 )
7948 finally:
7949 if tasks_dict_info:
7950 stage[1] = "Waiting for healing pending tasks."
7951 self.logger.debug(logging_text + stage[1])
7952 exc = await self._wait_for_tasks(
7953 logging_text,
7954 tasks_dict_info,
7955 self.timeout_ns_deploy,
7956 stage,
7957 nslcmop_id,
7958 nsr_id=nsr_id,
7959 )
7960 if exc:
7961 db_nslcmop_update[
7962 "detailed-status"
7963 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7964 nslcmop_operation_state = "FAILED"
7965 if db_nsr:
7966 db_nsr_update["operational-status"] = old_operational_status
7967 db_nsr_update["config-status"] = old_config_status
7968 db_nsr_update[
7969 "detailed-status"
7970 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
7971 for task, task_name in tasks_dict_info.items():
7972 if not task.done() or task.cancelled() or task.exception():
7973 if task_name.startswith(self.task_name_deploy_vca):
7974 # A N2VC task is pending
7975 db_nsr_update["config-status"] = "failed"
7976 else:
7977 # RO task is pending
7978 db_nsr_update["operational-status"] = "failed"
7979 else:
7980 error_description_nslcmop = None
7981 nslcmop_operation_state = "COMPLETED"
7982 db_nslcmop_update["detailed-status"] = "Done"
7983 db_nsr_update["detailed-status"] = "Done"
7984 db_nsr_update["operational-status"] = "running"
7985 db_nsr_update["config-status"] = "configured"
7986
7987 self._write_op_status(
7988 op_id=nslcmop_id,
7989 stage="",
7990 error_message=error_description_nslcmop,
7991 operation_state=nslcmop_operation_state,
7992 other_update=db_nslcmop_update,
7993 )
7994 if db_nsr:
7995 self._write_ns_status(
7996 nsr_id=nsr_id,
7997 ns_state=None,
7998 current_operation="IDLE",
7999 current_operation_id=None,
8000 other_update=db_nsr_update,
8001 )
8002
8003 if nslcmop_operation_state:
8004 try:
8005 msg = {
8006 "nsr_id": nsr_id,
8007 "nslcmop_id": nslcmop_id,
8008 "operationState": nslcmop_operation_state,
8009 }
8010 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
8011 except Exception as e:
8012 self.logger.error(
8013 logging_text + "kafka_write notification Exception {}".format(e)
8014 )
8015 self.logger.debug(logging_text + "Exit")
8016 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8017
8018 async def heal_RO(
8019 self,
8020 logging_text,
8021 nsr_id,
8022 db_nslcmop,
8023 stage,
8024 ):
8025 """
8026 Heal at RO
8027 :param logging_text: preffix text to use at logging
8028 :param nsr_id: nsr identity
8029 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8030 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8031 :return: None or exception
8032 """
8033
8034 def get_vim_account(vim_account_id):
8035 nonlocal db_vims
8036 if vim_account_id in db_vims:
8037 return db_vims[vim_account_id]
8038 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8039 db_vims[vim_account_id] = db_vim
8040 return db_vim
8041
8042 try:
8043 start_heal = time()
8044 ns_params = db_nslcmop.get("operationParams")
8045 if ns_params and ns_params.get("timeout_ns_heal"):
8046 timeout_ns_heal = ns_params["timeout_ns_heal"]
8047 else:
8048 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
8049
8050 db_vims = {}
8051
8052 nslcmop_id = db_nslcmop["_id"]
8053 target = {
8054 "action_id": nslcmop_id,
8055 }
8056 self.logger.warning(
8057 "db_nslcmop={} and timeout_ns_heal={}".format(
8058 db_nslcmop, timeout_ns_heal
8059 )
8060 )
8061 target.update(db_nslcmop.get("operationParams", {}))
8062
8063 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8064 desc = await self.RO.recreate(nsr_id, target)
8065 self.logger.debug("RO return > {}".format(desc))
8066 action_id = desc["action_id"]
8067 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8068 await self._wait_ng_ro(
8069 nsr_id,
8070 action_id,
8071 nslcmop_id,
8072 start_heal,
8073 timeout_ns_heal,
8074 stage,
8075 operation="healing",
8076 )
8077
8078 # Updating NSR
8079 db_nsr_update = {
8080 "_admin.deployed.RO.operational-status": "running",
8081 "detailed-status": " ".join(stage),
8082 }
8083 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8084 self._write_op_status(nslcmop_id, stage)
8085 self.logger.debug(
8086 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8087 )
8088
8089 except Exception as e:
8090 stage[2] = "ERROR healing at VIM"
8091 # self.set_vnfr_at_error(db_vnfrs, str(e))
8092 self.logger.error(
8093 "Error healing at VIM {}".format(e),
8094 exc_info=not isinstance(
8095 e,
8096 (
8097 ROclient.ROClientException,
8098 LcmException,
8099 DbException,
8100 NgRoException,
8101 ),
8102 ),
8103 )
8104 raise
8105
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch heal_N2VC asyncio task(s) for each execution environment of a descriptor.

        Looks up the charm information at database <nsrs>._admin.deployed.VCA;
        if not found, creates one entry (filling db_nsr._admin.deployed.VCA.<index>)
        and updates the database. Each launched task is registered in
        task_instantiation_info so the caller can wait on it.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        # Build the list of execution environments declared by the descriptor
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                # NOTE(review): this inner condition is always true inside the
                # elif branch — kept as-is; confirm before simplifying
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type (proxy/native charm, k8s, helm v2/v3) from the EE item
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Find an existing deployed-VCA entry matching this element; the
            # for/else creates a new one only when the loop finds no match.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # append position: one past the last enumerated index
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy in sync with the db write above
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8267
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal a single VCA execution environment.

        For native charms: waits for the healed VM, registers a new execution
        environment and reinstalls the configuration software. For proxy/helm
        types: waits for RO healing to complete and reinjects the SSH key into
        the VM. Optionally re-runs Day-1 primitives when the operation
        parameter 'run-day1' is set. Writes configuration status updates to
        the nsrs record throughout; raises LcmException on any failure.
        """
        nsr_id = db_nsr["_id"]
        # dotted prefix for the db updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # 'config' primitive parameters are passed as charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                for job in prometheus_jobs:
                    self.db.set_one(
                        "prometheus_jobs",
                        {"job_name": job["job_name"]},
                        job,
                        upsert=True,
                        fail_on_empty=False,
                    )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            # 'step' identifies the phase that failed for the operator
            raise LcmException("{} {}".format(step, e)) from e
8677
8678 async def _wait_heal_ro(
8679 self,
8680 nsr_id,
8681 timeout=600,
8682 ):
8683 start_time = time()
8684 while time() <= start_time + timeout:
8685 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8686 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8687 "operational-status"
8688 ]
8689 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8690 if operational_status_ro != "healing":
8691 break
8692 await asyncio.sleep(15, loop=self.loop)
8693 else: # timeout_ns_deploy
8694 raise NgRoException("Timeout waiting ns to deploy")
8695
8696 async def vertical_scale(self, nsr_id, nslcmop_id):
8697 """
8698 Vertical Scale the VDUs in a NS
8699
8700 :param: nsr_id: NS Instance ID
8701 :param: nslcmop_id: nslcmop ID of migrate
8702
8703 """
8704 # Try to lock HA task here
8705 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8706 if not task_is_locked_by_me:
8707 return
8708 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8709 self.logger.debug(logging_text + "Enter")
8710 # get all needed from database
8711 db_nslcmop = None
8712 db_nslcmop_update = {}
8713 nslcmop_operation_state = None
8714 db_nsr_update = {}
8715 target = {}
8716 exc = None
8717 # in case of error, indicates what part of scale was failed to put nsr at error status
8718 start_deploy = time()
8719
8720 try:
8721 # wait for any previous tasks in process
8722 step = "Waiting for previous operations to terminate"
8723 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8724
8725 self._write_ns_status(
8726 nsr_id=nsr_id,
8727 ns_state=None,
8728 current_operation="VerticalScale",
8729 current_operation_id=nslcmop_id,
8730 )
8731 step = "Getting nslcmop from database"
8732 self.logger.debug(
8733 step + " after having waited for previous tasks to be completed"
8734 )
8735 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8736 operationParams = db_nslcmop.get("operationParams")
8737 target = {}
8738 target.update(operationParams)
8739 desc = await self.RO.vertical_scale(nsr_id, target)
8740 self.logger.debug("RO return > {}".format(desc))
8741 action_id = desc["action_id"]
8742 await self._wait_ng_ro(
8743 nsr_id,
8744 action_id,
8745 nslcmop_id,
8746 start_deploy,
8747 self.timeout_verticalscale,
8748 operation="verticalscale",
8749 )
8750 except (ROclient.ROClientException, DbException, LcmException) as e:
8751 self.logger.error("Exit Exception {}".format(e))
8752 exc = e
8753 except asyncio.CancelledError:
8754 self.logger.error("Cancelled Exception while '{}'".format(step))
8755 exc = "Operation was cancelled"
8756 except Exception as e:
8757 exc = traceback.format_exc()
8758 self.logger.critical(
8759 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8760 )
8761 finally:
8762 self._write_ns_status(
8763 nsr_id=nsr_id,
8764 ns_state=None,
8765 current_operation="IDLE",
8766 current_operation_id=None,
8767 )
8768 if exc:
8769 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8770 nslcmop_operation_state = "FAILED"
8771 else:
8772 nslcmop_operation_state = "COMPLETED"
8773 db_nslcmop_update["detailed-status"] = "Done"
8774 db_nsr_update["detailed-status"] = "Done"
8775
8776 self._write_op_status(
8777 op_id=nslcmop_id,
8778 stage="",
8779 error_message="",
8780 operation_state=nslcmop_operation_state,
8781 other_update=db_nslcmop_update,
8782 )
8783 if nslcmop_operation_state:
8784 try:
8785 msg = {
8786 "nsr_id": nsr_id,
8787 "nslcmop_id": nslcmop_id,
8788 "operationState": nslcmop_operation_state,
8789 }
8790 await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8791 except Exception as e:
8792 self.logger.error(
8793 logging_text + "kafka_write notification Exception {}".format(e)
8794 )
8795 self.logger.debug(logging_text + "Exit")
8796 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")