968e1f3f52f2740dde807ee64b3a4e4c4b615037
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.nsr import (
38 get_deployed_kdu,
39 get_deployed_vca,
40 get_deployed_vca_list,
41 get_nsd,
42 )
43 from osm_lcm.data_utils.vca import (
44 DeployedComponent,
45 DeployedK8sResource,
46 DeployedVCA,
47 EELevel,
48 Relation,
49 EERelation,
50 safe_get_ee_relation,
51 )
52 from osm_lcm.ng_ro import NgRoClient, NgRoException
53 from osm_lcm.lcm_utils import (
54 LcmException,
55 LcmExceptionNoMgmtIP,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 get_ee_id_parts,
63 )
64 from osm_lcm.data_utils.nsd import (
65 get_ns_configuration_relation_list,
66 get_vnf_profile,
67 get_vnf_profiles,
68 )
69 from osm_lcm.data_utils.vnfd import (
70 get_kdu,
71 get_kdu_services,
72 get_relation_list,
73 get_vdu_list,
74 get_vdu_profile,
75 get_ee_sorted_initial_config_primitive_list,
76 get_ee_sorted_terminate_config_primitive_list,
77 get_kdu_list,
78 get_virtual_link_profiles,
79 get_vdu,
80 get_configuration,
81 get_vdu_index,
82 get_scaling_aspect,
83 get_number_of_instances,
84 get_juju_ee_ref,
85 get_kdu_resource_profile,
86 find_software_version,
87 check_helm_ee_in_ns,
88 )
89 from osm_lcm.data_utils.list_utils import find_in_list
90 from osm_lcm.data_utils.vnfr import (
91 get_osm_params,
92 get_vdur_index,
93 get_kdur,
94 get_volumes_from_instantiation_params,
95 )
96 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
97 from osm_lcm.data_utils.database.vim_account import VimAccountDB
98 from n2vc.definitions import RelationEndpoint
99 from n2vc.k8s_helm_conn import K8sHelmConnector
100 from n2vc.k8s_helm3_conn import K8sHelm3Connector
101 from n2vc.k8s_juju_conn import K8sJujuConnector
102
103 from osm_common.dbbase import DbException
104 from osm_common.fsbase import FsException
105
106 from osm_lcm.data_utils.database.database import Database
107 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
108 from osm_lcm.data_utils.wim import (
109 get_sdn_ports,
110 get_target_wim_attrs,
111 select_feasible_wim_account,
112 )
113
114 from n2vc.n2vc_juju_conn import N2VCJujuConnector
115 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
116
117 from osm_lcm.lcm_helm_conn import LCMHelmConn
118 from osm_lcm.osm_config import OsmConfigBuilder
119 from osm_lcm.prometheus import parse_job
120
121 from copy import copy, deepcopy
122 from time import time
123 from uuid import uuid4
124
125 from random import randint
126
127 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
128
129
130 class NsLcm(LcmBase):
131 timeout_scale_on_error = (
132 5 * 60
133 ) # Time for charm from first time at blocked,error status to mark as failed
134 timeout_scale_on_error_outer_factor = 1.05 # Factor in relation to timeout_scale_on_error related to the timeout to be applied within the asyncio.wait_for coroutine
135 timeout_ns_deploy = 2 * 3600 # default global timeout for deployment a ns
136 timeout_ns_terminate = 1800 # default global timeout for un deployment a ns
137 timeout_ns_heal = 1800 # default global timeout for un deployment a ns
138 timeout_charm_delete = 10 * 60
139 timeout_primitive = 30 * 60 # Timeout for primitive execution
140 timeout_primitive_outer_factor = 1.05 # Factor in relation to timeout_primitive related to the timeout to be applied within the asyncio.wait_for coroutine
141 timeout_ns_update = 30 * 60 # timeout for ns update
142 timeout_progress_primitive = (
143 10 * 60
144 ) # timeout for some progress in a primitive execution
145 timeout_migrate = 1800 # default global timeout for migrating vnfs
146 timeout_operate = 1800 # default global timeout for migrating vnfs
147 timeout_verticalscale = 1800 # default global timeout for Vertical Sclaing
148 SUBOPERATION_STATUS_NOT_FOUND = -1
149 SUBOPERATION_STATUS_NEW = -2
150 SUBOPERATION_STATUS_SKIP = -3
151 task_name_deploy_vca = "Deploying VCA"
152
def __init__(self, msg, lcm_tasks, config, loop):
    """
    Init, Connect to database, filesystem storage, and messaging
    :param msg: message-bus client, handed to LcmBase for event publishing
    :param lcm_tasks: registry of running LCM asyncio tasks
    :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
    :param loop: asyncio event loop shared with every connector created here
    :return: None
    """
    super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

    self.db = Database().instance.db
    self.fs = Filesystem().instance.fs
    self.loop = loop
    self.lcm_tasks = lcm_tasks
    self.timeout = config["timeout"]
    self.ro_config = config["ro_config"]
    self.ng_ro = config["ro_config"].get("ng")
    # copy so per-connector tweaks do not leak back into the shared config dict
    self.vca_config = config["VCA"].copy()

    # create N2VC connector
    self.n2vc = N2VCJujuConnector(
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_n2vc_db,
        fs=self.fs,
        db=self.db,
    )

    # execution-environment connector for helm-based charms
    self.conn_helm_ee = LCMHelmConn(
        log=self.logger,
        loop=self.loop,
        vca_config=self.vca_config,
        on_update_db=self._on_update_n2vc_db,
    )

    self.k8sclusterhelm2 = K8sHelmConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helmpath"),
        log=self.logger,
        on_update_db=None,
        fs=self.fs,
        db=self.db,
    )

    self.k8sclusterhelm3 = K8sHelm3Connector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        helm_command=self.vca_config.get("helm3path"),
        fs=self.fs,
        log=self.logger,
        db=self.db,
        on_update_db=None,
    )

    self.k8sclusterjuju = K8sJujuConnector(
        kubectl_command=self.vca_config.get("kubectlpath"),
        juju_command=self.vca_config.get("jujupath"),
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_k8s_db,
        fs=self.fs,
        db=self.db,
    )

    # maps kdu deployment type -> k8s connector ("chart" goes to helm v3)
    self.k8scluster_map = {
        "helm-chart": self.k8sclusterhelm2,
        "helm-chart-v3": self.k8sclusterhelm3,
        "chart": self.k8sclusterhelm3,
        "juju-bundle": self.k8sclusterjuju,
        "juju": self.k8sclusterjuju,
    }

    # maps charm deployment mode -> VCA connector
    self.vca_map = {
        "lxc_proxy_charm": self.n2vc,
        "native_charm": self.n2vc,
        "k8s_proxy_charm": self.n2vc,
        "helm": self.conn_helm_ee,
        "helm-v3": self.conn_helm_ee,
    }

    # create RO client
    self.RO = NgRoClient(self.loop, **self.ro_config)

    # maps LCM operation type -> RO coroutine used to poll that operation's status
    self.op_status_map = {
        "instantiation": self.RO.status,
        "termination": self.RO.status,
        "migrate": self.RO.status,
        "healing": self.RO.recreate_status,
        "verticalscale": self.RO.status,
        "start_stop_rebuild": self.RO.status,
    }
241
242 @staticmethod
243 def increment_ip_mac(ip_mac, vm_index=1):
244 if not isinstance(ip_mac, str):
245 return ip_mac
246 try:
247 # try with ipv4 look for last dot
248 i = ip_mac.rfind(".")
249 if i > 0:
250 i += 1
251 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
252 # try with ipv6 or mac look for last colon. Operate in hex
253 i = ip_mac.rfind(":")
254 if i > 0:
255 i += 1
256 # format in hex, len can be 2 for mac or 4 for ipv6
257 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
258 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
259 )
260 except Exception:
261 pass
262 return None
263
264 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
265
266 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
267
268 try:
269 # TODO filter RO descriptor fields...
270
271 # write to database
272 db_dict = dict()
273 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
274 db_dict["deploymentStatus"] = ro_descriptor
275 self.update_db_2("nsrs", nsrs_id, db_dict)
276
277 except Exception as e:
278 self.logger.warn(
279 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
280 )
281
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
    """
    Callback invoked by N2VC when juju reports a change; refreshes the nsrs
    record with the latest VCA status and derives READY/DEGRADED nsState.
    :param table: db table the notification refers to (the nsr itself is always re-read from "nsrs")
    :param filter: db filter; its "_id" is the nsr id
    :param path: dot-separated path of the changed element; its last token is the VCA index
    :param updated_data: changed content (not used directly; full status is re-queried)
    :param vca_id: optional VCA id forwarded to n2vc.get_status
    :return: None. Errors other than cancellation/timeout are logged, not raised.
    """

    # remove last dot from path (if exists)
    if path.endswith("."):
        path = path[:-1]

    # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
    #                   .format(table, filter, path, updated_data))
    try:

        nsr_id = filter.get("_id")

        # read ns record from database
        nsr = self.db.get_one(table="nsrs", q_filter=filter)
        current_ns_status = nsr.get("nsState")

        # get vca status for NS
        status_dict = await self.n2vc.get_status(
            namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
        )

        # vcaStatus
        db_dict = dict()
        db_dict["vcaStatus"] = status_dict

        # update configurationStatus for this VCA
        try:
            # the VCA index is the last dotted token of the notified path
            vca_index = int(path[path.rfind(".") + 1 :])

            vca_list = deep_get(
                target_dict=nsr, key_list=("_admin", "deployed", "VCA")
            )
            vca_status = vca_list[vca_index].get("status")

            configuration_status_list = nsr.get("configurationStatus")
            config_status = configuration_status_list[vca_index].get("status")

            # NOTE(review): db_dict has no "configurationStatus" key, so these
            # assignments raise KeyError and are silently swallowed by the
            # except below — confirm whether a dot-path key such as
            # "configurationStatus.{index}.status" was intended here.
            if config_status == "BROKEN" and vca_status != "failed":
                db_dict["configurationStatus"][vca_index] = "READY"
            elif config_status != "BROKEN" and vca_status == "failed":
                db_dict["configurationStatus"][vca_index] = "BROKEN"
        except Exception as e:
            # not update configurationStatus
            self.logger.debug("Error updating vca_index (ignore): {}".format(e))

        # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
        # if nsState = 'DEGRADED' check if all is OK
        is_degraded = False
        if current_ns_status in ("READY", "DEGRADED"):
            error_description = ""
            # check machines
            if status_dict.get("machines"):
                for machine_id in status_dict.get("machines"):
                    machine = status_dict.get("machines").get(machine_id)
                    # check machine agent-status
                    if machine.get("agent-status"):
                        s = machine.get("agent-status").get("status")
                        if s != "started":
                            is_degraded = True
                            error_description += (
                                "machine {} agent-status={} ; ".format(
                                    machine_id, s
                                )
                            )
                    # check machine instance status
                    if machine.get("instance-status"):
                        s = machine.get("instance-status").get("status")
                        if s != "running":
                            is_degraded = True
                            error_description += (
                                "machine {} instance-status={} ; ".format(
                                    machine_id, s
                                )
                            )
            # check applications
            if status_dict.get("applications"):
                for app_id in status_dict.get("applications"):
                    app = status_dict.get("applications").get(app_id)
                    # check application status
                    if app.get("status"):
                        s = app.get("status").get("status")
                        if s != "active":
                            is_degraded = True
                            error_description += (
                                "application {} status={} ; ".format(app_id, s)
                            )

            if error_description:
                db_dict["errorDescription"] = error_description
            if current_ns_status == "READY" and is_degraded:
                db_dict["nsState"] = "DEGRADED"
            if current_ns_status == "DEGRADED" and not is_degraded:
                db_dict["nsState"] = "READY"

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
383
384 async def _on_update_k8s_db(
385 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
386 ):
387 """
388 Updating vca status in NSR record
389 :param cluster_uuid: UUID of a k8s cluster
390 :param kdu_instance: The unique name of the KDU instance
391 :param filter: To get nsr_id
392 :cluster_type: The cluster type (juju, k8s)
393 :return: none
394 """
395
396 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
397 # .format(cluster_uuid, kdu_instance, filter))
398
399 nsr_id = filter.get("_id")
400 try:
401 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
402 cluster_uuid=cluster_uuid,
403 kdu_instance=kdu_instance,
404 yaml_format=False,
405 complete_status=True,
406 vca_id=vca_id,
407 )
408
409 # vcaStatus
410 db_dict = dict()
411 db_dict["vcaStatus"] = {nsr_id: vca_status}
412
413 self.logger.debug(
414 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
415 )
416
417 # write to database
418 self.update_db_2("nsrs", nsr_id, db_dict)
419 except (asyncio.CancelledError, asyncio.TimeoutError):
420 raise
421 except Exception as e:
422 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
423
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """
    Render the cloud-init Jinja2 template with the provided instantiation params.
    :param cloud_init_text: raw cloud-init content (Jinja2 template)
    :param additional_params: variables for the template; may be None
    :param vnfd_id: vnfd id, used only in error messages
    :param vdu_id: vdu id, used only in error messages
    :return: rendered cloud-init text
    :raises LcmException: if a template variable is missing or the template is invalid
    """
    try:
        jinja_env = Environment(
            undefined=StrictUndefined,
            autoescape=select_autoescape(default_for_string=True, default=True),
        )
        return jinja_env.from_string(cloud_init_text).render(additional_params or {})
    except UndefinedError as e:
        # StrictUndefined makes any unbound variable fail loudly
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
445
446 def _get_vdu_cloud_init_content(self, vdu, vnfd):
447 cloud_init_content = cloud_init_file = None
448 try:
449 if vdu.get("cloud-init-file"):
450 base_folder = vnfd["_admin"]["storage"]
451 if base_folder["pkg-dir"]:
452 cloud_init_file = "{}/{}/cloud_init/{}".format(
453 base_folder["folder"],
454 base_folder["pkg-dir"],
455 vdu["cloud-init-file"],
456 )
457 else:
458 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
459 base_folder["folder"],
460 vdu["cloud-init-file"],
461 )
462 with self.fs.file_open(cloud_init_file, "r") as ci_file:
463 cloud_init_content = ci_file.read()
464 elif vdu.get("cloud-init"):
465 cloud_init_content = vdu["cloud-init"]
466
467 return cloud_init_content
468 except FsException as e:
469 raise LcmException(
470 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
471 vnfd["id"], vdu["id"], cloud_init_file, e
472 )
473 )
474
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """
    Return the parsed additionalParams of the first vdur matching vdu_id.
    :param db_vnfr: vnfr content; its "vdur" list is searched (may be absent)
    :param vdu_id: vdu-id-ref to look for
    :return: result of parse_yaml_strings over the vdur's additionalParams
        (or over None when no matching vdur / no params exist)
    """
    # fix: guard against a missing/None "vdur" key, which previously made
    # next() iterate over None and raise TypeError
    vdur = next(
        (
            vdur
            for vdur in db_vnfr.get("vdur") or ()
            if vdu_id == vdur["vdu-id-ref"]
        ),
        {},
    )
    additional_params = vdur.get("additionalParams")
    return parse_yaml_strings(additional_params)
481
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided
    :param nsrId: Id of the NSR
    :return: copy of vnfd
    """
    vnfd_RO = deepcopy(vnfd)
    # remove unused by RO configuration, monitoring, scaling and internal keys
    for ro_unused_key in (
        "_id",
        "_admin",
        "monitoring-param",
        "scaling-group-descriptor",
        "kdu",
        "k8s-cluster",
    ):
        vnfd_RO.pop(ro_unused_key, None)
    if new_id:
        vnfd_RO["id"] = new_id

    # parse cloud-init or cloud-init-file with the provided variables using Jinja2
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
507
508 @staticmethod
509 def ip_profile_2_RO(ip_profile):
510 RO_ip_profile = deepcopy(ip_profile)
511 if "dns-server" in RO_ip_profile:
512 if isinstance(RO_ip_profile["dns-server"], list):
513 RO_ip_profile["dns-address"] = []
514 for ds in RO_ip_profile.pop("dns-server"):
515 RO_ip_profile["dns-address"].append(ds["address"])
516 else:
517 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
518 if RO_ip_profile.get("ip-version") == "ipv4":
519 RO_ip_profile["ip-version"] = "IPv4"
520 if RO_ip_profile.get("ip-version") == "ipv6":
521 RO_ip_profile["ip-version"] = "IPv6"
522 if "dhcp-params" in RO_ip_profile:
523 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
524 return RO_ip_profile
525
526 def _get_ro_vim_id_for_vim_account(self, vim_account):
527 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
528 if db_vim["_admin"]["operationalState"] != "ENABLED":
529 raise LcmException(
530 "VIM={} is not available. operationalState={}".format(
531 vim_account, db_vim["_admin"]["operationalState"]
532 )
533 )
534 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
535 return RO_vim_id
536
537 def get_ro_wim_id_for_wim_account(self, wim_account):
538 if isinstance(wim_account, str):
539 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
540 if db_wim["_admin"]["operationalState"] != "ENABLED":
541 raise LcmException(
542 "WIM={} is not available. operationalState={}".format(
543 wim_account, db_wim["_admin"]["operationalState"]
544 )
545 )
546 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
547 return RO_wim_id
548 else:
549 return wim_account
550
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
    """
    Add and/or remove vdur records of a vnfr in the database (scale out / in).
    :param db_vnfr: vnfr content; its "vdur" list is refreshed from the DB before returning
    :param vdu_create: dict of vdu-id -> number of instances to add
    :param vdu_delete: dict of vdu-id -> number of instances to remove
    :param mark_delete: when True, only mark records with status DELETING instead of pulling them
    :return: None
    :raises LcmException: scaling out a vdu-id with neither an existing vdur nor a saved template
    """

    db_vdu_push_list = []
    template_vdur = []
    db_update = {"_admin.modified": time()}
    if vdu_create:
        for vdu_id, vdu_count in vdu_create.items():
            # clone the most recent existing vdur of this vdu-id, if any
            vdur = next(
                (
                    vdur
                    for vdur in reversed(db_vnfr["vdur"])
                    if vdur["vdu-id-ref"] == vdu_id
                ),
                None,
            )
            if not vdur:
                # Read the template saved in the db:
                self.logger.debug(
                    "No vdur in the database. Using the vdur-template to scale"
                )
                vdur_template = db_vnfr.get("vdur-template")
                if not vdur_template:
                    raise LcmException(
                        "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                            vdu_id
                        )
                    )
                vdur = vdur_template[0]
                # Delete a template from the database after using it
                self.db.set_one(
                    "vnfrs",
                    {"_id": db_vnfr["_id"]},
                    None,
                    pull={"vdur-template": {"_id": vdur["_id"]}},
                )
            for count in range(vdu_count):
                # each new replica starts from the clone with fresh identity/status
                vdur_copy = deepcopy(vdur)
                vdur_copy["status"] = "BUILD"
                vdur_copy["status-detailed"] = None
                vdur_copy["ip-address"] = None
                vdur_copy["_id"] = str(uuid4())
                vdur_copy["count-index"] += count + 1
                vdur_copy["id"] = "{}-{}".format(
                    vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                )
                vdur_copy.pop("vim_info", None)
                for iface in vdur_copy["interfaces"]:
                    if iface.get("fixed-ip"):
                        # deterministic addressing: shift the cloned ip by the replica offset
                        iface["ip-address"] = self.increment_ip_mac(
                            iface["ip-address"], count + 1
                        )
                    else:
                        iface.pop("ip-address", None)
                    if iface.get("fixed-mac"):
                        iface["mac-address"] = self.increment_ip_mac(
                            iface["mac-address"], count + 1
                        )
                    else:
                        iface.pop("mac-address", None)
                    if db_vnfr["vdur"]:
                        iface.pop(
                            "mgmt_vnf", None
                        )  # only first vdu can be managment of vnf
                db_vdu_push_list.append(vdur_copy)
                # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
    if vdu_delete:
        if len(db_vnfr["vdur"]) == 1:
            # The scale will move to 0 instances
            self.logger.debug(
                "Scaling to 0 !, creating the template with the last vdur"
            )
            template_vdur = [db_vnfr["vdur"][0]]
        for vdu_id, vdu_count in vdu_delete.items():
            if mark_delete:
                # mark the newest vdu_count replicas of this vdu-id as DELETING
                indexes_to_delete = [
                    iv[0]
                    for iv in enumerate(db_vnfr["vdur"])
                    if iv[1]["vdu-id-ref"] == vdu_id
                ]
                db_update.update(
                    {
                        "vdur.{}.status".format(i): "DELETING"
                        for i in indexes_to_delete[-vdu_count:]
                    }
                )
            else:
                # it must be deleted one by one because common.db does not allow otherwise
                vdus_to_delete = [
                    v
                    for v in reversed(db_vnfr["vdur"])
                    if v["vdu-id-ref"] == vdu_id
                ]
                for vdu in vdus_to_delete[:vdu_count]:
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur": {"_id": vdu["_id"]}},
                    )
    db_push = {}
    if db_vdu_push_list:
        db_push["vdur"] = db_vdu_push_list
    if template_vdur:
        db_push["vdur-template"] = template_vdur
    if not db_push:
        db_push = None
    db_vnfr["vdur-template"] = template_vdur
    self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
    # modify passed dictionary db_vnfr
    db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
    db_vnfr["vdur"] = db_vnfr_["vdur"]
662
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld
    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """

    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        # locate the RO net whose osm id matches this vld
        matching_net = next(
            (
                net_RO
                for net_RO in get_iterable(nsr_desc_RO, "nets")
                if vld["id"] == net_RO.get("ns_net_osm_id")
            ),
            None,
        )
        if matching_net is None:
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
        vld["vim-id"] = matching_net.get("vim_net_id")
        vld["name"] = matching_net.get("vim_name")
        vld["status"] = matching_net.get("status")
        vld["status-detailed"] = matching_net.get("error_msg")
        ns_update_nsr["vld.{}".format(vld_index)] = vld
686
687 def set_vnfr_at_error(self, db_vnfrs, error_text):
688 try:
689 for db_vnfr in db_vnfrs.values():
690 vnfr_update = {"status": "ERROR"}
691 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
692 if "status" not in vdur:
693 vdur["status"] = "ERROR"
694 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
695 if error_text:
696 vdur["status-detailed"] = str(error_text)
697 vnfr_update[
698 "vdur.{}.status-detailed".format(vdu_index)
699 ] = "ERROR"
700 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
701 except DbException as e:
702 self.logger.error("Cannot update vnf. {}".format(e))
703
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            if vnf_RO.get("ip_address"):
                # RO may report several addresses separated by ";" -> keep the first
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                vdur_RO_count_index = 0
                if vdur.get("pdu-type"):
                    # PDUs are not deployed by RO: nothing to sync
                    continue
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    if vdur["count-index"] != vdur_RO_count_index:
                        # same vdu-id but a different replica: advance and keep searching
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get(
                                    "ip_address"
                                )
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            # for/else: no RO interface matched this vdur interface
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    # for/else: no RO vm matched this vdur replica
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    # for/else: no RO net matched this vld
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break

        else:
            # for/else: RO reported no vnf with this member index
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
800
801 def _get_ns_config_info(self, nsr_id):
802 """
803 Generates a mapping between vnf,vdu elements and the N2VC id
804 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
805 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
806 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
807 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
808 """
809 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
810 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
811 mapping = {}
812 ns_config_info = {"osm-config-mapping": mapping}
813 for vca in vca_deployed_list:
814 if not vca["member-vnf-index"]:
815 continue
816 if not vca["vdu_id"]:
817 mapping[vca["member-vnf-index"]] = vca["application"]
818 else:
819 mapping[
820 "{}.{}.{}".format(
821 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
822 )
823 ] = vca["application"]
824 return ns_config_info
825
826 async def _instantiate_ng_ro(
827 self,
828 logging_text,
829 nsr_id,
830 nsd,
831 db_nsr,
832 db_nslcmop,
833 db_vnfrs,
834 db_vnfds,
835 n2vc_key_list,
836 stage,
837 start_deploy,
838 timeout_ns_deploy,
839 ):
840
841 db_vims = {}
842
843 def get_vim_account(vim_account_id):
844 nonlocal db_vims
845 if vim_account_id in db_vims:
846 return db_vims[vim_account_id]
847 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
848 db_vims[vim_account_id] = db_vim
849 return db_vim
850
851 # modify target_vld info with instantiation parameters
852 def parse_vld_instantiation_params(
853 target_vim, target_vld, vld_params, target_sdn
854 ):
855 if vld_params.get("ip-profile"):
856 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
857 "ip-profile"
858 ]
859 if vld_params.get("provider-network"):
860 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
861 "provider-network"
862 ]
863 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
864 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
865 "provider-network"
866 ]["sdn-ports"]
867
868 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
869 # if wim_account_id is specified in vld_params, validate if it is feasible.
870 wim_account_id, db_wim = select_feasible_wim_account(
871 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
872 )
873
874 if wim_account_id:
875 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
876 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
877 # update vld_params with correct WIM account Id
878 vld_params["wimAccountId"] = wim_account_id
879
880 target_wim = "wim:{}".format(wim_account_id)
881 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
882 sdn_ports = get_sdn_ports(vld_params, db_wim)
883 if len(sdn_ports) > 0:
884 target_vld["vim_info"][target_wim] = target_wim_attrs
885 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
886
887 self.logger.debug(
888 "Target VLD with WIM data: {:s}".format(str(target_vld))
889 )
890
891 for param in ("vim-network-name", "vim-network-id"):
892 if vld_params.get(param):
893 if isinstance(vld_params[param], dict):
894 for vim, vim_net in vld_params[param].items():
895 other_target_vim = "vim:" + vim
896 populate_dict(
897 target_vld["vim_info"],
898 (other_target_vim, param.replace("-", "_")),
899 vim_net,
900 )
901 else: # isinstance str
902 target_vld["vim_info"][target_vim][
903 param.replace("-", "_")
904 ] = vld_params[param]
905 if vld_params.get("common_id"):
906 target_vld["common_id"] = vld_params.get("common_id")
907
908 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
909 def update_ns_vld_target(target, ns_params):
910 for vnf_params in ns_params.get("vnf", ()):
911 if vnf_params.get("vimAccountId"):
912 target_vnf = next(
913 (
914 vnfr
915 for vnfr in db_vnfrs.values()
916 if vnf_params["member-vnf-index"]
917 == vnfr["member-vnf-index-ref"]
918 ),
919 None,
920 )
921 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
922 if not vdur:
923 return
924 for a_index, a_vld in enumerate(target["ns"]["vld"]):
925 target_vld = find_in_list(
926 get_iterable(vdur, "interfaces"),
927 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
928 )
929
930 vld_params = find_in_list(
931 get_iterable(ns_params, "vld"),
932 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
933 )
934 if target_vld:
935
936 if vnf_params.get("vimAccountId") not in a_vld.get(
937 "vim_info", {}
938 ):
939 target_vim_network_list = [
940 v for _, v in a_vld.get("vim_info").items()
941 ]
942 target_vim_network_name = next(
943 (
944 item.get("vim_network_name", "")
945 for item in target_vim_network_list
946 ),
947 "",
948 )
949
950 target["ns"]["vld"][a_index].get("vim_info").update(
951 {
952 "vim:{}".format(vnf_params["vimAccountId"]): {
953 "vim_network_name": target_vim_network_name,
954 }
955 }
956 )
957
958 if vld_params:
959 for param in ("vim-network-name", "vim-network-id"):
960 if vld_params.get(param) and isinstance(
961 vld_params[param], dict
962 ):
963 for vim, vim_net in vld_params[
964 param
965 ].items():
966 other_target_vim = "vim:" + vim
967 populate_dict(
968 target["ns"]["vld"][a_index].get(
969 "vim_info"
970 ),
971 (
972 other_target_vim,
973 param.replace("-", "_"),
974 ),
975 vim_net,
976 )
977
978 nslcmop_id = db_nslcmop["_id"]
979 target = {
980 "name": db_nsr["name"],
981 "ns": {"vld": []},
982 "vnf": [],
983 "image": deepcopy(db_nsr["image"]),
984 "flavor": deepcopy(db_nsr["flavor"]),
985 "action_id": nslcmop_id,
986 "cloud_init_content": {},
987 }
988 for image in target["image"]:
989 image["vim_info"] = {}
990 for flavor in target["flavor"]:
991 flavor["vim_info"] = {}
992 if db_nsr.get("affinity-or-anti-affinity-group"):
993 target["affinity-or-anti-affinity-group"] = deepcopy(
994 db_nsr["affinity-or-anti-affinity-group"]
995 )
996 for affinity_or_anti_affinity_group in target[
997 "affinity-or-anti-affinity-group"
998 ]:
999 affinity_or_anti_affinity_group["vim_info"] = {}
1000
1001 if db_nslcmop.get("lcmOperationType") != "instantiate":
1002 # get parameters of instantiation:
1003 db_nslcmop_instantiate = self.db.get_list(
1004 "nslcmops",
1005 {
1006 "nsInstanceId": db_nslcmop["nsInstanceId"],
1007 "lcmOperationType": "instantiate",
1008 },
1009 )[-1]
1010 ns_params = db_nslcmop_instantiate.get("operationParams")
1011 else:
1012 ns_params = db_nslcmop.get("operationParams")
1013 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
1014 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
1015
1016 cp2target = {}
1017 for vld_index, vld in enumerate(db_nsr.get("vld")):
1018 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1019 target_vld = {
1020 "id": vld["id"],
1021 "name": vld["name"],
1022 "mgmt-network": vld.get("mgmt-network", False),
1023 "type": vld.get("type"),
1024 "vim_info": {
1025 target_vim: {
1026 "vim_network_name": vld.get("vim-network-name"),
1027 "vim_account_id": ns_params["vimAccountId"],
1028 }
1029 },
1030 }
1031 # check if this network needs SDN assist
1032 if vld.get("pci-interfaces"):
1033 db_vim = get_vim_account(ns_params["vimAccountId"])
1034 sdnc_id = db_vim["config"].get("sdn-controller")
1035 if sdnc_id:
1036 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1037 target_sdn = "sdn:{}".format(sdnc_id)
1038 target_vld["vim_info"][target_sdn] = {
1039 "sdn": True,
1040 "target_vim": target_vim,
1041 "vlds": [sdn_vld],
1042 "type": vld.get("type"),
1043 }
1044
1045 nsd_vnf_profiles = get_vnf_profiles(nsd)
1046 for nsd_vnf_profile in nsd_vnf_profiles:
1047 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1048 if cp["virtual-link-profile-id"] == vld["id"]:
1049 cp2target[
1050 "member_vnf:{}.{}".format(
1051 cp["constituent-cpd-id"][0][
1052 "constituent-base-element-id"
1053 ],
1054 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1055 )
1056 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1057
1058 # check at nsd descriptor, if there is an ip-profile
1059 vld_params = {}
1060 nsd_vlp = find_in_list(
1061 get_virtual_link_profiles(nsd),
1062 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1063 == vld["id"],
1064 )
1065 if (
1066 nsd_vlp
1067 and nsd_vlp.get("virtual-link-protocol-data")
1068 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1069 ):
1070 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1071 "l3-protocol-data"
1072 ]
1073 ip_profile_dest_data = {}
1074 if "ip-version" in ip_profile_source_data:
1075 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1076 "ip-version"
1077 ]
1078 if "cidr" in ip_profile_source_data:
1079 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1080 "cidr"
1081 ]
1082 if "gateway-ip" in ip_profile_source_data:
1083 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1084 "gateway-ip"
1085 ]
1086 if "dhcp-enabled" in ip_profile_source_data:
1087 ip_profile_dest_data["dhcp-params"] = {
1088 "enabled": ip_profile_source_data["dhcp-enabled"]
1089 }
1090 vld_params["ip-profile"] = ip_profile_dest_data
1091
1092 # update vld_params with instantiation params
1093 vld_instantiation_params = find_in_list(
1094 get_iterable(ns_params, "vld"),
1095 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1096 )
1097 if vld_instantiation_params:
1098 vld_params.update(vld_instantiation_params)
1099 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1100 target["ns"]["vld"].append(target_vld)
1101 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1102 update_ns_vld_target(target, ns_params)
1103
1104 for vnfr in db_vnfrs.values():
1105 vnfd = find_in_list(
1106 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1107 )
1108 vnf_params = find_in_list(
1109 get_iterable(ns_params, "vnf"),
1110 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1111 )
1112 target_vnf = deepcopy(vnfr)
1113 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1114 for vld in target_vnf.get("vld", ()):
1115 # check if connected to a ns.vld, to fill target'
1116 vnf_cp = find_in_list(
1117 vnfd.get("int-virtual-link-desc", ()),
1118 lambda cpd: cpd.get("id") == vld["id"],
1119 )
1120 if vnf_cp:
1121 ns_cp = "member_vnf:{}.{}".format(
1122 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1123 )
1124 if cp2target.get(ns_cp):
1125 vld["target"] = cp2target[ns_cp]
1126
1127 vld["vim_info"] = {
1128 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1129 }
1130 # check if this network needs SDN assist
1131 target_sdn = None
1132 if vld.get("pci-interfaces"):
1133 db_vim = get_vim_account(vnfr["vim-account-id"])
1134 sdnc_id = db_vim["config"].get("sdn-controller")
1135 if sdnc_id:
1136 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1137 target_sdn = "sdn:{}".format(sdnc_id)
1138 vld["vim_info"][target_sdn] = {
1139 "sdn": True,
1140 "target_vim": target_vim,
1141 "vlds": [sdn_vld],
1142 "type": vld.get("type"),
1143 }
1144
1145 # check at vnfd descriptor, if there is an ip-profile
1146 vld_params = {}
1147 vnfd_vlp = find_in_list(
1148 get_virtual_link_profiles(vnfd),
1149 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1150 )
1151 if (
1152 vnfd_vlp
1153 and vnfd_vlp.get("virtual-link-protocol-data")
1154 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1155 ):
1156 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1157 "l3-protocol-data"
1158 ]
1159 ip_profile_dest_data = {}
1160 if "ip-version" in ip_profile_source_data:
1161 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1162 "ip-version"
1163 ]
1164 if "cidr" in ip_profile_source_data:
1165 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1166 "cidr"
1167 ]
1168 if "gateway-ip" in ip_profile_source_data:
1169 ip_profile_dest_data[
1170 "gateway-address"
1171 ] = ip_profile_source_data["gateway-ip"]
1172 if "dhcp-enabled" in ip_profile_source_data:
1173 ip_profile_dest_data["dhcp-params"] = {
1174 "enabled": ip_profile_source_data["dhcp-enabled"]
1175 }
1176
1177 vld_params["ip-profile"] = ip_profile_dest_data
1178 # update vld_params with instantiation params
1179 if vnf_params:
1180 vld_instantiation_params = find_in_list(
1181 get_iterable(vnf_params, "internal-vld"),
1182 lambda i_vld: i_vld["name"] == vld["id"],
1183 )
1184 if vld_instantiation_params:
1185 vld_params.update(vld_instantiation_params)
1186 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1187
1188 vdur_list = []
1189 for vdur in target_vnf.get("vdur", ()):
1190 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1191 continue # This vdu must not be created
1192 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1193
1194 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1195
1196 if ssh_keys_all:
1197 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1198 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1199 if (
1200 vdu_configuration
1201 and vdu_configuration.get("config-access")
1202 and vdu_configuration.get("config-access").get("ssh-access")
1203 ):
1204 vdur["ssh-keys"] = ssh_keys_all
1205 vdur["ssh-access-required"] = vdu_configuration[
1206 "config-access"
1207 ]["ssh-access"]["required"]
1208 elif (
1209 vnf_configuration
1210 and vnf_configuration.get("config-access")
1211 and vnf_configuration.get("config-access").get("ssh-access")
1212 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1213 ):
1214 vdur["ssh-keys"] = ssh_keys_all
1215 vdur["ssh-access-required"] = vnf_configuration[
1216 "config-access"
1217 ]["ssh-access"]["required"]
1218 elif ssh_keys_instantiation and find_in_list(
1219 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1220 ):
1221 vdur["ssh-keys"] = ssh_keys_instantiation
1222
1223 self.logger.debug("NS > vdur > {}".format(vdur))
1224
1225 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1226 # cloud-init
1227 if vdud.get("cloud-init-file"):
1228 vdur["cloud-init"] = "{}:file:{}".format(
1229 vnfd["_id"], vdud.get("cloud-init-file")
1230 )
1231 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1232 if vdur["cloud-init"] not in target["cloud_init_content"]:
1233 base_folder = vnfd["_admin"]["storage"]
1234 if base_folder["pkg-dir"]:
1235 cloud_init_file = "{}/{}/cloud_init/{}".format(
1236 base_folder["folder"],
1237 base_folder["pkg-dir"],
1238 vdud.get("cloud-init-file"),
1239 )
1240 else:
1241 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1242 base_folder["folder"],
1243 vdud.get("cloud-init-file"),
1244 )
1245 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1246 target["cloud_init_content"][
1247 vdur["cloud-init"]
1248 ] = ci_file.read()
1249 elif vdud.get("cloud-init"):
1250 vdur["cloud-init"] = "{}:vdu:{}".format(
1251 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1252 )
1253 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1254 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1255 "cloud-init"
1256 ]
1257 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1258 deploy_params_vdu = self._format_additional_params(
1259 vdur.get("additionalParams") or {}
1260 )
1261 deploy_params_vdu["OSM"] = get_osm_params(
1262 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1263 )
1264 vdur["additionalParams"] = deploy_params_vdu
1265
1266 # flavor
1267 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1268 if target_vim not in ns_flavor["vim_info"]:
1269 ns_flavor["vim_info"][target_vim] = {}
1270
1271 # deal with images
1272 # in case alternative images are provided we must check if they should be applied
1273 # for the vim_type, modify the vim_type taking into account
1274 ns_image_id = int(vdur["ns-image-id"])
1275 if vdur.get("alt-image-ids"):
1276 db_vim = get_vim_account(vnfr["vim-account-id"])
1277 vim_type = db_vim["vim_type"]
1278 for alt_image_id in vdur.get("alt-image-ids"):
1279 ns_alt_image = target["image"][int(alt_image_id)]
1280 if vim_type == ns_alt_image.get("vim-type"):
1281 # must use alternative image
1282 self.logger.debug(
1283 "use alternative image id: {}".format(alt_image_id)
1284 )
1285 ns_image_id = alt_image_id
1286 vdur["ns-image-id"] = ns_image_id
1287 break
1288 ns_image = target["image"][int(ns_image_id)]
1289 if target_vim not in ns_image["vim_info"]:
1290 ns_image["vim_info"][target_vim] = {}
1291
1292 # Affinity groups
1293 if vdur.get("affinity-or-anti-affinity-group-id"):
1294 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1295 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1296 if target_vim not in ns_ags["vim_info"]:
1297 ns_ags["vim_info"][target_vim] = {}
1298
1299 vdur["vim_info"] = {target_vim: {}}
1300 # instantiation parameters
1301 if vnf_params:
1302 vdu_instantiation_params = find_in_list(
1303 get_iterable(vnf_params, "vdu"),
1304 lambda i_vdu: i_vdu["id"] == vdud["id"],
1305 )
1306 if vdu_instantiation_params:
1307 # Parse the vdu_volumes from the instantiation params
1308 vdu_volumes = get_volumes_from_instantiation_params(
1309 vdu_instantiation_params, vdud
1310 )
1311 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1312 vdur_list.append(vdur)
1313 target_vnf["vdur"] = vdur_list
1314 target["vnf"].append(target_vnf)
1315
1316 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1317 desc = await self.RO.deploy(nsr_id, target)
1318 self.logger.debug("RO return > {}".format(desc))
1319 action_id = desc["action_id"]
1320 await self._wait_ng_ro(
1321 nsr_id,
1322 action_id,
1323 nslcmop_id,
1324 start_deploy,
1325 timeout_ns_deploy,
1326 stage,
1327 operation="instantiation",
1328 )
1329
1330 # Updating NSR
1331 db_nsr_update = {
1332 "_admin.deployed.RO.operational-status": "running",
1333 "detailed-status": " ".join(stage),
1334 }
1335 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1336 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1337 self._write_op_status(nslcmop_id, stage)
1338 self.logger.debug(
1339 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1340 )
1341 return
1342
1343 async def _wait_ng_ro(
1344 self,
1345 nsr_id,
1346 action_id,
1347 nslcmop_id=None,
1348 start_time=None,
1349 timeout=600,
1350 stage=None,
1351 operation=None,
1352 ):
1353 detailed_status_old = None
1354 db_nsr_update = {}
1355 start_time = start_time or time()
1356 while time() <= start_time + timeout:
1357 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1358 self.logger.debug("Wait NG RO > {}".format(desc_status))
1359 if desc_status["status"] == "FAILED":
1360 raise NgRoException(desc_status["details"])
1361 elif desc_status["status"] == "BUILD":
1362 if stage:
1363 stage[2] = "VIM: ({})".format(desc_status["details"])
1364 elif desc_status["status"] == "DONE":
1365 if stage:
1366 stage[2] = "Deployed at VIM"
1367 break
1368 else:
1369 assert False, "ROclient.check_ns_status returns unknown {}".format(
1370 desc_status["status"]
1371 )
1372 if stage and nslcmop_id and stage[2] != detailed_status_old:
1373 detailed_status_old = stage[2]
1374 db_nsr_update["detailed-status"] = " ".join(stage)
1375 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1376 self._write_op_status(nslcmop_id, stage)
1377 await asyncio.sleep(15, loop=self.loop)
1378 else: # timeout_ns_deploy
1379 raise NgRoException("Timeout waiting ns to deploy")
1380
1381 async def _terminate_ng_ro(
1382 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1383 ):
1384 db_nsr_update = {}
1385 failed_detail = []
1386 action_id = None
1387 start_deploy = time()
1388 try:
1389 target = {
1390 "ns": {"vld": []},
1391 "vnf": [],
1392 "image": [],
1393 "flavor": [],
1394 "action_id": nslcmop_id,
1395 }
1396 desc = await self.RO.deploy(nsr_id, target)
1397 action_id = desc["action_id"]
1398 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1399 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1400 self.logger.debug(
1401 logging_text
1402 + "ns terminate action at RO. action_id={}".format(action_id)
1403 )
1404
1405 # wait until done
1406 delete_timeout = 20 * 60 # 20 minutes
1407 await self._wait_ng_ro(
1408 nsr_id,
1409 action_id,
1410 nslcmop_id,
1411 start_deploy,
1412 delete_timeout,
1413 stage,
1414 operation="termination",
1415 )
1416
1417 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1418 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1419 # delete all nsr
1420 await self.RO.delete(nsr_id)
1421 except Exception as e:
1422 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1423 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1424 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1425 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1426 self.logger.debug(
1427 logging_text + "RO_action_id={} already deleted".format(action_id)
1428 )
1429 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1430 failed_detail.append("delete conflict: {}".format(e))
1431 self.logger.debug(
1432 logging_text
1433 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1434 )
1435 else:
1436 failed_detail.append("delete error: {}".format(e))
1437 self.logger.error(
1438 logging_text
1439 + "RO_action_id={} delete error: {}".format(action_id, e)
1440 )
1441
1442 if failed_detail:
1443 stage[2] = "Error deleting from VIM"
1444 else:
1445 stage[2] = "Deleted from VIM"
1446 db_nsr_update["detailed-status"] = " ".join(stage)
1447 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1448 self._write_op_status(nslcmop_id, stage)
1449
1450 if failed_detail:
1451 raise LcmException("; ".join(failed_detail))
1452 return
1453
1454 async def instantiate_RO(
1455 self,
1456 logging_text,
1457 nsr_id,
1458 nsd,
1459 db_nsr,
1460 db_nslcmop,
1461 db_vnfrs,
1462 db_vnfds,
1463 n2vc_key_list,
1464 stage,
1465 ):
1466 """
1467 Instantiate at RO
1468 :param logging_text: preffix text to use at logging
1469 :param nsr_id: nsr identity
1470 :param nsd: database content of ns descriptor
1471 :param db_nsr: database content of ns record
1472 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1473 :param db_vnfrs:
1474 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1475 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1476 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1477 :return: None or exception
1478 """
1479 try:
1480 start_deploy = time()
1481 ns_params = db_nslcmop.get("operationParams")
1482 if ns_params and ns_params.get("timeout_ns_deploy"):
1483 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1484 else:
1485 timeout_ns_deploy = self.timeout.get(
1486 "ns_deploy", self.timeout_ns_deploy
1487 )
1488
1489 # Check for and optionally request placement optimization. Database will be updated if placement activated
1490 stage[2] = "Waiting for Placement."
1491 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1492 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1493 for vnfr in db_vnfrs.values():
1494 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1495 break
1496 else:
1497 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1498
1499 return await self._instantiate_ng_ro(
1500 logging_text,
1501 nsr_id,
1502 nsd,
1503 db_nsr,
1504 db_nslcmop,
1505 db_vnfrs,
1506 db_vnfds,
1507 n2vc_key_list,
1508 stage,
1509 start_deploy,
1510 timeout_ns_deploy,
1511 )
1512 except Exception as e:
1513 stage[2] = "ERROR deploying at VIM"
1514 self.set_vnfr_at_error(db_vnfrs, str(e))
1515 self.logger.error(
1516 "Error deploying at VIM {}".format(e),
1517 exc_info=not isinstance(
1518 e,
1519 (
1520 ROclient.ROClientException,
1521 LcmException,
1522 DbException,
1523 NgRoException,
1524 ),
1525 ),
1526 )
1527 raise
1528
1529 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1530 """
1531 Wait for kdu to be up, get ip address
1532 :param logging_text: prefix use for logging
1533 :param nsr_id:
1534 :param vnfr_id:
1535 :param kdu_name:
1536 :return: IP address, K8s services
1537 """
1538
1539 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1540 nb_tries = 0
1541
1542 while nb_tries < 360:
1543 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1544 kdur = next(
1545 (
1546 x
1547 for x in get_iterable(db_vnfr, "kdur")
1548 if x.get("kdu-name") == kdu_name
1549 ),
1550 None,
1551 )
1552 if not kdur:
1553 raise LcmException(
1554 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1555 )
1556 if kdur.get("status"):
1557 if kdur["status"] in ("READY", "ENABLED"):
1558 return kdur.get("ip-address"), kdur.get("services")
1559 else:
1560 raise LcmException(
1561 "target KDU={} is in error state".format(kdu_name)
1562 )
1563
1564 await asyncio.sleep(10, loop=self.loop)
1565 nb_tries += 1
1566 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1567
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target VDU id; None means the VNF-level management VDU
        :param vdu_index: count-index of the target VDU (used together with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # classic-RO key-injection retries (max 20)
        target_vdu_id = None  # set once the target VDU is ACTIVE and has an IP
        ro_retries = 0  # polling iterations: 10 s each, max 360 -> 1 hour

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                # NOTE(review): the message mentions _admin.deployed.RO.nsr_id but
                # this raise is the generic polling timeout -- confirm intended text
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # match by vdu id AND count-index (scaling duplicates ids)
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are externally provided, so they need no VIM status check
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is not possible on a PDU; return what we have
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: key injection is requested as an "action"
                        # deploy and awaited like any other RO action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(
                            nsr_id, action_id, timeout=600, operation="instantiation"
                        )
                        break
                    else:
                        # classic-RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                            else:
                                raise ROclient.ROClientException(
                                    "error injecting key: {}".format(
                                        result.get("description")
                                    )
                                )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO may fail transiently: retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
                else:
                    break

            return ip_address
1746
1747 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1748 """
1749 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1750 """
1751 my_vca = vca_deployed_list[vca_index]
1752 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1753 # vdu or kdu: no dependencies
1754 return
1755 timeout = 300
1756 while timeout >= 0:
1757 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1758 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1759 configuration_status_list = db_nsr["configurationStatus"]
1760 for index, vca_deployed in enumerate(configuration_status_list):
1761 if index == vca_index:
1762 # myself
1763 continue
1764 if not my_vca.get("member-vnf-index") or (
1765 vca_deployed.get("member-vnf-index")
1766 == my_vca.get("member-vnf-index")
1767 ):
1768 internal_status = configuration_status_list[index].get("status")
1769 if internal_status == "READY":
1770 continue
1771 elif internal_status == "BROKEN":
1772 raise LcmException(
1773 "Configuration aborted because dependent charm/s has failed"
1774 )
1775 else:
1776 break
1777 else:
1778 # no dependencies, return
1779 return
1780 await asyncio.sleep(10)
1781 timeout -= 1
1782
1783 raise LcmException("Configuration aborted because dependent charm/s timeout")
1784
1785 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1786 vca_id = None
1787 if db_vnfr:
1788 vca_id = deep_get(db_vnfr, ("vca-id",))
1789 elif db_nsr:
1790 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1791 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1792 return vca_id
1793
1794 async def instantiate_N2VC(
1795 self,
1796 logging_text,
1797 vca_index,
1798 nsi_id,
1799 db_nsr,
1800 db_vnfr,
1801 vdu_id,
1802 kdu_name,
1803 vdu_index,
1804 config_descriptor,
1805 deploy_params,
1806 base_folder,
1807 nslcmop_id,
1808 stage,
1809 vca_type,
1810 vca_name,
1811 ee_config_descriptor,
1812 ):
1813 nsr_id = db_nsr["_id"]
1814 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1815 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1816 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1817 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1818 db_dict = {
1819 "collection": "nsrs",
1820 "filter": {"_id": nsr_id},
1821 "path": db_update_entry,
1822 }
1823 step = ""
1824 try:
1825
1826 element_type = "NS"
1827 element_under_configuration = nsr_id
1828
1829 vnfr_id = None
1830 if db_vnfr:
1831 vnfr_id = db_vnfr["_id"]
1832 osm_config["osm"]["vnf_id"] = vnfr_id
1833
1834 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1835
1836 if vca_type == "native_charm":
1837 index_number = 0
1838 else:
1839 index_number = vdu_index or 0
1840
1841 if vnfr_id:
1842 element_type = "VNF"
1843 element_under_configuration = vnfr_id
1844 namespace += ".{}-{}".format(vnfr_id, index_number)
1845 if vdu_id:
1846 namespace += ".{}-{}".format(vdu_id, index_number)
1847 element_type = "VDU"
1848 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1849 osm_config["osm"]["vdu_id"] = vdu_id
1850 elif kdu_name:
1851 namespace += ".{}".format(kdu_name)
1852 element_type = "KDU"
1853 element_under_configuration = kdu_name
1854 osm_config["osm"]["kdu_name"] = kdu_name
1855
1856 # Get artifact path
1857 if base_folder["pkg-dir"]:
1858 artifact_path = "{}/{}/{}/{}".format(
1859 base_folder["folder"],
1860 base_folder["pkg-dir"],
1861 "charms"
1862 if vca_type
1863 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1864 else "helm-charts",
1865 vca_name,
1866 )
1867 else:
1868 artifact_path = "{}/Scripts/{}/{}/".format(
1869 base_folder["folder"],
1870 "charms"
1871 if vca_type
1872 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1873 else "helm-charts",
1874 vca_name,
1875 )
1876
1877 self.logger.debug("Artifact path > {}".format(artifact_path))
1878
1879 # get initial_config_primitive_list that applies to this element
1880 initial_config_primitive_list = config_descriptor.get(
1881 "initial-config-primitive"
1882 )
1883
1884 self.logger.debug(
1885 "Initial config primitive list > {}".format(
1886 initial_config_primitive_list
1887 )
1888 )
1889
1890 # add config if not present for NS charm
1891 ee_descriptor_id = ee_config_descriptor.get("id")
1892 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1893 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1894 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1895 )
1896
1897 self.logger.debug(
1898 "Initial config primitive list #2 > {}".format(
1899 initial_config_primitive_list
1900 )
1901 )
1902 # n2vc_redesign STEP 3.1
1903 # find old ee_id if exists
1904 ee_id = vca_deployed.get("ee_id")
1905
1906 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1907 # create or register execution environment in VCA
1908 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1909
1910 self._write_configuration_status(
1911 nsr_id=nsr_id,
1912 vca_index=vca_index,
1913 status="CREATING",
1914 element_under_configuration=element_under_configuration,
1915 element_type=element_type,
1916 )
1917
1918 step = "create execution environment"
1919 self.logger.debug(logging_text + step)
1920
1921 ee_id = None
1922 credentials = None
1923 if vca_type == "k8s_proxy_charm":
1924 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1925 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1926 namespace=namespace,
1927 artifact_path=artifact_path,
1928 db_dict=db_dict,
1929 vca_id=vca_id,
1930 )
1931 elif vca_type == "helm" or vca_type == "helm-v3":
1932 ee_id, credentials = await self.vca_map[
1933 vca_type
1934 ].create_execution_environment(
1935 namespace=namespace,
1936 reuse_ee_id=ee_id,
1937 db_dict=db_dict,
1938 config=osm_config,
1939 artifact_path=artifact_path,
1940 chart_model=vca_name,
1941 vca_type=vca_type,
1942 )
1943 else:
1944 ee_id, credentials = await self.vca_map[
1945 vca_type
1946 ].create_execution_environment(
1947 namespace=namespace,
1948 reuse_ee_id=ee_id,
1949 db_dict=db_dict,
1950 vca_id=vca_id,
1951 )
1952
1953 elif vca_type == "native_charm":
1954 step = "Waiting to VM being up and getting IP address"
1955 self.logger.debug(logging_text + step)
1956 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1957 logging_text,
1958 nsr_id,
1959 vnfr_id,
1960 vdu_id,
1961 vdu_index,
1962 user=None,
1963 pub_key=None,
1964 )
1965 credentials = {"hostname": rw_mgmt_ip}
1966 # get username
1967 username = deep_get(
1968 config_descriptor, ("config-access", "ssh-access", "default-user")
1969 )
1970 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1971 # merged. Meanwhile let's get username from initial-config-primitive
1972 if not username and initial_config_primitive_list:
1973 for config_primitive in initial_config_primitive_list:
1974 for param in config_primitive.get("parameter", ()):
1975 if param["name"] == "ssh-username":
1976 username = param["value"]
1977 break
1978 if not username:
1979 raise LcmException(
1980 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1981 "'config-access.ssh-access.default-user'"
1982 )
1983 credentials["username"] = username
1984 # n2vc_redesign STEP 3.2
1985
1986 self._write_configuration_status(
1987 nsr_id=nsr_id,
1988 vca_index=vca_index,
1989 status="REGISTERING",
1990 element_under_configuration=element_under_configuration,
1991 element_type=element_type,
1992 )
1993
1994 step = "register execution environment {}".format(credentials)
1995 self.logger.debug(logging_text + step)
1996 ee_id = await self.vca_map[vca_type].register_execution_environment(
1997 credentials=credentials,
1998 namespace=namespace,
1999 db_dict=db_dict,
2000 vca_id=vca_id,
2001 )
2002
2003 # for compatibility with MON/POL modules, the need model and application name at database
2004 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
2005 ee_id_parts = ee_id.split(".")
2006 db_nsr_update = {db_update_entry + "ee_id": ee_id}
2007 if len(ee_id_parts) >= 2:
2008 model_name = ee_id_parts[0]
2009 application_name = ee_id_parts[1]
2010 db_nsr_update[db_update_entry + "model"] = model_name
2011 db_nsr_update[db_update_entry + "application"] = application_name
2012
2013 # n2vc_redesign STEP 3.3
2014 step = "Install configuration Software"
2015
2016 self._write_configuration_status(
2017 nsr_id=nsr_id,
2018 vca_index=vca_index,
2019 status="INSTALLING SW",
2020 element_under_configuration=element_under_configuration,
2021 element_type=element_type,
2022 other_update=db_nsr_update,
2023 )
2024
2025 # TODO check if already done
2026 self.logger.debug(logging_text + step)
2027 config = None
2028 if vca_type == "native_charm":
2029 config_primitive = next(
2030 (p for p in initial_config_primitive_list if p["name"] == "config"),
2031 None,
2032 )
2033 if config_primitive:
2034 config = self._map_primitive_params(
2035 config_primitive, {}, deploy_params
2036 )
2037 num_units = 1
2038 if vca_type == "lxc_proxy_charm":
2039 if element_type == "NS":
2040 num_units = db_nsr.get("config-units") or 1
2041 elif element_type == "VNF":
2042 num_units = db_vnfr.get("config-units") or 1
2043 elif element_type == "VDU":
2044 for v in db_vnfr["vdur"]:
2045 if vdu_id == v["vdu-id-ref"]:
2046 num_units = v.get("config-units") or 1
2047 break
2048 if vca_type != "k8s_proxy_charm":
2049 await self.vca_map[vca_type].install_configuration_sw(
2050 ee_id=ee_id,
2051 artifact_path=artifact_path,
2052 db_dict=db_dict,
2053 config=config,
2054 num_units=num_units,
2055 vca_id=vca_id,
2056 vca_type=vca_type,
2057 )
2058
2059 # write in db flag of configuration_sw already installed
2060 self.update_db_2(
2061 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2062 )
2063
2064 # add relations for this VCA (wait for other peers related with this VCA)
2065 await self._add_vca_relations(
2066 logging_text=logging_text,
2067 nsr_id=nsr_id,
2068 vca_type=vca_type,
2069 vca_index=vca_index,
2070 )
2071
2072 # if SSH access is required, then get execution environment SSH public
2073 # if native charm we have waited already to VM be UP
2074 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2075 pub_key = None
2076 user = None
2077 # self.logger.debug("get ssh key block")
2078 if deep_get(
2079 config_descriptor, ("config-access", "ssh-access", "required")
2080 ):
2081 # self.logger.debug("ssh key needed")
2082 # Needed to inject a ssh key
2083 user = deep_get(
2084 config_descriptor,
2085 ("config-access", "ssh-access", "default-user"),
2086 )
2087 step = "Install configuration Software, getting public ssh key"
2088 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2089 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2090 )
2091
2092 step = "Insert public key into VM user={} ssh_key={}".format(
2093 user, pub_key
2094 )
2095 else:
2096 # self.logger.debug("no need to get ssh key")
2097 step = "Waiting to VM being up and getting IP address"
2098 self.logger.debug(logging_text + step)
2099
2100 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2101 rw_mgmt_ip = None
2102
2103 # n2vc_redesign STEP 5.1
2104 # wait for RO (ip-address) Insert pub_key into VM
2105 if vnfr_id:
2106 if kdu_name:
2107 rw_mgmt_ip, services = await self.wait_kdu_up(
2108 logging_text, nsr_id, vnfr_id, kdu_name
2109 )
2110 vnfd = self.db.get_one(
2111 "vnfds_revisions",
2112 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2113 )
2114 kdu = get_kdu(vnfd, kdu_name)
2115 kdu_services = [
2116 service["name"] for service in get_kdu_services(kdu)
2117 ]
2118 exposed_services = []
2119 for service in services:
2120 if any(s in service["name"] for s in kdu_services):
2121 exposed_services.append(service)
2122 await self.vca_map[vca_type].exec_primitive(
2123 ee_id=ee_id,
2124 primitive_name="config",
2125 params_dict={
2126 "osm-config": json.dumps(
2127 OsmConfigBuilder(
2128 k8s={"services": exposed_services}
2129 ).build()
2130 )
2131 },
2132 vca_id=vca_id,
2133 )
2134
2135 # This verification is needed in order to avoid trying to add a public key
2136 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2137 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2138 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2139 # or it is a KNF)
2140 elif db_vnfr.get("vdur"):
2141 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2142 logging_text,
2143 nsr_id,
2144 vnfr_id,
2145 vdu_id,
2146 vdu_index,
2147 user=user,
2148 pub_key=pub_key,
2149 )
2150
2151 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2152
2153 # store rw_mgmt_ip in deploy params for later replacement
2154 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2155
2156 # n2vc_redesign STEP 6 Execute initial config primitive
2157 step = "execute initial config primitive"
2158
2159 # wait for dependent primitives execution (NS -> VNF -> VDU)
2160 if initial_config_primitive_list:
2161 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2162
2163 # stage, in function of element type: vdu, kdu, vnf or ns
2164 my_vca = vca_deployed_list[vca_index]
2165 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2166 # VDU or KDU
2167 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2168 elif my_vca.get("member-vnf-index"):
2169 # VNF
2170 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2171 else:
2172 # NS
2173 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2174
2175 self._write_configuration_status(
2176 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2177 )
2178
2179 self._write_op_status(op_id=nslcmop_id, stage=stage)
2180
2181 check_if_terminated_needed = True
2182 for initial_config_primitive in initial_config_primitive_list:
2183 # adding information on the vca_deployed if it is a NS execution environment
2184 if not vca_deployed["member-vnf-index"]:
2185 deploy_params["ns_config_info"] = json.dumps(
2186 self._get_ns_config_info(nsr_id)
2187 )
2188 # TODO check if already done
2189 primitive_params_ = self._map_primitive_params(
2190 initial_config_primitive, {}, deploy_params
2191 )
2192
2193 step = "execute primitive '{}' params '{}'".format(
2194 initial_config_primitive["name"], primitive_params_
2195 )
2196 self.logger.debug(logging_text + step)
2197 await self.vca_map[vca_type].exec_primitive(
2198 ee_id=ee_id,
2199 primitive_name=initial_config_primitive["name"],
2200 params_dict=primitive_params_,
2201 db_dict=db_dict,
2202 vca_id=vca_id,
2203 vca_type=vca_type,
2204 )
2205 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2206 if check_if_terminated_needed:
2207 if config_descriptor.get("terminate-config-primitive"):
2208 self.update_db_2(
2209 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2210 )
2211 check_if_terminated_needed = False
2212
2213 # TODO register in database that primitive is done
2214
2215 # STEP 7 Configure metrics
2216 if vca_type == "helm" or vca_type == "helm-v3":
2217 # TODO: review for those cases where the helm chart is a reference and
2218 # is not part of the NF package
2219 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2220 ee_id=ee_id,
2221 artifact_path=artifact_path,
2222 ee_config_descriptor=ee_config_descriptor,
2223 vnfr_id=vnfr_id,
2224 nsr_id=nsr_id,
2225 target_ip=rw_mgmt_ip,
2226 )
2227 if prometheus_jobs:
2228 self.update_db_2(
2229 "nsrs",
2230 nsr_id,
2231 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2232 )
2233
2234 for job in prometheus_jobs:
2235 self.db.set_one(
2236 "prometheus_jobs",
2237 {"job_name": job["job_name"]},
2238 job,
2239 upsert=True,
2240 fail_on_empty=False,
2241 )
2242
2243 step = "instantiated at VCA"
2244 self.logger.debug(logging_text + step)
2245
2246 self._write_configuration_status(
2247 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2248 )
2249
2250 except Exception as e: # TODO not use Exception but N2VC exception
2251 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2252 if not isinstance(
2253 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2254 ):
2255 self.logger.error(
2256 "Exception while {} : {}".format(step, e), exc_info=True
2257 )
2258 self._write_configuration_status(
2259 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2260 )
2261 raise LcmException("{} {}".format(step, e)) from e
2262
2263 def _write_ns_status(
2264 self,
2265 nsr_id: str,
2266 ns_state: str,
2267 current_operation: str,
2268 current_operation_id: str,
2269 error_description: str = None,
2270 error_detail: str = None,
2271 other_update: dict = None,
2272 ):
2273 """
2274 Update db_nsr fields.
2275 :param nsr_id:
2276 :param ns_state:
2277 :param current_operation:
2278 :param current_operation_id:
2279 :param error_description:
2280 :param error_detail:
2281 :param other_update: Other required changes at database if provided, will be cleared
2282 :return:
2283 """
2284 try:
2285 db_dict = other_update or {}
2286 db_dict[
2287 "_admin.nslcmop"
2288 ] = current_operation_id # for backward compatibility
2289 db_dict["_admin.current-operation"] = current_operation_id
2290 db_dict["_admin.operation-type"] = (
2291 current_operation if current_operation != "IDLE" else None
2292 )
2293 db_dict["currentOperation"] = current_operation
2294 db_dict["currentOperationID"] = current_operation_id
2295 db_dict["errorDescription"] = error_description
2296 db_dict["errorDetail"] = error_detail
2297
2298 if ns_state:
2299 db_dict["nsState"] = ns_state
2300 self.update_db_2("nsrs", nsr_id, db_dict)
2301 except DbException as e:
2302 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2303
2304 def _write_op_status(
2305 self,
2306 op_id: str,
2307 stage: list = None,
2308 error_message: str = None,
2309 queuePosition: int = 0,
2310 operation_state: str = None,
2311 other_update: dict = None,
2312 ):
2313 try:
2314 db_dict = other_update or {}
2315 db_dict["queuePosition"] = queuePosition
2316 if isinstance(stage, list):
2317 db_dict["stage"] = stage[0]
2318 db_dict["detailed-status"] = " ".join(stage)
2319 elif stage is not None:
2320 db_dict["stage"] = str(stage)
2321
2322 if error_message is not None:
2323 db_dict["errorMessage"] = error_message
2324 if operation_state is not None:
2325 db_dict["operationState"] = operation_state
2326 db_dict["statusEnteredTime"] = time()
2327 self.update_db_2("nslcmops", op_id, db_dict)
2328 except DbException as e:
2329 self.logger.warn(
2330 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2331 )
2332
2333 def _write_all_config_status(self, db_nsr: dict, status: str):
2334 try:
2335 nsr_id = db_nsr["_id"]
2336 # configurationStatus
2337 config_status = db_nsr.get("configurationStatus")
2338 if config_status:
2339 db_nsr_update = {
2340 "configurationStatus.{}.status".format(index): status
2341 for index, v in enumerate(config_status)
2342 if v
2343 }
2344 # update status
2345 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2346
2347 except DbException as e:
2348 self.logger.warn(
2349 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2350 )
2351
2352 def _write_configuration_status(
2353 self,
2354 nsr_id: str,
2355 vca_index: int,
2356 status: str = None,
2357 element_under_configuration: str = None,
2358 element_type: str = None,
2359 other_update: dict = None,
2360 ):
2361
2362 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2363 # .format(vca_index, status))
2364
2365 try:
2366 db_path = "configurationStatus.{}.".format(vca_index)
2367 db_dict = other_update or {}
2368 if status:
2369 db_dict[db_path + "status"] = status
2370 if element_under_configuration:
2371 db_dict[
2372 db_path + "elementUnderConfiguration"
2373 ] = element_under_configuration
2374 if element_type:
2375 db_dict[db_path + "elementType"] = element_type
2376 self.update_db_2("nsrs", nsr_id, db_dict)
2377 except DbException as e:
2378 self.logger.warn(
2379 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2380 status, nsr_id, vca_index, e
2381 )
2382 )
2383
2384 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2385 """
2386 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2387 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2388 Database is used because the result can be obtained from a different LCM worker in case of HA.
2389 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2390 :param db_nslcmop: database content of nslcmop
2391 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2392 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2393 computed 'vim-account-id'
2394 """
2395 modified = False
2396 nslcmop_id = db_nslcmop["_id"]
2397 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2398 if placement_engine == "PLA":
2399 self.logger.debug(
2400 logging_text + "Invoke and wait for placement optimization"
2401 )
2402 await self.msg.aiowrite(
2403 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2404 )
2405 db_poll_interval = 5
2406 wait = db_poll_interval * 10
2407 pla_result = None
2408 while not pla_result and wait >= 0:
2409 await asyncio.sleep(db_poll_interval)
2410 wait -= db_poll_interval
2411 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2412 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2413
2414 if not pla_result:
2415 raise LcmException(
2416 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2417 )
2418
2419 for pla_vnf in pla_result["vnf"]:
2420 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2421 if not pla_vnf.get("vimAccountId") or not vnfr:
2422 continue
2423 modified = True
2424 self.db.set_one(
2425 "vnfrs",
2426 {"_id": vnfr["_id"]},
2427 {"vim-account-id": pla_vnf["vimAccountId"]},
2428 )
2429 # Modifies db_vnfrs
2430 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2431 return modified
2432
2433 def update_nsrs_with_pla_result(self, params):
2434 try:
2435 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2436 self.update_db_2(
2437 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2438 )
2439 except Exception as e:
2440 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2441
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Full NS instantiation workflow (HA-safe background task).

        Locks the operation, reads the nslcmop/nsr/nsd/vnfr/vnfd records,
        deploys KDUs, launches the RO (VIM) deployment as a background task,
        deploys execution environments / charms at NS, VNF, VDU and KDU level,
        then waits for every launched task and writes the final operation and
        NS status to the database and to kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Outcome is recorded in the nsrs/nslcmops records and
            notified on the "ns" kafka topic.
        """

        # Try to lock HA task here; another LCM worker may own this operation
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs (accumulated and flushed via _write_ns_status)
        db_nsr_update = {}
        # update operation on nslcmops (flushed via _write_op_status)
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress (shared mutable list, also passed to subtasks)
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]  # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams is stored JSON-encoded; decode it
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so testing the id
                # string with "not in" is always True and the vnfd is re-read
                # (and re-appended) for every vnfr sharing it — confirm intended
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            # ensure _admin.deployed.RO.vnfd exists and is a list
            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            # VIM deployment runs concurrently with the EE/charm deployment below;
            # it is awaited at the finally block through tasks_dict_info
            stage[1] = "Deploying NS at VIM."
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            # create namespace and certificate if any helm based EE is present in the NS
            if check_helm_ee_in_ns(db_vnfds):
                # TODO: create EE namespace
                # create TLS certificates
                await self.vca_map["helm-v3"].create_tls_certificate(
                    secret_name="ee-tls-{}".format(nsr_id),
                    dns_prefix="*",
                    nsr_id=nsr_id,
                    usage="server auth",
                )

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            # deploy a charm (when configured) per VNF, per VDU and per KDU
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level configuration
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        # NOTE(review): falls back to the shared VNF-level dict,
                        # so the "OSM" key below is overwritten on it — confirm
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one EE deployment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level configuration
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                # NOTE(review): timeout_ns_deploy may be unbound here if the
                # try block failed before it was assigned — confirm
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            # notify the final state on kafka (best effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2938
2939 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2940 if vnfd_id not in cached_vnfds:
2941 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2942 return cached_vnfds[vnfd_id]
2943
2944 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2945 if vnf_profile_id not in cached_vnfrs:
2946 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2947 "vnfrs",
2948 {
2949 "member-vnf-index-ref": vnf_profile_id,
2950 "nsr-id-ref": nsr_id,
2951 },
2952 )
2953 return cached_vnfrs[vnf_profile_id]
2954
2955 def _is_deployed_vca_in_relation(
2956 self, vca: DeployedVCA, relation: Relation
2957 ) -> bool:
2958 found = False
2959 for endpoint in (relation.provider, relation.requirer):
2960 if endpoint["kdu-resource-profile-id"]:
2961 continue
2962 found = (
2963 vca.vnf_profile_id == endpoint.vnf_profile_id
2964 and vca.vdu_profile_id == endpoint.vdu_profile_id
2965 and vca.execution_environment_ref == endpoint.execution_environment_ref
2966 )
2967 if found:
2968 break
2969 return found
2970
2971 def _update_ee_relation_data_with_implicit_data(
2972 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2973 ):
2974 ee_relation_data = safe_get_ee_relation(
2975 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2976 )
2977 ee_relation_level = EELevel.get_level(ee_relation_data)
2978 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2979 "execution-environment-ref"
2980 ]:
2981 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2982 vnfd_id = vnf_profile["vnfd-id"]
2983 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2984 entity_id = (
2985 vnfd_id
2986 if ee_relation_level == EELevel.VNF
2987 else ee_relation_data["vdu-profile-id"]
2988 )
2989 ee = get_juju_ee_ref(db_vnfd, entity_id)
2990 if not ee:
2991 raise Exception(
2992 f"not execution environments found for ee_relation {ee_relation_data}"
2993 )
2994 ee_relation_data["execution-environment-ref"] = ee["id"]
2995 return ee_relation_data
2996
2997 def _get_ns_relations(
2998 self,
2999 nsr_id: str,
3000 nsd: Dict[str, Any],
3001 vca: DeployedVCA,
3002 cached_vnfds: Dict[str, Any],
3003 ) -> List[Relation]:
3004 relations = []
3005 db_ns_relations = get_ns_configuration_relation_list(nsd)
3006 for r in db_ns_relations:
3007 provider_dict = None
3008 requirer_dict = None
3009 if all(key in r for key in ("provider", "requirer")):
3010 provider_dict = r["provider"]
3011 requirer_dict = r["requirer"]
3012 elif "entities" in r:
3013 provider_id = r["entities"][0]["id"]
3014 provider_dict = {
3015 "nsr-id": nsr_id,
3016 "endpoint": r["entities"][0]["endpoint"],
3017 }
3018 if provider_id != nsd["id"]:
3019 provider_dict["vnf-profile-id"] = provider_id
3020 requirer_id = r["entities"][1]["id"]
3021 requirer_dict = {
3022 "nsr-id": nsr_id,
3023 "endpoint": r["entities"][1]["endpoint"],
3024 }
3025 if requirer_id != nsd["id"]:
3026 requirer_dict["vnf-profile-id"] = requirer_id
3027 else:
3028 raise Exception(
3029 "provider/requirer or entities must be included in the relation."
3030 )
3031 relation_provider = self._update_ee_relation_data_with_implicit_data(
3032 nsr_id, nsd, provider_dict, cached_vnfds
3033 )
3034 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3035 nsr_id, nsd, requirer_dict, cached_vnfds
3036 )
3037 provider = EERelation(relation_provider)
3038 requirer = EERelation(relation_requirer)
3039 relation = Relation(r["name"], provider, requirer)
3040 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3041 if vca_in_relation:
3042 relations.append(relation)
3043 return relations
3044
3045 def _get_vnf_relations(
3046 self,
3047 nsr_id: str,
3048 nsd: Dict[str, Any],
3049 vca: DeployedVCA,
3050 cached_vnfds: Dict[str, Any],
3051 ) -> List[Relation]:
3052 relations = []
3053 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3054 vnf_profile_id = vnf_profile["id"]
3055 vnfd_id = vnf_profile["vnfd-id"]
3056 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3057 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3058 for r in db_vnf_relations:
3059 provider_dict = None
3060 requirer_dict = None
3061 if all(key in r for key in ("provider", "requirer")):
3062 provider_dict = r["provider"]
3063 requirer_dict = r["requirer"]
3064 elif "entities" in r:
3065 provider_id = r["entities"][0]["id"]
3066 provider_dict = {
3067 "nsr-id": nsr_id,
3068 "vnf-profile-id": vnf_profile_id,
3069 "endpoint": r["entities"][0]["endpoint"],
3070 }
3071 if provider_id != vnfd_id:
3072 provider_dict["vdu-profile-id"] = provider_id
3073 requirer_id = r["entities"][1]["id"]
3074 requirer_dict = {
3075 "nsr-id": nsr_id,
3076 "vnf-profile-id": vnf_profile_id,
3077 "endpoint": r["entities"][1]["endpoint"],
3078 }
3079 if requirer_id != vnfd_id:
3080 requirer_dict["vdu-profile-id"] = requirer_id
3081 else:
3082 raise Exception(
3083 "provider/requirer or entities must be included in the relation."
3084 )
3085 relation_provider = self._update_ee_relation_data_with_implicit_data(
3086 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3087 )
3088 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3089 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3090 )
3091 provider = EERelation(relation_provider)
3092 requirer = EERelation(relation_requirer)
3093 relation = Relation(r["name"], provider, requirer)
3094 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3095 if vca_in_relation:
3096 relations.append(relation)
3097 return relations
3098
3099 def _get_kdu_resource_data(
3100 self,
3101 ee_relation: EERelation,
3102 db_nsr: Dict[str, Any],
3103 cached_vnfds: Dict[str, Any],
3104 ) -> DeployedK8sResource:
3105 nsd = get_nsd(db_nsr)
3106 vnf_profiles = get_vnf_profiles(nsd)
3107 vnfd_id = find_in_list(
3108 vnf_profiles,
3109 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3110 )["vnfd-id"]
3111 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3112 kdu_resource_profile = get_kdu_resource_profile(
3113 db_vnfd, ee_relation.kdu_resource_profile_id
3114 )
3115 kdu_name = kdu_resource_profile["kdu-name"]
3116 deployed_kdu, _ = get_deployed_kdu(
3117 db_nsr.get("_admin", ()).get("deployed", ()),
3118 kdu_name,
3119 ee_relation.vnf_profile_id,
3120 )
3121 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3122 return deployed_kdu
3123
3124 def _get_deployed_component(
3125 self,
3126 ee_relation: EERelation,
3127 db_nsr: Dict[str, Any],
3128 cached_vnfds: Dict[str, Any],
3129 ) -> DeployedComponent:
3130 nsr_id = db_nsr["_id"]
3131 deployed_component = None
3132 ee_level = EELevel.get_level(ee_relation)
3133 if ee_level == EELevel.NS:
3134 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3135 if vca:
3136 deployed_component = DeployedVCA(nsr_id, vca)
3137 elif ee_level == EELevel.VNF:
3138 vca = get_deployed_vca(
3139 db_nsr,
3140 {
3141 "vdu_id": None,
3142 "member-vnf-index": ee_relation.vnf_profile_id,
3143 "ee_descriptor_id": ee_relation.execution_environment_ref,
3144 },
3145 )
3146 if vca:
3147 deployed_component = DeployedVCA(nsr_id, vca)
3148 elif ee_level == EELevel.VDU:
3149 vca = get_deployed_vca(
3150 db_nsr,
3151 {
3152 "vdu_id": ee_relation.vdu_profile_id,
3153 "member-vnf-index": ee_relation.vnf_profile_id,
3154 "ee_descriptor_id": ee_relation.execution_environment_ref,
3155 },
3156 )
3157 if vca:
3158 deployed_component = DeployedVCA(nsr_id, vca)
3159 elif ee_level == EELevel.KDU:
3160 kdu_resource_data = self._get_kdu_resource_data(
3161 ee_relation, db_nsr, cached_vnfds
3162 )
3163 if kdu_resource_data:
3164 deployed_component = DeployedK8sResource(kdu_resource_data)
3165 return deployed_component
3166
3167 async def _add_relation(
3168 self,
3169 relation: Relation,
3170 vca_type: str,
3171 db_nsr: Dict[str, Any],
3172 cached_vnfds: Dict[str, Any],
3173 cached_vnfrs: Dict[str, Any],
3174 ) -> bool:
3175 deployed_provider = self._get_deployed_component(
3176 relation.provider, db_nsr, cached_vnfds
3177 )
3178 deployed_requirer = self._get_deployed_component(
3179 relation.requirer, db_nsr, cached_vnfds
3180 )
3181 if (
3182 deployed_provider
3183 and deployed_requirer
3184 and deployed_provider.config_sw_installed
3185 and deployed_requirer.config_sw_installed
3186 ):
3187 provider_db_vnfr = (
3188 self._get_vnfr(
3189 relation.provider.nsr_id,
3190 relation.provider.vnf_profile_id,
3191 cached_vnfrs,
3192 )
3193 if relation.provider.vnf_profile_id
3194 else None
3195 )
3196 requirer_db_vnfr = (
3197 self._get_vnfr(
3198 relation.requirer.nsr_id,
3199 relation.requirer.vnf_profile_id,
3200 cached_vnfrs,
3201 )
3202 if relation.requirer.vnf_profile_id
3203 else None
3204 )
3205 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3206 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3207 provider_relation_endpoint = RelationEndpoint(
3208 deployed_provider.ee_id,
3209 provider_vca_id,
3210 relation.provider.endpoint,
3211 )
3212 requirer_relation_endpoint = RelationEndpoint(
3213 deployed_requirer.ee_id,
3214 requirer_vca_id,
3215 relation.requirer.endpoint,
3216 )
3217 await self.vca_map[vca_type].add_relation(
3218 provider=provider_relation_endpoint,
3219 requirer=requirer_relation_endpoint,
3220 )
3221 # remove entry from relations list
3222 return True
3223 return False
3224
3225 async def _add_vca_relations(
3226 self,
3227 logging_text,
3228 nsr_id,
3229 vca_type: str,
3230 vca_index: int,
3231 timeout: int = 3600,
3232 ) -> bool:
3233
3234 # steps:
3235 # 1. find all relations for this VCA
3236 # 2. wait for other peers related
3237 # 3. add relations
3238
3239 try:
3240 # STEP 1: find all relations for this VCA
3241
3242 # read nsr record
3243 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3244 nsd = get_nsd(db_nsr)
3245
3246 # this VCA data
3247 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3248 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3249
3250 cached_vnfds = {}
3251 cached_vnfrs = {}
3252 relations = []
3253 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3254 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3255
3256 # if no relations, terminate
3257 if not relations:
3258 self.logger.debug(logging_text + " No relations")
3259 return True
3260
3261 self.logger.debug(logging_text + " adding relations {}".format(relations))
3262
3263 # add all relations
3264 start = time()
3265 while True:
3266 # check timeout
3267 now = time()
3268 if now - start >= timeout:
3269 self.logger.error(logging_text + " : timeout adding relations")
3270 return False
3271
3272 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3273 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3274
3275 # for each relation, find the VCA's related
3276 for relation in relations.copy():
3277 added = await self._add_relation(
3278 relation,
3279 vca_type,
3280 db_nsr,
3281 cached_vnfds,
3282 cached_vnfrs,
3283 )
3284 if added:
3285 relations.remove(relation)
3286
3287 if not relations:
3288 self.logger.debug("Relations added")
3289 break
3290 await asyncio.sleep(5.0)
3291
3292 return True
3293
3294 except Exception as e:
3295 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3296 return False
3297
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its k8s cluster and record the outcome in the DB.

        Generates (or reuses) the kdu instance name, persists it in the nsr
        record, installs the KDU through the mapped k8s connector, resolves the
        management service IP from the deployed k8s services, updates the vnfr
        record and finally runs any initial-config-primitive not handled by an
        execution environment.

        :param nsr_id: id of the nsr record
        :param nsr_db_path: dot path to this KDU entry inside the nsr record
        :param vnfr_data: vnfr record of the VNF that owns the KDU
        :param kdu_index: index of this kdur inside the vnfr "kdur" list
        :param kdud: KDU descriptor from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace...
        :param k8params: instantiation parameters passed to the installer
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: id of the VCA to use, if any
        :return: the kdu instance name
        :raises: re-raises any failure after storing the error in nsrs/vnfrs
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honor a user-provided deployment name, otherwise generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # descriptor services flagged as management services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # match deployed service by descriptor name prefix
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            # unwrap single-element ip lists
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        # NOTE(review): logger.warn is a deprecated alias of warning
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial-config-primitives only when there is no juju EE
            # that would handle them instead
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives are executed in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3495
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch an asyncio install task for every KDU present in the vnfrs.

        For each kdur of each vnfr: resolves the k8s cluster id to the
        connector-level uuid (initializing helm-v3 backward-compat clusters on
        the fly), synchronizes helm repos once per cluster, records the KDU
        entry under _admin.deployed.K8s in the nsr record, and schedules
        self._install_kdu as a registered LCM task.

        :param logging_text: prefix for log messages
        :param nsr_id: id of the nsr record
        :param nslcmop_id: id of the current operation (for task registration)
        :param db_vnfrs: dict of vnfr records
        :param db_vnfds: list of vnfd records
        :param task_instantiation_info: dict updated with task -> description
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # memo: cluster id -> connector uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the connector-level uuid of a k8s cluster."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    # the lambda closes over vnfd_id but is consumed
                    # immediately, so late binding is not an issue here
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (once per helm cluster uuid)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    # index is global across all vnfrs: it matches the
                    # position in _admin.deployed.K8s
                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3767
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Schedule one instantiate_N2VC task per execution environment.

        For each EE item of *descriptor_config*, determines the VCA type,
        reuses (or creates) the matching entry in
        db_nsr._admin.deployed.VCA, and launches self.instantiate_N2VC as a
        registered LCM task. *task_instantiation_info* is updated with the
        created task -> description mapping.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # default: proxy charm when a charm name is given, else native;
                # "cloud"/"proxy" fields may override the choice below
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                # helm v3 is the default unless helm-version says v2
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an already existing deployed.VCA entry for this EE;
            # the for/else falls through to creation when none matches
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # append position: one past the last enumerated index
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3929
3930 @staticmethod
3931 def _create_nslcmop(nsr_id, operation, params):
3932 """
3933 Creates a ns-lcm-opp content to be stored at database.
3934 :param nsr_id: internal id of the instance
3935 :param operation: instantiate, terminate, scale, action, ...
3936 :param params: user parameters for the operation
3937 :return: dictionary following SOL005 format
3938 """
3939 # Raise exception if invalid arguments
3940 if not (nsr_id and operation and params):
3941 raise LcmException(
3942 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3943 )
3944 now = time()
3945 _id = str(uuid4())
3946 nslcmop = {
3947 "id": _id,
3948 "_id": _id,
3949 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3950 "operationState": "PROCESSING",
3951 "statusEnteredTime": now,
3952 "nsInstanceId": nsr_id,
3953 "lcmOperationType": operation,
3954 "startTime": now,
3955 "isAutomaticInvocation": False,
3956 "operationParams": params,
3957 "isCancelPending": False,
3958 "links": {
3959 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3960 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3961 },
3962 }
3963 return nslcmop
3964
3965 def _format_additional_params(self, params):
3966 params = params or {}
3967 for key, value in params.items():
3968 if str(value).startswith("!!yaml "):
3969 params[key] = yaml.safe_load(value[7:])
3970 return params
3971
3972 def _get_terminate_primitive_params(self, seq, vnf_index):
3973 primitive = seq.get("name")
3974 primitive_params = {}
3975 params = {
3976 "member_vnf_index": vnf_index,
3977 "primitive": primitive,
3978 "primitive_params": primitive_params,
3979 }
3980 desc_params = {}
3981 return self._map_primitive_params(seq, params, desc_params)
3982
3983 # sub-operations
3984
3985 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3986 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3987 if op.get("operationState") == "COMPLETED":
3988 # b. Skip sub-operation
3989 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3990 return self.SUBOPERATION_STATUS_SKIP
3991 else:
3992 # c. retry executing sub-operation
3993 # The sub-operation exists, and operationState != 'COMPLETED'
3994 # Update operationState = 'PROCESSING' to indicate a retry.
3995 operationState = "PROCESSING"
3996 detailed_status = "In progress"
3997 self._update_suboperation_status(
3998 db_nslcmop, op_index, operationState, detailed_status
3999 )
4000 # Return the sub-operation index
4001 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4002 # with arguments extracted from the sub-operation
4003 return op_index
4004
4005 # Find a sub-operation where all keys in a matching dictionary must match
4006 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4007 def _find_suboperation(self, db_nslcmop, match):
4008 if db_nslcmop and match:
4009 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
4010 for i, op in enumerate(op_list):
4011 if all(op.get(k) == match[k] for k in match):
4012 return i
4013 return self.SUBOPERATION_STATUS_NOT_FOUND
4014
4015 # Update status for a sub-operation given its index
4016 def _update_suboperation_status(
4017 self, db_nslcmop, op_index, operationState, detailed_status
4018 ):
4019 # Update DB for HA tasks
4020 q_filter = {"_id": db_nslcmop["_id"]}
4021 update_dict = {
4022 "_admin.operations.{}.operationState".format(op_index): operationState,
4023 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4024 }
4025 self.db.set_one(
4026 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4027 )
4028
4029 # Add sub-operation, return the index of the added sub-operation
4030 # Optionally, set operationState, detailed-status, and operationType
4031 # Status and type are currently set for 'scale' sub-operations:
4032 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4033 # 'detailed-status' : status message
4034 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4035 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4036 def _add_suboperation(
4037 self,
4038 db_nslcmop,
4039 vnf_index,
4040 vdu_id,
4041 vdu_count_index,
4042 vdu_name,
4043 primitive,
4044 mapped_primitive_params,
4045 operationState=None,
4046 detailed_status=None,
4047 operationType=None,
4048 RO_nsr_id=None,
4049 RO_scaling_info=None,
4050 ):
4051 if not db_nslcmop:
4052 return self.SUBOPERATION_STATUS_NOT_FOUND
4053 # Get the "_admin.operations" list, if it exists
4054 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4055 op_list = db_nslcmop_admin.get("operations")
4056 # Create or append to the "_admin.operations" list
4057 new_op = {
4058 "member_vnf_index": vnf_index,
4059 "vdu_id": vdu_id,
4060 "vdu_count_index": vdu_count_index,
4061 "primitive": primitive,
4062 "primitive_params": mapped_primitive_params,
4063 }
4064 if operationState:
4065 new_op["operationState"] = operationState
4066 if detailed_status:
4067 new_op["detailed-status"] = detailed_status
4068 if operationType:
4069 new_op["lcmOperationType"] = operationType
4070 if RO_nsr_id:
4071 new_op["RO_nsr_id"] = RO_nsr_id
4072 if RO_scaling_info:
4073 new_op["RO_scaling_info"] = RO_scaling_info
4074 if not op_list:
4075 # No existing operations, create key 'operations' with current operation as first list element
4076 db_nslcmop_admin.update({"operations": [new_op]})
4077 op_list = db_nslcmop_admin.get("operations")
4078 else:
4079 # Existing operations, append operation to list
4080 op_list.append(new_op)
4081
4082 db_nslcmop_update = {"_admin.operations": op_list}
4083 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4084 op_index = len(op_list) - 1
4085 return op_index
4086
4087 # Helper methods for scale() sub-operations
4088
4089 # pre-scale/post-scale:
4090 # Check for 3 different cases:
4091 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4092 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4093 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4094 def _check_or_add_scale_suboperation(
4095 self,
4096 db_nslcmop,
4097 vnf_index,
4098 vnf_config_primitive,
4099 primitive_params,
4100 operationType,
4101 RO_nsr_id=None,
4102 RO_scaling_info=None,
4103 ):
4104 # Find this sub-operation
4105 if RO_nsr_id and RO_scaling_info:
4106 operationType = "SCALE-RO"
4107 match = {
4108 "member_vnf_index": vnf_index,
4109 "RO_nsr_id": RO_nsr_id,
4110 "RO_scaling_info": RO_scaling_info,
4111 }
4112 else:
4113 match = {
4114 "member_vnf_index": vnf_index,
4115 "primitive": vnf_config_primitive,
4116 "primitive_params": primitive_params,
4117 "lcmOperationType": operationType,
4118 }
4119 op_index = self._find_suboperation(db_nslcmop, match)
4120 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4121 # a. New sub-operation
4122 # The sub-operation does not exist, add it.
4123 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4124 # The following parameters are set to None for all kind of scaling:
4125 vdu_id = None
4126 vdu_count_index = None
4127 vdu_name = None
4128 if RO_nsr_id and RO_scaling_info:
4129 vnf_config_primitive = None
4130 primitive_params = None
4131 else:
4132 RO_nsr_id = None
4133 RO_scaling_info = None
4134 # Initial status for sub-operation
4135 operationState = "PROCESSING"
4136 detailed_status = "In progress"
4137 # Add sub-operation for pre/post-scaling (zero or more operations)
4138 self._add_suboperation(
4139 db_nslcmop,
4140 vnf_index,
4141 vdu_id,
4142 vdu_count_index,
4143 vdu_name,
4144 vnf_config_primitive,
4145 primitive_params,
4146 operationState,
4147 detailed_status,
4148 operationType,
4149 RO_nsr_id,
4150 RO_scaling_info,
4151 )
4152 return self.SUBOPERATION_STATUS_NEW
4153 else:
4154 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4155 # or op_index (operationState != 'COMPLETED')
4156 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4157
4158 # Function to return execution_environment id
4159
4160 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4161 # TODO vdu_index_count
4162 for vca in vca_deployed_list:
4163 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4164 return vca["ee_id"]
4165
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for all log messages
        :param db_nslcmop: nslcmop database record; used to record sub-operations
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of them at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: identifier of the VCA (juju controller) to operate on, when not the default one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default to a proxy charm when the deployment record has no explicit type
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # only run when there are primitives AND the VCA is flagged as needing them
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {}  for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4271
4272 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4273 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4274 namespace = "." + db_nsr["_id"]
4275 try:
4276 await self.n2vc.delete_namespace(
4277 namespace=namespace,
4278 total_timeout=self.timeout_charm_delete,
4279 vca_id=vca_id,
4280 )
4281 except N2VCNotFound: # already deleted. Skip
4282 pass
4283 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4284
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO: deletes the ns from the VIM (polling until
        done), then the nsd and the vnfds from RO, accumulating partial failures.
        :param logging_text: prefix for all log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: nsr database record identifier
        :param nslcmop_id: nslcmop database record identifier
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: when any deletion step reported an error
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates per-step error messages
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a pending delete action id means a previous delete was already requested
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                # persist the action id immediately so a restarted LCM can resume the wait
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                # poll RO every 5s until the delete action finishes or times out
                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # ACTIVE here means the delete action completed
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write status to DB only when it actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    # NOTE(review): the 'loop' kwarg of asyncio.sleep was removed in
                    # Python 3.10 — confirm the supported runtime version
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                # already gone on RO side: treat as success and clear the references
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns deletion above fully succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO, one by one
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4484
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance.

        Stage 1 prepares the task, stage 2 runs the per-VCA terminate primitives,
        stage 3 deletes all execution environments, KDUs and the RO/VIM deployment.
        Final NS/operation status is written in the finally block and a 'terminated'
        message is published on kafka.
        :param nsr_id: nsr database record identifier
        :param nslcmop_id: nslcmop (terminate operation) database record identifier
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so partial failures do not corrupt the db record in memory
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            # cache each vnfd once, indexed both by id and by member-vnf-index
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the config descriptor matching the VCA scope (ns/vdu/kdu/vnf)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            # notify NBI so it can autoremove the NS record when requested
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4820
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a group of asyncio tasks, recording progress and errors.

        Updates stage[1] with a done/total counter (plus error summary) and writes
        it to the operation status after every completion; optionally mirrors the
        errors on the nsr record.
        :param logging_text: prefix for all log messages
        :param created_tasks_info: dict mapping each task to a description string
        :param timeout: global timeout (seconds) for the whole group of tasks
        :param stage: status list; only index 1 is updated here
        :param nslcmop_id: operation id whose status is updated
        :param nsr_id: when provided, errorDescription/errorDetail are also written
            to this nsr record
        :return: list of error detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining share of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types get a one-line log entry;
                    # anything else is logged with its full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4897
4898 @staticmethod
4899 def _map_primitive_params(primitive_desc, params, instantiation_params):
4900 """
4901 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4902 The default-value is used. If it is between < > it look for a value at instantiation_params
4903 :param primitive_desc: portion of VNFD/NSD that describes primitive
4904 :param params: Params provided by user
4905 :param instantiation_params: Instantiation params provided by user
4906 :return: a dictionary with the calculated params
4907 """
4908 calculated_params = {}
4909 for parameter in primitive_desc.get("parameter", ()):
4910 param_name = parameter["name"]
4911 if param_name in params:
4912 calculated_params[param_name] = params[param_name]
4913 elif "default-value" in parameter or "value" in parameter:
4914 if "value" in parameter:
4915 calculated_params[param_name] = parameter["value"]
4916 else:
4917 calculated_params[param_name] = parameter["default-value"]
4918 if (
4919 isinstance(calculated_params[param_name], str)
4920 and calculated_params[param_name].startswith("<")
4921 and calculated_params[param_name].endswith(">")
4922 ):
4923 if calculated_params[param_name][1:-1] in instantiation_params:
4924 calculated_params[param_name] = instantiation_params[
4925 calculated_params[param_name][1:-1]
4926 ]
4927 else:
4928 raise LcmException(
4929 "Parameter {} needed to execute primitive {} not provided".format(
4930 calculated_params[param_name], primitive_desc["name"]
4931 )
4932 )
4933 else:
4934 raise LcmException(
4935 "Parameter {} needed to execute primitive {} not provided".format(
4936 param_name, primitive_desc["name"]
4937 )
4938 )
4939
4940 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4941 calculated_params[param_name] = yaml.safe_dump(
4942 calculated_params[param_name], default_flow_style=True, width=256
4943 )
4944 elif isinstance(calculated_params[param_name], str) and calculated_params[
4945 param_name
4946 ].startswith("!!yaml "):
4947 calculated_params[param_name] = calculated_params[param_name][7:]
4948 if parameter.get("data-type") == "INTEGER":
4949 try:
4950 calculated_params[param_name] = int(calculated_params[param_name])
4951 except ValueError: # error converting string to int
4952 raise LcmException(
4953 "Parameter {} of primitive {} must be integer".format(
4954 param_name, primitive_desc["name"]
4955 )
4956 )
4957 elif parameter.get("data-type") == "BOOLEAN":
4958 calculated_params[param_name] = not (
4959 (str(calculated_params[param_name])).lower() == "false"
4960 )
4961
4962 # add always ns_config_info if primitive name is config
4963 if primitive_desc["name"] == "config":
4964 if "ns_config_info" in instantiation_params:
4965 calculated_params["ns_config_info"] = instantiation_params[
4966 "ns_config_info"
4967 ]
4968 return calculated_params
4969
4970 def _look_for_deployed_vca(
4971 self,
4972 deployed_vca,
4973 member_vnf_index,
4974 vdu_id,
4975 vdu_count_index,
4976 kdu_name=None,
4977 ee_descriptor_id=None,
4978 ):
4979 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4980 for vca in deployed_vca:
4981 if not vca:
4982 continue
4983 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4984 continue
4985 if (
4986 vdu_count_index is not None
4987 and vdu_count_index != vca["vdu_count_index"]
4988 ):
4989 continue
4990 if kdu_name and kdu_name != vca["kdu_name"]:
4991 continue
4992 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4993 continue
4994 break
4995 else:
4996 # vca_deployed not found
4997 raise LcmException(
4998 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4999 " is not deployed".format(
5000 member_vnf_index,
5001 vdu_id,
5002 vdu_count_index,
5003 kdu_name,
5004 ee_descriptor_id,
5005 )
5006 )
5007 # get ee_id
5008 ee_id = vca.get("ee_id")
5009 vca_type = vca.get(
5010 "type", "lxc_proxy_charm"
5011 ) # default value for backward compatibility - proxy charm
5012 if not ee_id:
5013 raise LcmException(
5014 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5015 "execution environment".format(
5016 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5017 )
5018 )
5019 return ee_id, vca_type
5020
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on a VCA execution environment, retrying on failure.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive (action) name; "config" gets its params wrapped
        :param primitive_params: parameters passed to the primitive
        :param retries: number of extra attempts after the first failure
        :param retries_interval: seconds to wait between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map; defaults to proxy charm
        :param db_dict: database location where the connector reports progress
        :param vca_id: VCA record id
        :return: ("COMPLETED", output) on success; ("FAILED", detail) after
            exhausting retries; ("FAIL", detail) on unexpected errors
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            # default for backward compatibility - proxy charm
            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # retries exhausted: map timeouts to a readable message
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5081
5082 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5083 """
5084 Updating the vca_status with latest juju information in nsrs record
5085 :param: nsr_id: Id of the nsr
5086 :param: nslcmop_id: Id of the nslcmop
5087 :return: None
5088 """
5089
5090 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5091 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5092 vca_id = self.get_vca_id({}, db_nsr)
5093 if db_nsr["_admin"]["deployed"]["K8s"]:
5094 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5095 cluster_uuid, kdu_instance, cluster_type = (
5096 k8s["k8scluster-uuid"],
5097 k8s["kdu-instance"],
5098 k8s["k8scluster-type"],
5099 )
5100 await self._on_update_k8s_db(
5101 cluster_uuid=cluster_uuid,
5102 kdu_instance=kdu_instance,
5103 filter={"_id": nsr_id},
5104 vca_id=vca_id,
5105 cluster_type=cluster_type,
5106 )
5107 else:
5108 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5109 table, filter = "nsrs", {"_id": nsr_id}
5110 path = "_admin.deployed.VCA.{}.".format(vca_index)
5111 await self._on_update_n2vc_db(table, filter, path, {})
5112
5113 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5114 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5115
    async def action(self, nsr_id, nslcmop_id):
        """Execute a primitive (action) on an NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmop record, locates the
        primitive in the corresponding descriptor configuration, and runs it
        either through the k8s cluster connector (KDU case) or through the
        deployed VCA execution environment. Operation result and status are
        written back to the database in the finally block.

        :param nsr_id: NS instance id
        :param nslcmop_id: nslcmop operation id
        :return: (nslcmop_operation_state, detailed_status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params arrive json-encoded in the operation record
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored json-encoded; decode in place
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound when vnf_index is set; this
            # call appears to rely on vnf_index being present — confirm callers
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # built-in kdu operations need no descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve the additionalParams scope (vdu / kdu / vnf / ns level)
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # kdu_action: primitive is declared in the kdu configuration and
                # the cluster type is not helm (helm primitives go elsewhere)
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model may be overridden via params; otherwise strip a
                    # trailing ":version" from the deployed model reference
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # descriptor-declared kdu primitive executed on the cluster
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # non-kdu path: run the primitive on the deployed VCA
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist outcome, reset NS status and notify via kafka
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5486
5487 async def terminate_vdus(
5488 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5489 ):
5490 """This method terminates VDUs
5491
5492 Args:
5493 db_vnfr: VNF instance record
5494 member_vnf_index: VNF index to identify the VDUs to be removed
5495 db_nsr: NS instance record
5496 update_db_nslcmops: Nslcmop update record
5497 """
5498 vca_scaling_info = []
5499 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5500 scaling_info["scaling_direction"] = "IN"
5501 scaling_info["vdu-delete"] = {}
5502 scaling_info["kdu-delete"] = {}
5503 db_vdur = db_vnfr.get("vdur")
5504 vdur_list = copy(db_vdur)
5505 count_index = 0
5506 for index, vdu in enumerate(vdur_list):
5507 vca_scaling_info.append(
5508 {
5509 "osm_vdu_id": vdu["vdu-id-ref"],
5510 "member-vnf-index": member_vnf_index,
5511 "type": "delete",
5512 "vdu_index": count_index,
5513 }
5514 )
5515 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5516 scaling_info["vdu"].append(
5517 {
5518 "name": vdu.get("name") or vdu.get("vdu-name"),
5519 "vdu_id": vdu["vdu-id-ref"],
5520 "interface": [],
5521 }
5522 )
5523 for interface in vdu["interfaces"]:
5524 scaling_info["vdu"][index]["interface"].append(
5525 {
5526 "name": interface["name"],
5527 "ip_address": interface["ip-address"],
5528 "mac_address": interface.get("mac-address"),
5529 }
5530 )
5531 self.logger.info("NS update scaling info{}".format(scaling_info))
5532 stage[2] = "Terminating VDUs"
5533 if scaling_info.get("vdu-delete"):
5534 # scale_process = "RO"
5535 if self.ro_config.get("ng"):
5536 await self._scale_ng_ro(
5537 logging_text,
5538 db_nsr,
5539 update_db_nslcmops,
5540 db_vnfr,
5541 scaling_info,
5542 stage,
5543 )
5544
5545 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5546 """This method is to Remove VNF instances from NS.
5547
5548 Args:
5549 nsr_id: NS instance id
5550 nslcmop_id: nslcmop id of update
5551 vnf_instance_id: id of the VNF instance to be removed
5552
5553 Returns:
5554 result: (str, str) COMPLETED/FAILED, details
5555 """
5556 try:
5557 db_nsr_update = {}
5558 logging_text = "Task ns={} update ".format(nsr_id)
5559 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5560 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5561 if check_vnfr_count > 1:
5562 stage = ["", "", ""]
5563 step = "Getting nslcmop from database"
5564 self.logger.debug(
5565 step + " after having waited for previous tasks to be completed"
5566 )
5567 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5568 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5569 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5570 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5571 """ db_vnfr = self.db.get_one(
5572 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5573
5574 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5575 await self.terminate_vdus(
5576 db_vnfr,
5577 member_vnf_index,
5578 db_nsr,
5579 update_db_nslcmops,
5580 stage,
5581 logging_text,
5582 )
5583
5584 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5585 constituent_vnfr.remove(db_vnfr.get("_id"))
5586 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5587 "constituent-vnfr-ref"
5588 )
5589 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5590 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5591 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5592 return "COMPLETED", "Done"
5593 else:
5594 step = "Terminate VNF Failed with"
5595 raise LcmException(
5596 "{} Cannot terminate the last VNF in this NS.".format(
5597 vnf_instance_id
5598 )
5599 )
5600 except (LcmException, asyncio.CancelledError):
5601 raise
5602 except Exception as e:
5603 self.logger.debug("Error removing VNF {}".format(e))
5604 return "FAILED", "Error removing VNF {}".format(e)
5605
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's current VDUs, rewrites the vnfr (connection
        points, vdur, revision) from the latest descriptor and the nslcmop's
        pre-computed "newVdur", then instantiates the new resources via RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # new vdur comes pre-computed in the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
                return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5731
5732 async def _ns_charm_upgrade(
5733 self,
5734 ee_id,
5735 charm_id,
5736 charm_type,
5737 path,
5738 timeout: float = None,
5739 ) -> (str, str):
5740 """This method upgrade charms in VNF instances
5741
5742 Args:
5743 ee_id: Execution environment id
5744 path: Local path to the charm
5745 charm_id: charm-id
5746 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5747 timeout: (Float) Timeout for the ns update operation
5748
5749 Returns:
5750 result: (str, str) COMPLETED/FAILED, details
5751 """
5752 try:
5753 charm_type = charm_type or "lxc_proxy_charm"
5754 output = await self.vca_map[charm_type].upgrade_charm(
5755 ee_id=ee_id,
5756 path=path,
5757 charm_id=charm_id,
5758 charm_type=charm_type,
5759 timeout=timeout or self.timeout_ns_update,
5760 )
5761
5762 if output:
5763 return "COMPLETED", output
5764
5765 except (LcmException, asyncio.CancelledError):
5766 raise
5767
5768 except Exception as e:
5769
5770 self.logger.debug("Error upgrading charm {}".format(path))
5771
5772 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5773
5774 async def update(self, nsr_id, nslcmop_id):
5775 """Update NS according to different update types
5776
5777 This method performs upgrade of VNF instances then updates the revision
5778 number in VNF record
5779
5780 Args:
5781 nsr_id: Network service will be updated
5782 nslcmop_id: ns lcm operation id
5783
5784 Returns:
5785 It may raise DbException, LcmException, N2VCException, K8sException
5786
5787 """
5788 # Try to lock HA task here
5789 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5790 if not task_is_locked_by_me:
5791 return
5792
5793 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5794 self.logger.debug(logging_text + "Enter")
5795
5796 # Set the required variables to be filled up later
5797 db_nsr = None
5798 db_nslcmop_update = {}
5799 vnfr_update = {}
5800 nslcmop_operation_state = None
5801 db_nsr_update = {}
5802 error_description_nslcmop = ""
5803 exc = None
5804 change_type = "updated"
5805 detailed_status = ""
5806
5807 try:
5808 # wait for any previous tasks in process
5809 step = "Waiting for previous operations to terminate"
5810 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5811 self._write_ns_status(
5812 nsr_id=nsr_id,
5813 ns_state=None,
5814 current_operation="UPDATING",
5815 current_operation_id=nslcmop_id,
5816 )
5817
5818 step = "Getting nslcmop from database"
5819 db_nslcmop = self.db.get_one(
5820 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5821 )
5822 update_type = db_nslcmop["operationParams"]["updateType"]
5823
5824 step = "Getting nsr from database"
5825 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5826 old_operational_status = db_nsr["operational-status"]
5827 db_nsr_update["operational-status"] = "updating"
5828 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5829 nsr_deployed = db_nsr["_admin"].get("deployed")
5830
5831 if update_type == "CHANGE_VNFPKG":
5832
5833 # Get the input parameters given through update request
5834 vnf_instance_id = db_nslcmop["operationParams"][
5835 "changeVnfPackageData"
5836 ].get("vnfInstanceId")
5837
5838 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5839 "vnfdId"
5840 )
5841 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5842
5843 step = "Getting vnfr from database"
5844 db_vnfr = self.db.get_one(
5845 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5846 )
5847
5848 step = "Getting vnfds from database"
5849 # Latest VNFD
5850 latest_vnfd = self.db.get_one(
5851 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5852 )
5853 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5854
5855 # Current VNFD
5856 current_vnf_revision = db_vnfr.get("revision", 1)
5857 current_vnfd = self.db.get_one(
5858 "vnfds_revisions",
5859 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5860 fail_on_empty=False,
5861 )
5862 # Charm artifact paths will be filled up later
5863 (
5864 current_charm_artifact_path,
5865 target_charm_artifact_path,
5866 charm_artifact_paths,
5867 helm_artifacts,
5868 ) = ([], [], [], [])
5869
5870 step = "Checking if revision has changed in VNFD"
5871 if current_vnf_revision != latest_vnfd_revision:
5872
5873 change_type = "policy_updated"
5874
5875 # There is new revision of VNFD, update operation is required
5876 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5877 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5878
5879 step = "Removing the VNFD packages if they exist in the local path"
5880 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5881 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5882
5883 step = "Get the VNFD packages from FSMongo"
5884 self.fs.sync(from_path=latest_vnfd_path)
5885 self.fs.sync(from_path=current_vnfd_path)
5886
5887 step = (
5888 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5889 )
5890 current_base_folder = current_vnfd["_admin"]["storage"]
5891 latest_base_folder = latest_vnfd["_admin"]["storage"]
5892
5893 for vca_index, vca_deployed in enumerate(
5894 get_iterable(nsr_deployed, "VCA")
5895 ):
5896 vnf_index = db_vnfr.get("member-vnf-index-ref")
5897
5898 # Getting charm-id and charm-type
5899 if vca_deployed.get("member-vnf-index") == vnf_index:
5900 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5901 vca_type = vca_deployed.get("type")
5902 vdu_count_index = vca_deployed.get("vdu_count_index")
5903
5904 # Getting ee-id
5905 ee_id = vca_deployed.get("ee_id")
5906
5907 step = "Getting descriptor config"
5908 if current_vnfd.get("kdu"):
5909
5910 search_key = "kdu_name"
5911 else:
5912 search_key = "vnfd_id"
5913
5914 entity_id = vca_deployed.get(search_key)
5915
5916 descriptor_config = get_configuration(
5917 current_vnfd, entity_id
5918 )
5919
5920 if "execution-environment-list" in descriptor_config:
5921 ee_list = descriptor_config.get(
5922 "execution-environment-list", []
5923 )
5924 else:
5925 ee_list = []
5926
5927 # There could be several charm used in the same VNF
5928 for ee_item in ee_list:
5929 if ee_item.get("juju"):
5930
5931 step = "Getting charm name"
5932 charm_name = ee_item["juju"].get("charm")
5933
5934 step = "Setting Charm artifact paths"
5935 current_charm_artifact_path.append(
5936 get_charm_artifact_path(
5937 current_base_folder,
5938 charm_name,
5939 vca_type,
5940 current_vnf_revision,
5941 )
5942 )
5943 target_charm_artifact_path.append(
5944 get_charm_artifact_path(
5945 latest_base_folder,
5946 charm_name,
5947 vca_type,
5948 latest_vnfd_revision,
5949 )
5950 )
5951 elif ee_item.get("helm-chart"):
5952 # add chart to list and all parameters
5953 step = "Getting helm chart name"
5954 chart_name = ee_item.get("helm-chart")
5955 if (
5956 ee_item.get("helm-version")
5957 and ee_item.get("helm-version") == "v2"
5958 ):
5959 vca_type = "helm"
5960 else:
5961 vca_type = "helm-v3"
5962 step = "Setting Helm chart artifact paths"
5963
5964 helm_artifacts.append(
5965 {
5966 "current_artifact_path": get_charm_artifact_path(
5967 current_base_folder,
5968 chart_name,
5969 vca_type,
5970 current_vnf_revision,
5971 ),
5972 "target_artifact_path": get_charm_artifact_path(
5973 latest_base_folder,
5974 chart_name,
5975 vca_type,
5976 latest_vnfd_revision,
5977 ),
5978 "ee_id": ee_id,
5979 "vca_index": vca_index,
5980 "vdu_index": vdu_count_index,
5981 }
5982 )
5983
5984 charm_artifact_paths = zip(
5985 current_charm_artifact_path, target_charm_artifact_path
5986 )
5987
5988 step = "Checking if software version has changed in VNFD"
5989 if find_software_version(current_vnfd) != find_software_version(
5990 latest_vnfd
5991 ):
5992
5993 step = "Checking if existing VNF has charm"
5994 for current_charm_path, target_charm_path in list(
5995 charm_artifact_paths
5996 ):
5997 if current_charm_path:
5998 raise LcmException(
5999 "Software version change is not supported as VNF instance {} has charm.".format(
6000 vnf_instance_id
6001 )
6002 )
6003
6004 # There is no change in the charm package, then redeploy the VNF
6005 # based on new descriptor
6006 step = "Redeploying VNF"
6007 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6008 (result, detailed_status) = await self._ns_redeploy_vnf(
6009 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
6010 )
6011 if result == "FAILED":
6012 nslcmop_operation_state = result
6013 error_description_nslcmop = detailed_status
6014 db_nslcmop_update["detailed-status"] = detailed_status
6015 self.logger.debug(
6016 logging_text
6017 + " step {} Done with result {} {}".format(
6018 step, nslcmop_operation_state, detailed_status
6019 )
6020 )
6021
6022 else:
6023 step = "Checking if any charm package has changed or not"
6024 for current_charm_path, target_charm_path in list(
6025 charm_artifact_paths
6026 ):
6027 if (
6028 current_charm_path
6029 and target_charm_path
6030 and self.check_charm_hash_changed(
6031 current_charm_path, target_charm_path
6032 )
6033 ):
6034
6035 step = "Checking whether VNF uses juju bundle"
6036 if check_juju_bundle_existence(current_vnfd):
6037
6038 raise LcmException(
6039 "Charm upgrade is not supported for the instance which"
6040 " uses juju-bundle: {}".format(
6041 check_juju_bundle_existence(current_vnfd)
6042 )
6043 )
6044
6045 step = "Upgrading Charm"
6046 (
6047 result,
6048 detailed_status,
6049 ) = await self._ns_charm_upgrade(
6050 ee_id=ee_id,
6051 charm_id=vca_id,
6052 charm_type=vca_type,
6053 path=self.fs.path + target_charm_path,
6054 timeout=timeout_seconds,
6055 )
6056
6057 if result == "FAILED":
6058 nslcmop_operation_state = result
6059 error_description_nslcmop = detailed_status
6060
6061 db_nslcmop_update["detailed-status"] = detailed_status
6062 self.logger.debug(
6063 logging_text
6064 + " step {} Done with result {} {}".format(
6065 step, nslcmop_operation_state, detailed_status
6066 )
6067 )
6068
6069 step = "Updating policies"
6070 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6071 result = "COMPLETED"
6072 detailed_status = "Done"
6073 db_nslcmop_update["detailed-status"] = "Done"
6074
6075 # helm base EE
6076 for item in helm_artifacts:
6077 if not (
6078 item["current_artifact_path"]
6079 and item["target_artifact_path"]
6080 and self.check_charm_hash_changed(
6081 item["current_artifact_path"],
6082 item["target_artifact_path"],
6083 )
6084 ):
6085 continue
6086 db_update_entry = "_admin.deployed.VCA.{}.".format(
6087 item["vca_index"]
6088 )
6089 vnfr_id = db_vnfr["_id"]
6090 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6091 db_dict = {
6092 "collection": "nsrs",
6093 "filter": {"_id": nsr_id},
6094 "path": db_update_entry,
6095 }
6096 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6097 await self.vca_map[vca_type].upgrade_execution_environment(
6098 namespace=namespace,
6099 helm_id=helm_id,
6100 db_dict=db_dict,
6101 config=osm_config,
6102 artifact_path=item["target_artifact_path"],
6103 vca_type=vca_type,
6104 )
6105 vnf_id = db_vnfr.get("vnfd-ref")
6106 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6107 self.logger.debug("get ssh key block")
6108 rw_mgmt_ip = None
6109 if deep_get(
6110 config_descriptor,
6111 ("config-access", "ssh-access", "required"),
6112 ):
6113 # Needed to inject a ssh key
6114 user = deep_get(
6115 config_descriptor,
6116 ("config-access", "ssh-access", "default-user"),
6117 )
6118 step = (
6119 "Install configuration Software, getting public ssh key"
6120 )
6121 pub_key = await self.vca_map[
6122 vca_type
6123 ].get_ee_ssh_public__key(
6124 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6125 )
6126
6127 step = (
6128 "Insert public key into VM user={} ssh_key={}".format(
6129 user, pub_key
6130 )
6131 )
6132 self.logger.debug(logging_text + step)
6133
6134 # wait for RO (ip-address) Insert pub_key into VM
6135 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6136 logging_text,
6137 nsr_id,
6138 vnfr_id,
6139 None,
6140 item["vdu_index"],
6141 user=user,
6142 pub_key=pub_key,
6143 )
6144
6145 initial_config_primitive_list = config_descriptor.get(
6146 "initial-config-primitive"
6147 )
6148 config_primitive = next(
6149 (
6150 p
6151 for p in initial_config_primitive_list
6152 if p["name"] == "config"
6153 ),
6154 None,
6155 )
6156 if not config_primitive:
6157 continue
6158
6159 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6160 if rw_mgmt_ip:
6161 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6162 if db_vnfr.get("additionalParamsForVnf"):
6163 deploy_params.update(
6164 parse_yaml_strings(
6165 db_vnfr["additionalParamsForVnf"].copy()
6166 )
6167 )
6168 primitive_params_ = self._map_primitive_params(
6169 config_primitive, {}, deploy_params
6170 )
6171
6172 step = "execute primitive '{}' params '{}'".format(
6173 config_primitive["name"], primitive_params_
6174 )
6175 self.logger.debug(logging_text + step)
6176 await self.vca_map[vca_type].exec_primitive(
6177 ee_id=ee_id,
6178 primitive_name=config_primitive["name"],
6179 params_dict=primitive_params_,
6180 db_dict=db_dict,
6181 vca_id=vca_id,
6182 vca_type=vca_type,
6183 )
6184
6185 step = "Updating policies"
6186 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6187 detailed_status = "Done"
6188 db_nslcmop_update["detailed-status"] = "Done"
6189
6190 # If nslcmop_operation_state is None, so any operation is not failed.
6191 if not nslcmop_operation_state:
6192 nslcmop_operation_state = "COMPLETED"
6193
6194 # If update CHANGE_VNFPKG nslcmop_operation is successful
6195 # vnf revision need to be updated
6196 vnfr_update["revision"] = latest_vnfd_revision
6197 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6198
6199 self.logger.debug(
6200 logging_text
6201 + " task Done with result {} {}".format(
6202 nslcmop_operation_state, detailed_status
6203 )
6204 )
6205 elif update_type == "REMOVE_VNF":
6206 # This part is included in https://osm.etsi.org/gerrit/11876
6207 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6208 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6209 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6210 step = "Removing VNF"
6211 (result, detailed_status) = await self.remove_vnf(
6212 nsr_id, nslcmop_id, vnf_instance_id
6213 )
6214 if result == "FAILED":
6215 nslcmop_operation_state = result
6216 error_description_nslcmop = detailed_status
6217 db_nslcmop_update["detailed-status"] = detailed_status
6218 change_type = "vnf_terminated"
6219 if not nslcmop_operation_state:
6220 nslcmop_operation_state = "COMPLETED"
6221 self.logger.debug(
6222 logging_text
6223 + " task Done with result {} {}".format(
6224 nslcmop_operation_state, detailed_status
6225 )
6226 )
6227
6228 elif update_type == "OPERATE_VNF":
6229 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6230 "vnfInstanceId"
6231 ]
6232 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6233 "changeStateTo"
6234 ]
6235 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6236 "additionalParam"
6237 ]
6238 (result, detailed_status) = await self.rebuild_start_stop(
6239 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6240 )
6241 if result == "FAILED":
6242 nslcmop_operation_state = result
6243 error_description_nslcmop = detailed_status
6244 db_nslcmop_update["detailed-status"] = detailed_status
6245 if not nslcmop_operation_state:
6246 nslcmop_operation_state = "COMPLETED"
6247 self.logger.debug(
6248 logging_text
6249 + " task Done with result {} {}".format(
6250 nslcmop_operation_state, detailed_status
6251 )
6252 )
6253
6254 # If nslcmop_operation_state is None, so any operation is not failed.
6255 # All operations are executed in overall.
6256 if not nslcmop_operation_state:
6257 nslcmop_operation_state = "COMPLETED"
6258 db_nsr_update["operational-status"] = old_operational_status
6259
6260 except (DbException, LcmException, N2VCException, K8sException) as e:
6261 self.logger.error(logging_text + "Exit Exception {}".format(e))
6262 exc = e
6263 except asyncio.CancelledError:
6264 self.logger.error(
6265 logging_text + "Cancelled Exception while '{}'".format(step)
6266 )
6267 exc = "Operation was cancelled"
6268 except asyncio.TimeoutError:
6269 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6270 exc = "Timeout"
6271 except Exception as e:
6272 exc = traceback.format_exc()
6273 self.logger.critical(
6274 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6275 exc_info=True,
6276 )
6277 finally:
6278 if exc:
6279 db_nslcmop_update[
6280 "detailed-status"
6281 ] = (
6282 detailed_status
6283 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6284 nslcmop_operation_state = "FAILED"
6285 db_nsr_update["operational-status"] = old_operational_status
6286 if db_nsr:
6287 self._write_ns_status(
6288 nsr_id=nsr_id,
6289 ns_state=db_nsr["nsState"],
6290 current_operation="IDLE",
6291 current_operation_id=None,
6292 other_update=db_nsr_update,
6293 )
6294
6295 self._write_op_status(
6296 op_id=nslcmop_id,
6297 stage="",
6298 error_message=error_description_nslcmop,
6299 operation_state=nslcmop_operation_state,
6300 other_update=db_nslcmop_update,
6301 )
6302
6303 if nslcmop_operation_state:
6304 try:
6305 msg = {
6306 "nsr_id": nsr_id,
6307 "nslcmop_id": nslcmop_id,
6308 "operationState": nslcmop_operation_state,
6309 }
6310 if change_type in ("vnf_terminated", "policy_updated"):
6311 msg.update({"vnf_member_index": member_vnf_index})
6312 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6313 except Exception as e:
6314 self.logger.error(
6315 logging_text + "kafka_write notification Exception {}".format(e)
6316 )
6317 self.logger.debug(logging_text + "Exit")
6318 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6319 return nslcmop_operation_state, detailed_status
6320
6321 async def scale(self, nsr_id, nslcmop_id):
6322 # Try to lock HA task here
6323 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6324 if not task_is_locked_by_me:
6325 return
6326
6327 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6328 stage = ["", "", ""]
6329 tasks_dict_info = {}
6330 # ^ stage, step, VIM progress
6331 self.logger.debug(logging_text + "Enter")
6332 # get all needed from database
6333 db_nsr = None
6334 db_nslcmop_update = {}
6335 db_nsr_update = {}
6336 exc = None
6337 # in case of error, indicates what part of scale was failed to put nsr at error status
6338 scale_process = None
6339 old_operational_status = ""
6340 old_config_status = ""
6341 nsi_id = None
6342 try:
6343 # wait for any previous tasks in process
6344 step = "Waiting for previous operations to terminate"
6345 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6346 self._write_ns_status(
6347 nsr_id=nsr_id,
6348 ns_state=None,
6349 current_operation="SCALING",
6350 current_operation_id=nslcmop_id,
6351 )
6352
6353 step = "Getting nslcmop from database"
6354 self.logger.debug(
6355 step + " after having waited for previous tasks to be completed"
6356 )
6357 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6358
6359 step = "Getting nsr from database"
6360 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6361 old_operational_status = db_nsr["operational-status"]
6362 old_config_status = db_nsr["config-status"]
6363
6364 step = "Parsing scaling parameters"
6365 db_nsr_update["operational-status"] = "scaling"
6366 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6367 nsr_deployed = db_nsr["_admin"].get("deployed")
6368
6369 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6370 "scaleByStepData"
6371 ]["member-vnf-index"]
6372 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6373 "scaleByStepData"
6374 ]["scaling-group-descriptor"]
6375 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6376 # for backward compatibility
6377 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6378 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6379 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6380 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6381
6382 step = "Getting vnfr from database"
6383 db_vnfr = self.db.get_one(
6384 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6385 )
6386
6387 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6388
6389 step = "Getting vnfd from database"
6390 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6391
6392 base_folder = db_vnfd["_admin"]["storage"]
6393
6394 step = "Getting scaling-group-descriptor"
6395 scaling_descriptor = find_in_list(
6396 get_scaling_aspect(db_vnfd),
6397 lambda scale_desc: scale_desc["name"] == scaling_group,
6398 )
6399 if not scaling_descriptor:
6400 raise LcmException(
6401 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6402 "at vnfd:scaling-group-descriptor".format(scaling_group)
6403 )
6404
6405 step = "Sending scale order to VIM"
6406 # TODO check if ns is in a proper status
6407 nb_scale_op = 0
6408 if not db_nsr["_admin"].get("scaling-group"):
6409 self.update_db_2(
6410 "nsrs",
6411 nsr_id,
6412 {
6413 "_admin.scaling-group": [
6414 {"name": scaling_group, "nb-scale-op": 0}
6415 ]
6416 },
6417 )
6418 admin_scale_index = 0
6419 else:
6420 for admin_scale_index, admin_scale_info in enumerate(
6421 db_nsr["_admin"]["scaling-group"]
6422 ):
6423 if admin_scale_info["name"] == scaling_group:
6424 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6425 break
6426 else: # not found, set index one plus last element and add new entry with the name
6427 admin_scale_index += 1
6428 db_nsr_update[
6429 "_admin.scaling-group.{}.name".format(admin_scale_index)
6430 ] = scaling_group
6431
6432 vca_scaling_info = []
6433 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6434 if scaling_type == "SCALE_OUT":
6435 if "aspect-delta-details" not in scaling_descriptor:
6436 raise LcmException(
6437 "Aspect delta details not fount in scaling descriptor {}".format(
6438 scaling_descriptor["name"]
6439 )
6440 )
6441 # count if max-instance-count is reached
6442 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6443
6444 scaling_info["scaling_direction"] = "OUT"
6445 scaling_info["vdu-create"] = {}
6446 scaling_info["kdu-create"] = {}
6447 for delta in deltas:
6448 for vdu_delta in delta.get("vdu-delta", {}):
6449 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6450 # vdu_index also provides the number of instance of the targeted vdu
6451 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6452 cloud_init_text = self._get_vdu_cloud_init_content(
6453 vdud, db_vnfd
6454 )
6455 if cloud_init_text:
6456 additional_params = (
6457 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6458 or {}
6459 )
6460 cloud_init_list = []
6461
6462 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6463 max_instance_count = 10
6464 if vdu_profile and "max-number-of-instances" in vdu_profile:
6465 max_instance_count = vdu_profile.get(
6466 "max-number-of-instances", 10
6467 )
6468
6469 default_instance_num = get_number_of_instances(
6470 db_vnfd, vdud["id"]
6471 )
6472 instances_number = vdu_delta.get("number-of-instances", 1)
6473 nb_scale_op += instances_number
6474
6475 new_instance_count = nb_scale_op + default_instance_num
6476 # Control if new count is over max and vdu count is less than max.
6477 # Then assign new instance count
6478 if new_instance_count > max_instance_count > vdu_count:
6479 instances_number = new_instance_count - max_instance_count
6480 else:
6481 instances_number = instances_number
6482
6483 if new_instance_count > max_instance_count:
6484 raise LcmException(
6485 "reached the limit of {} (max-instance-count) "
6486 "scaling-out operations for the "
6487 "scaling-group-descriptor '{}'".format(
6488 nb_scale_op, scaling_group
6489 )
6490 )
6491 for x in range(vdu_delta.get("number-of-instances", 1)):
6492 if cloud_init_text:
6493 # TODO Information of its own ip is not available because db_vnfr is not updated.
6494 additional_params["OSM"] = get_osm_params(
6495 db_vnfr, vdu_delta["id"], vdu_index + x
6496 )
6497 cloud_init_list.append(
6498 self._parse_cloud_init(
6499 cloud_init_text,
6500 additional_params,
6501 db_vnfd["id"],
6502 vdud["id"],
6503 )
6504 )
6505 vca_scaling_info.append(
6506 {
6507 "osm_vdu_id": vdu_delta["id"],
6508 "member-vnf-index": vnf_index,
6509 "type": "create",
6510 "vdu_index": vdu_index + x,
6511 }
6512 )
6513 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6514 for kdu_delta in delta.get("kdu-resource-delta", {}):
6515 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6516 kdu_name = kdu_profile["kdu-name"]
6517 resource_name = kdu_profile.get("resource-name", "")
6518
6519 # Might have different kdus in the same delta
6520 # Should have list for each kdu
6521 if not scaling_info["kdu-create"].get(kdu_name, None):
6522 scaling_info["kdu-create"][kdu_name] = []
6523
6524 kdur = get_kdur(db_vnfr, kdu_name)
6525 if kdur.get("helm-chart"):
6526 k8s_cluster_type = "helm-chart-v3"
6527 self.logger.debug("kdur: {}".format(kdur))
6528 if (
6529 kdur.get("helm-version")
6530 and kdur.get("helm-version") == "v2"
6531 ):
6532 k8s_cluster_type = "helm-chart"
6533 elif kdur.get("juju-bundle"):
6534 k8s_cluster_type = "juju-bundle"
6535 else:
6536 raise LcmException(
6537 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6538 "juju-bundle. Maybe an old NBI version is running".format(
6539 db_vnfr["member-vnf-index-ref"], kdu_name
6540 )
6541 )
6542
6543 max_instance_count = 10
6544 if kdu_profile and "max-number-of-instances" in kdu_profile:
6545 max_instance_count = kdu_profile.get(
6546 "max-number-of-instances", 10
6547 )
6548
6549 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6550 deployed_kdu, _ = get_deployed_kdu(
6551 nsr_deployed, kdu_name, vnf_index
6552 )
6553 if deployed_kdu is None:
6554 raise LcmException(
6555 "KDU '{}' for vnf '{}' not deployed".format(
6556 kdu_name, vnf_index
6557 )
6558 )
6559 kdu_instance = deployed_kdu.get("kdu-instance")
6560 instance_num = await self.k8scluster_map[
6561 k8s_cluster_type
6562 ].get_scale_count(
6563 resource_name,
6564 kdu_instance,
6565 vca_id=vca_id,
6566 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6567 kdu_model=deployed_kdu.get("kdu-model"),
6568 )
6569 kdu_replica_count = instance_num + kdu_delta.get(
6570 "number-of-instances", 1
6571 )
6572
6573 # Control if new count is over max and instance_num is less than max.
6574 # Then assign max instance number to kdu replica count
6575 if kdu_replica_count > max_instance_count > instance_num:
6576 kdu_replica_count = max_instance_count
6577 if kdu_replica_count > max_instance_count:
6578 raise LcmException(
6579 "reached the limit of {} (max-instance-count) "
6580 "scaling-out operations for the "
6581 "scaling-group-descriptor '{}'".format(
6582 instance_num, scaling_group
6583 )
6584 )
6585
6586 for x in range(kdu_delta.get("number-of-instances", 1)):
6587 vca_scaling_info.append(
6588 {
6589 "osm_kdu_id": kdu_name,
6590 "member-vnf-index": vnf_index,
6591 "type": "create",
6592 "kdu_index": instance_num + x - 1,
6593 }
6594 )
6595 scaling_info["kdu-create"][kdu_name].append(
6596 {
6597 "member-vnf-index": vnf_index,
6598 "type": "create",
6599 "k8s-cluster-type": k8s_cluster_type,
6600 "resource-name": resource_name,
6601 "scale": kdu_replica_count,
6602 }
6603 )
6604 elif scaling_type == "SCALE_IN":
6605 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6606
6607 scaling_info["scaling_direction"] = "IN"
6608 scaling_info["vdu-delete"] = {}
6609 scaling_info["kdu-delete"] = {}
6610
6611 for delta in deltas:
6612 for vdu_delta in delta.get("vdu-delta", {}):
6613 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6614 min_instance_count = 0
6615 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6616 if vdu_profile and "min-number-of-instances" in vdu_profile:
6617 min_instance_count = vdu_profile["min-number-of-instances"]
6618
6619 default_instance_num = get_number_of_instances(
6620 db_vnfd, vdu_delta["id"]
6621 )
6622 instance_num = vdu_delta.get("number-of-instances", 1)
6623 nb_scale_op -= instance_num
6624
6625 new_instance_count = nb_scale_op + default_instance_num
6626
6627 if new_instance_count < min_instance_count < vdu_count:
6628 instances_number = min_instance_count - new_instance_count
6629 else:
6630 instances_number = instance_num
6631
6632 if new_instance_count < min_instance_count:
6633 raise LcmException(
6634 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6635 "scaling-group-descriptor '{}'".format(
6636 nb_scale_op, scaling_group
6637 )
6638 )
6639 for x in range(vdu_delta.get("number-of-instances", 1)):
6640 vca_scaling_info.append(
6641 {
6642 "osm_vdu_id": vdu_delta["id"],
6643 "member-vnf-index": vnf_index,
6644 "type": "delete",
6645 "vdu_index": vdu_index - 1 - x,
6646 }
6647 )
6648 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6649 for kdu_delta in delta.get("kdu-resource-delta", {}):
6650 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6651 kdu_name = kdu_profile["kdu-name"]
6652 resource_name = kdu_profile.get("resource-name", "")
6653
6654 if not scaling_info["kdu-delete"].get(kdu_name, None):
6655 scaling_info["kdu-delete"][kdu_name] = []
6656
6657 kdur = get_kdur(db_vnfr, kdu_name)
6658 if kdur.get("helm-chart"):
6659 k8s_cluster_type = "helm-chart-v3"
6660 self.logger.debug("kdur: {}".format(kdur))
6661 if (
6662 kdur.get("helm-version")
6663 and kdur.get("helm-version") == "v2"
6664 ):
6665 k8s_cluster_type = "helm-chart"
6666 elif kdur.get("juju-bundle"):
6667 k8s_cluster_type = "juju-bundle"
6668 else:
6669 raise LcmException(
6670 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6671 "juju-bundle. Maybe an old NBI version is running".format(
6672 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6673 )
6674 )
6675
6676 min_instance_count = 0
6677 if kdu_profile and "min-number-of-instances" in kdu_profile:
6678 min_instance_count = kdu_profile["min-number-of-instances"]
6679
6680 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6681 deployed_kdu, _ = get_deployed_kdu(
6682 nsr_deployed, kdu_name, vnf_index
6683 )
6684 if deployed_kdu is None:
6685 raise LcmException(
6686 "KDU '{}' for vnf '{}' not deployed".format(
6687 kdu_name, vnf_index
6688 )
6689 )
6690 kdu_instance = deployed_kdu.get("kdu-instance")
6691 instance_num = await self.k8scluster_map[
6692 k8s_cluster_type
6693 ].get_scale_count(
6694 resource_name,
6695 kdu_instance,
6696 vca_id=vca_id,
6697 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6698 kdu_model=deployed_kdu.get("kdu-model"),
6699 )
6700 kdu_replica_count = instance_num - kdu_delta.get(
6701 "number-of-instances", 1
6702 )
6703
6704 if kdu_replica_count < min_instance_count < instance_num:
6705 kdu_replica_count = min_instance_count
6706 if kdu_replica_count < min_instance_count:
6707 raise LcmException(
6708 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6709 "scaling-group-descriptor '{}'".format(
6710 instance_num, scaling_group
6711 )
6712 )
6713
6714 for x in range(kdu_delta.get("number-of-instances", 1)):
6715 vca_scaling_info.append(
6716 {
6717 "osm_kdu_id": kdu_name,
6718 "member-vnf-index": vnf_index,
6719 "type": "delete",
6720 "kdu_index": instance_num - x - 1,
6721 }
6722 )
6723 scaling_info["kdu-delete"][kdu_name].append(
6724 {
6725 "member-vnf-index": vnf_index,
6726 "type": "delete",
6727 "k8s-cluster-type": k8s_cluster_type,
6728 "resource-name": resource_name,
6729 "scale": kdu_replica_count,
6730 }
6731 )
6732
6733 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6734 vdu_delete = copy(scaling_info.get("vdu-delete"))
6735 if scaling_info["scaling_direction"] == "IN":
6736 for vdur in reversed(db_vnfr["vdur"]):
6737 if vdu_delete.get(vdur["vdu-id-ref"]):
6738 vdu_delete[vdur["vdu-id-ref"]] -= 1
6739 scaling_info["vdu"].append(
6740 {
6741 "name": vdur.get("name") or vdur.get("vdu-name"),
6742 "vdu_id": vdur["vdu-id-ref"],
6743 "interface": [],
6744 }
6745 )
6746 for interface in vdur["interfaces"]:
6747 scaling_info["vdu"][-1]["interface"].append(
6748 {
6749 "name": interface["name"],
6750 "ip_address": interface["ip-address"],
6751 "mac_address": interface.get("mac-address"),
6752 }
6753 )
6754 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6755
6756 # PRE-SCALE BEGIN
6757 step = "Executing pre-scale vnf-config-primitive"
6758 if scaling_descriptor.get("scaling-config-action"):
6759 for scaling_config_action in scaling_descriptor[
6760 "scaling-config-action"
6761 ]:
6762 if (
6763 scaling_config_action.get("trigger") == "pre-scale-in"
6764 and scaling_type == "SCALE_IN"
6765 ) or (
6766 scaling_config_action.get("trigger") == "pre-scale-out"
6767 and scaling_type == "SCALE_OUT"
6768 ):
6769 vnf_config_primitive = scaling_config_action[
6770 "vnf-config-primitive-name-ref"
6771 ]
6772 step = db_nslcmop_update[
6773 "detailed-status"
6774 ] = "executing pre-scale scaling-config-action '{}'".format(
6775 vnf_config_primitive
6776 )
6777
6778 # look for primitive
6779 for config_primitive in (
6780 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6781 ).get("config-primitive", ()):
6782 if config_primitive["name"] == vnf_config_primitive:
6783 break
6784 else:
6785 raise LcmException(
6786 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6787 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6788 "primitive".format(scaling_group, vnf_config_primitive)
6789 )
6790
6791 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6792 if db_vnfr.get("additionalParamsForVnf"):
6793 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6794
6795 scale_process = "VCA"
6796 db_nsr_update["config-status"] = "configuring pre-scaling"
6797 primitive_params = self._map_primitive_params(
6798 config_primitive, {}, vnfr_params
6799 )
6800
6801 # Pre-scale retry check: Check if this sub-operation has been executed before
6802 op_index = self._check_or_add_scale_suboperation(
6803 db_nslcmop,
6804 vnf_index,
6805 vnf_config_primitive,
6806 primitive_params,
6807 "PRE-SCALE",
6808 )
6809 if op_index == self.SUBOPERATION_STATUS_SKIP:
6810 # Skip sub-operation
6811 result = "COMPLETED"
6812 result_detail = "Done"
6813 self.logger.debug(
6814 logging_text
6815 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6816 vnf_config_primitive, result, result_detail
6817 )
6818 )
6819 else:
6820 if op_index == self.SUBOPERATION_STATUS_NEW:
6821 # New sub-operation: Get index of this sub-operation
6822 op_index = (
6823 len(db_nslcmop.get("_admin", {}).get("operations"))
6824 - 1
6825 )
6826 self.logger.debug(
6827 logging_text
6828 + "vnf_config_primitive={} New sub-operation".format(
6829 vnf_config_primitive
6830 )
6831 )
6832 else:
6833 # retry: Get registered params for this existing sub-operation
6834 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6835 op_index
6836 ]
6837 vnf_index = op.get("member_vnf_index")
6838 vnf_config_primitive = op.get("primitive")
6839 primitive_params = op.get("primitive_params")
6840 self.logger.debug(
6841 logging_text
6842 + "vnf_config_primitive={} Sub-operation retry".format(
6843 vnf_config_primitive
6844 )
6845 )
6846 # Execute the primitive, either with new (first-time) or registered (reintent) args
6847 ee_descriptor_id = config_primitive.get(
6848 "execution-environment-ref"
6849 )
6850 primitive_name = config_primitive.get(
6851 "execution-environment-primitive", vnf_config_primitive
6852 )
6853 ee_id, vca_type = self._look_for_deployed_vca(
6854 nsr_deployed["VCA"],
6855 member_vnf_index=vnf_index,
6856 vdu_id=None,
6857 vdu_count_index=None,
6858 ee_descriptor_id=ee_descriptor_id,
6859 )
6860 result, result_detail = await self._ns_execute_primitive(
6861 ee_id,
6862 primitive_name,
6863 primitive_params,
6864 vca_type=vca_type,
6865 vca_id=vca_id,
6866 )
6867 self.logger.debug(
6868 logging_text
6869 + "vnf_config_primitive={} Done with result {} {}".format(
6870 vnf_config_primitive, result, result_detail
6871 )
6872 )
6873 # Update operationState = COMPLETED | FAILED
6874 self._update_suboperation_status(
6875 db_nslcmop, op_index, result, result_detail
6876 )
6877
6878 if result == "FAILED":
6879 raise LcmException(result_detail)
6880 db_nsr_update["config-status"] = old_config_status
6881 scale_process = None
6882 # PRE-SCALE END
6883
6884 db_nsr_update[
6885 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6886 ] = nb_scale_op
6887 db_nsr_update[
6888 "_admin.scaling-group.{}.time".format(admin_scale_index)
6889 ] = time()
6890
6891 # SCALE-IN VCA - BEGIN
6892 if vca_scaling_info:
6893 step = db_nslcmop_update[
6894 "detailed-status"
6895 ] = "Deleting the execution environments"
6896 scale_process = "VCA"
6897 for vca_info in vca_scaling_info:
6898 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6899 member_vnf_index = str(vca_info["member-vnf-index"])
6900 self.logger.debug(
6901 logging_text + "vdu info: {}".format(vca_info)
6902 )
6903 if vca_info.get("osm_vdu_id"):
6904 vdu_id = vca_info["osm_vdu_id"]
6905 vdu_index = int(vca_info["vdu_index"])
6906 stage[
6907 1
6908 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6909 member_vnf_index, vdu_id, vdu_index
6910 )
6911 stage[2] = step = "Scaling in VCA"
6912 self._write_op_status(op_id=nslcmop_id, stage=stage)
6913 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6914 config_update = db_nsr["configurationStatus"]
6915 for vca_index, vca in enumerate(vca_update):
6916 if (
6917 (vca or vca.get("ee_id"))
6918 and vca["member-vnf-index"] == member_vnf_index
6919 and vca["vdu_count_index"] == vdu_index
6920 ):
6921 if vca.get("vdu_id"):
6922 config_descriptor = get_configuration(
6923 db_vnfd, vca.get("vdu_id")
6924 )
6925 elif vca.get("kdu_name"):
6926 config_descriptor = get_configuration(
6927 db_vnfd, vca.get("kdu_name")
6928 )
6929 else:
6930 config_descriptor = get_configuration(
6931 db_vnfd, db_vnfd["id"]
6932 )
6933 operation_params = (
6934 db_nslcmop.get("operationParams") or {}
6935 )
6936 exec_terminate_primitives = not operation_params.get(
6937 "skip_terminate_primitives"
6938 ) and vca.get("needed_terminate")
6939 task = asyncio.ensure_future(
6940 asyncio.wait_for(
6941 self.destroy_N2VC(
6942 logging_text,
6943 db_nslcmop,
6944 vca,
6945 config_descriptor,
6946 vca_index,
6947 destroy_ee=True,
6948 exec_primitives=exec_terminate_primitives,
6949 scaling_in=True,
6950 vca_id=vca_id,
6951 ),
6952 timeout=self.timeout_charm_delete,
6953 )
6954 )
6955 tasks_dict_info[task] = "Terminating VCA {}".format(
6956 vca.get("ee_id")
6957 )
6958 del vca_update[vca_index]
6959 del config_update[vca_index]
6960 # wait for pending tasks of terminate primitives
6961 if tasks_dict_info:
6962 self.logger.debug(
6963 logging_text
6964 + "Waiting for tasks {}".format(
6965 list(tasks_dict_info.keys())
6966 )
6967 )
6968 error_list = await self._wait_for_tasks(
6969 logging_text,
6970 tasks_dict_info,
6971 min(
6972 self.timeout_charm_delete, self.timeout_ns_terminate
6973 ),
6974 stage,
6975 nslcmop_id,
6976 )
6977 tasks_dict_info.clear()
6978 if error_list:
6979 raise LcmException("; ".join(error_list))
6980
6981 db_vca_and_config_update = {
6982 "_admin.deployed.VCA": vca_update,
6983 "configurationStatus": config_update,
6984 }
6985 self.update_db_2(
6986 "nsrs", db_nsr["_id"], db_vca_and_config_update
6987 )
6988 scale_process = None
6989 # SCALE-IN VCA - END
6990
6991 # SCALE RO - BEGIN
6992 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6993 scale_process = "RO"
6994 if self.ro_config.get("ng"):
6995 await self._scale_ng_ro(
6996 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6997 )
6998 scaling_info.pop("vdu-create", None)
6999 scaling_info.pop("vdu-delete", None)
7000
7001 scale_process = None
7002 # SCALE RO - END
7003
7004 # SCALE KDU - BEGIN
7005 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
7006 scale_process = "KDU"
7007 await self._scale_kdu(
7008 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7009 )
7010 scaling_info.pop("kdu-create", None)
7011 scaling_info.pop("kdu-delete", None)
7012
7013 scale_process = None
7014 # SCALE KDU - END
7015
7016 if db_nsr_update:
7017 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7018
7019 # SCALE-UP VCA - BEGIN
7020 if vca_scaling_info:
7021 step = db_nslcmop_update[
7022 "detailed-status"
7023 ] = "Creating new execution environments"
7024 scale_process = "VCA"
7025 for vca_info in vca_scaling_info:
7026 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7027 member_vnf_index = str(vca_info["member-vnf-index"])
7028 self.logger.debug(
7029 logging_text + "vdu info: {}".format(vca_info)
7030 )
7031 vnfd_id = db_vnfr["vnfd-ref"]
7032 if vca_info.get("osm_vdu_id"):
7033 vdu_index = int(vca_info["vdu_index"])
7034 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7035 if db_vnfr.get("additionalParamsForVnf"):
7036 deploy_params.update(
7037 parse_yaml_strings(
7038 db_vnfr["additionalParamsForVnf"].copy()
7039 )
7040 )
7041 descriptor_config = get_configuration(
7042 db_vnfd, db_vnfd["id"]
7043 )
7044 if descriptor_config:
7045 vdu_id = None
7046 vdu_name = None
7047 kdu_name = None
7048 self._deploy_n2vc(
7049 logging_text=logging_text
7050 + "member_vnf_index={} ".format(member_vnf_index),
7051 db_nsr=db_nsr,
7052 db_vnfr=db_vnfr,
7053 nslcmop_id=nslcmop_id,
7054 nsr_id=nsr_id,
7055 nsi_id=nsi_id,
7056 vnfd_id=vnfd_id,
7057 vdu_id=vdu_id,
7058 kdu_name=kdu_name,
7059 member_vnf_index=member_vnf_index,
7060 vdu_index=vdu_index,
7061 vdu_name=vdu_name,
7062 deploy_params=deploy_params,
7063 descriptor_config=descriptor_config,
7064 base_folder=base_folder,
7065 task_instantiation_info=tasks_dict_info,
7066 stage=stage,
7067 )
7068 vdu_id = vca_info["osm_vdu_id"]
7069 vdur = find_in_list(
7070 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7071 )
7072 descriptor_config = get_configuration(db_vnfd, vdu_id)
7073 if vdur.get("additionalParams"):
7074 deploy_params_vdu = parse_yaml_strings(
7075 vdur["additionalParams"]
7076 )
7077 else:
7078 deploy_params_vdu = deploy_params
7079 deploy_params_vdu["OSM"] = get_osm_params(
7080 db_vnfr, vdu_id, vdu_count_index=vdu_index
7081 )
7082 if descriptor_config:
7083 vdu_name = None
7084 kdu_name = None
7085 stage[
7086 1
7087 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7088 member_vnf_index, vdu_id, vdu_index
7089 )
7090 stage[2] = step = "Scaling out VCA"
7091 self._write_op_status(op_id=nslcmop_id, stage=stage)
7092 self._deploy_n2vc(
7093 logging_text=logging_text
7094 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7095 member_vnf_index, vdu_id, vdu_index
7096 ),
7097 db_nsr=db_nsr,
7098 db_vnfr=db_vnfr,
7099 nslcmop_id=nslcmop_id,
7100 nsr_id=nsr_id,
7101 nsi_id=nsi_id,
7102 vnfd_id=vnfd_id,
7103 vdu_id=vdu_id,
7104 kdu_name=kdu_name,
7105 member_vnf_index=member_vnf_index,
7106 vdu_index=vdu_index,
7107 vdu_name=vdu_name,
7108 deploy_params=deploy_params_vdu,
7109 descriptor_config=descriptor_config,
7110 base_folder=base_folder,
7111 task_instantiation_info=tasks_dict_info,
7112 stage=stage,
7113 )
7114 # SCALE-UP VCA - END
7115 scale_process = None
7116
7117 # POST-SCALE BEGIN
7118 # execute primitive service POST-SCALING
7119 step = "Executing post-scale vnf-config-primitive"
7120 if scaling_descriptor.get("scaling-config-action"):
7121 for scaling_config_action in scaling_descriptor[
7122 "scaling-config-action"
7123 ]:
7124 if (
7125 scaling_config_action.get("trigger") == "post-scale-in"
7126 and scaling_type == "SCALE_IN"
7127 ) or (
7128 scaling_config_action.get("trigger") == "post-scale-out"
7129 and scaling_type == "SCALE_OUT"
7130 ):
7131 vnf_config_primitive = scaling_config_action[
7132 "vnf-config-primitive-name-ref"
7133 ]
7134 step = db_nslcmop_update[
7135 "detailed-status"
7136 ] = "executing post-scale scaling-config-action '{}'".format(
7137 vnf_config_primitive
7138 )
7139
7140 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7141 if db_vnfr.get("additionalParamsForVnf"):
7142 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7143
7144 # look for primitive
7145 for config_primitive in (
7146 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7147 ).get("config-primitive", ()):
7148 if config_primitive["name"] == vnf_config_primitive:
7149 break
7150 else:
7151 raise LcmException(
7152 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7153 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7154 "config-primitive".format(
7155 scaling_group, vnf_config_primitive
7156 )
7157 )
7158 scale_process = "VCA"
7159 db_nsr_update["config-status"] = "configuring post-scaling"
7160 primitive_params = self._map_primitive_params(
7161 config_primitive, {}, vnfr_params
7162 )
7163
7164 # Post-scale retry check: Check if this sub-operation has been executed before
7165 op_index = self._check_or_add_scale_suboperation(
7166 db_nslcmop,
7167 vnf_index,
7168 vnf_config_primitive,
7169 primitive_params,
7170 "POST-SCALE",
7171 )
7172 if op_index == self.SUBOPERATION_STATUS_SKIP:
7173 # Skip sub-operation
7174 result = "COMPLETED"
7175 result_detail = "Done"
7176 self.logger.debug(
7177 logging_text
7178 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7179 vnf_config_primitive, result, result_detail
7180 )
7181 )
7182 else:
7183 if op_index == self.SUBOPERATION_STATUS_NEW:
7184 # New sub-operation: Get index of this sub-operation
7185 op_index = (
7186 len(db_nslcmop.get("_admin", {}).get("operations"))
7187 - 1
7188 )
7189 self.logger.debug(
7190 logging_text
7191 + "vnf_config_primitive={} New sub-operation".format(
7192 vnf_config_primitive
7193 )
7194 )
7195 else:
7196 # retry: Get registered params for this existing sub-operation
7197 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7198 op_index
7199 ]
7200 vnf_index = op.get("member_vnf_index")
7201 vnf_config_primitive = op.get("primitive")
7202 primitive_params = op.get("primitive_params")
7203 self.logger.debug(
7204 logging_text
7205 + "vnf_config_primitive={} Sub-operation retry".format(
7206 vnf_config_primitive
7207 )
7208 )
7209 # Execute the primitive, either with new (first-time) or registered (reintent) args
7210 ee_descriptor_id = config_primitive.get(
7211 "execution-environment-ref"
7212 )
7213 primitive_name = config_primitive.get(
7214 "execution-environment-primitive", vnf_config_primitive
7215 )
7216 ee_id, vca_type = self._look_for_deployed_vca(
7217 nsr_deployed["VCA"],
7218 member_vnf_index=vnf_index,
7219 vdu_id=None,
7220 vdu_count_index=None,
7221 ee_descriptor_id=ee_descriptor_id,
7222 )
7223 result, result_detail = await self._ns_execute_primitive(
7224 ee_id,
7225 primitive_name,
7226 primitive_params,
7227 vca_type=vca_type,
7228 vca_id=vca_id,
7229 )
7230 self.logger.debug(
7231 logging_text
7232 + "vnf_config_primitive={} Done with result {} {}".format(
7233 vnf_config_primitive, result, result_detail
7234 )
7235 )
7236 # Update operationState = COMPLETED | FAILED
7237 self._update_suboperation_status(
7238 db_nslcmop, op_index, result, result_detail
7239 )
7240
7241 if result == "FAILED":
7242 raise LcmException(result_detail)
7243 db_nsr_update["config-status"] = old_config_status
7244 scale_process = None
7245 # POST-SCALE END
7246
7247 db_nsr_update[
7248 "detailed-status"
7249 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7250 db_nsr_update["operational-status"] = (
7251 "running"
7252 if old_operational_status == "failed"
7253 else old_operational_status
7254 )
7255 db_nsr_update["config-status"] = old_config_status
7256 return
7257 except (
7258 ROclient.ROClientException,
7259 DbException,
7260 LcmException,
7261 NgRoException,
7262 ) as e:
7263 self.logger.error(logging_text + "Exit Exception {}".format(e))
7264 exc = e
7265 except asyncio.CancelledError:
7266 self.logger.error(
7267 logging_text + "Cancelled Exception while '{}'".format(step)
7268 )
7269 exc = "Operation was cancelled"
7270 except Exception as e:
7271 exc = traceback.format_exc()
7272 self.logger.critical(
7273 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7274 exc_info=True,
7275 )
7276 finally:
7277 self._write_ns_status(
7278 nsr_id=nsr_id,
7279 ns_state=None,
7280 current_operation="IDLE",
7281 current_operation_id=None,
7282 )
7283 if tasks_dict_info:
7284 stage[1] = "Waiting for instantiate pending tasks."
7285 self.logger.debug(logging_text + stage[1])
7286 exc = await self._wait_for_tasks(
7287 logging_text,
7288 tasks_dict_info,
7289 self.timeout_ns_deploy,
7290 stage,
7291 nslcmop_id,
7292 nsr_id=nsr_id,
7293 )
7294 if exc:
7295 db_nslcmop_update[
7296 "detailed-status"
7297 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7298 nslcmop_operation_state = "FAILED"
7299 if db_nsr:
7300 db_nsr_update["operational-status"] = old_operational_status
7301 db_nsr_update["config-status"] = old_config_status
7302 db_nsr_update["detailed-status"] = ""
7303 if scale_process:
7304 if "VCA" in scale_process:
7305 db_nsr_update["config-status"] = "failed"
7306 if "RO" in scale_process:
7307 db_nsr_update["operational-status"] = "failed"
7308 db_nsr_update[
7309 "detailed-status"
7310 ] = "FAILED scaling nslcmop={} {}: {}".format(
7311 nslcmop_id, step, exc
7312 )
7313 else:
7314 error_description_nslcmop = None
7315 nslcmop_operation_state = "COMPLETED"
7316 db_nslcmop_update["detailed-status"] = "Done"
7317
7318 self._write_op_status(
7319 op_id=nslcmop_id,
7320 stage="",
7321 error_message=error_description_nslcmop,
7322 operation_state=nslcmop_operation_state,
7323 other_update=db_nslcmop_update,
7324 )
7325 if db_nsr:
7326 self._write_ns_status(
7327 nsr_id=nsr_id,
7328 ns_state=None,
7329 current_operation="IDLE",
7330 current_operation_id=None,
7331 other_update=db_nsr_update,
7332 )
7333
7334 if nslcmop_operation_state:
7335 try:
7336 msg = {
7337 "nsr_id": nsr_id,
7338 "nslcmop_id": nslcmop_id,
7339 "operationState": nslcmop_operation_state,
7340 }
7341 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7342 except Exception as e:
7343 self.logger.error(
7344 logging_text + "kafka_write notification Exception {}".format(e)
7345 )
7346 self.logger.debug(logging_text + "Exit")
7347 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7348
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale KDU applications on their K8s clusters.

        For each KDU instance referenced in ``scaling_info`` this runs, in
        order: the "terminate-config-primitive" list (scale-in only), the
        actual K8s scale operation, and the "initial-config-primitive" list
        (scale-out only). Primitives are skipped when the KDU is managed
        through a juju execution environment.

        :param logging_text: prefix for every log line
        :param nsr_id: NS record _id, used to address the db status path
        :param nsr_deployed: content of nsr "_admin.deployed" with K8s info
        :param db_vnfd: VNF descriptor holding the KDU configuration
        :param vca_id: VCA id forwarded to primitive/scale executions
        :param scaling_info: dict with "kdu-create" / "kdu-delete" keys, each
            mapping kdu_name -> list of per-instance scaling data
        """
        # NOTE(review): "kdu-create" wins when both keys are present, so a
        # request carrying create AND delete entries would silently ignore
        # the deletions — confirm callers never mix them in one operation.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU record and its index inside
                # "_admin.deployed.K8s" for this member vnf
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                # selects the proper client (helm/juju) from k8scluster_map
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # where the k8s connector writes its progress/status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run terminate primitives before scaling in, ordered by
                    # "seq", unless the KDU uses a juju execution environment
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung connector;
                            # its timeout is a factor above the inner one
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout_primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout_primitive
                                * self.timeout_primitive_outer_factor,
                            )

                # the actual scale request, common to create and delete
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout_scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_scale_on_error
                    * self.timeout_scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run initial primitives after scaling out, ordered by
                    # "seq", unless the KDU uses a juju execution environment
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): hard-coded 600 s here, unlike the
                            # configurable timeout used for terminate
                            # primitives above — confirm this is intentional
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7458
7459 async def _scale_ng_ro(
7460 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7461 ):
7462 nsr_id = db_nslcmop["nsInstanceId"]
7463 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7464 db_vnfrs = {}
7465
7466 # read from db: vnfd's for every vnf
7467 db_vnfds = []
7468
7469 # for each vnf in ns, read vnfd
7470 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7471 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7472 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7473 # if we haven't this vnfd, read it from db
7474 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7475 # read from db
7476 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7477 db_vnfds.append(vnfd)
7478 n2vc_key = self.n2vc.get_public_key()
7479 n2vc_key_list = [n2vc_key]
7480 self.scale_vnfr(
7481 db_vnfr,
7482 vdu_scaling_info.get("vdu-create"),
7483 vdu_scaling_info.get("vdu-delete"),
7484 mark_delete=True,
7485 )
7486 # db_vnfr has been updated, update db_vnfrs to use it
7487 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7488 await self._instantiate_ng_ro(
7489 logging_text,
7490 nsr_id,
7491 db_nsd,
7492 db_nsr,
7493 db_nslcmop,
7494 db_vnfrs,
7495 db_vnfds,
7496 n2vc_key_list,
7497 stage=stage,
7498 start_deploy=time(),
7499 timeout_ns_deploy=self.timeout_ns_deploy,
7500 )
7501 if vdu_scaling_info.get("vdu-delete"):
7502 self.scale_vnfr(
7503 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7504 )
7505
7506 async def extract_prometheus_scrape_jobs(
7507 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7508 ):
7509 # look if exist a file called 'prometheus*.j2' and
7510 artifact_content = self.fs.dir_ls(artifact_path)
7511 job_file = next(
7512 (
7513 f
7514 for f in artifact_content
7515 if f.startswith("prometheus") and f.endswith(".j2")
7516 ),
7517 None,
7518 )
7519 if not job_file:
7520 return
7521 with self.fs.file_open((artifact_path, job_file), "r") as f:
7522 job_data = f.read()
7523
7524 # TODO get_service
7525 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7526 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7527 host_port = "80"
7528 vnfr_id = vnfr_id.replace("-", "")
7529 variables = {
7530 "JOB_NAME": vnfr_id,
7531 "TARGET_IP": target_ip,
7532 "EXPORTER_POD_IP": host_name,
7533 "EXPORTER_POD_PORT": host_port,
7534 }
7535 job_list = parse_job(job_data, variables)
7536 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7537 for job in job_list:
7538 if (
7539 not isinstance(job.get("job_name"), str)
7540 or vnfr_id not in job["job_name"]
7541 ):
7542 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7543 job["nsr_id"] = nsr_id
7544 job["vnfr_id"] = vnfr_id
7545 return job_list
7546
7547 async def rebuild_start_stop(
7548 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7549 ):
7550 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7551 self.logger.info(logging_text + "Enter")
7552 stage = ["Preparing the environment", ""]
7553 # database nsrs record
7554 db_nsr_update = {}
7555 vdu_vim_name = None
7556 vim_vm_id = None
7557 # in case of error, indicates what part of scale was failed to put nsr at error status
7558 start_deploy = time()
7559 try:
7560 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7561 vim_account_id = db_vnfr.get("vim-account-id")
7562 vim_info_key = "vim:" + vim_account_id
7563 vdu_id = additional_param["vdu_id"]
7564 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7565 vdur = find_in_list(
7566 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7567 )
7568 if vdur:
7569 vdu_vim_name = vdur["name"]
7570 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7571 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7572 else:
7573 raise LcmException("Target vdu is not found")
7574 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7575 # wait for any previous tasks in process
7576 stage[1] = "Waiting for previous operations to terminate"
7577 self.logger.info(stage[1])
7578 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7579
7580 stage[1] = "Reading from database."
7581 self.logger.info(stage[1])
7582 self._write_ns_status(
7583 nsr_id=nsr_id,
7584 ns_state=None,
7585 current_operation=operation_type.upper(),
7586 current_operation_id=nslcmop_id,
7587 )
7588 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7589
7590 # read from db: ns
7591 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7592 db_nsr_update["operational-status"] = operation_type
7593 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7594 # Payload for RO
7595 desc = {
7596 operation_type: {
7597 "vim_vm_id": vim_vm_id,
7598 "vnf_id": vnf_id,
7599 "vdu_index": additional_param["count-index"],
7600 "vdu_id": vdur["id"],
7601 "target_vim": target_vim,
7602 "vim_account_id": vim_account_id,
7603 }
7604 }
7605 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7606 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7607 self.logger.info("ro nsr id: {}".format(nsr_id))
7608 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7609 self.logger.info("response from RO: {}".format(result_dict))
7610 action_id = result_dict["action_id"]
7611 await self._wait_ng_ro(
7612 nsr_id,
7613 action_id,
7614 nslcmop_id,
7615 start_deploy,
7616 self.timeout_operate,
7617 None,
7618 "start_stop_rebuild",
7619 )
7620 return "COMPLETED", "Done"
7621 except (ROclient.ROClientException, DbException, LcmException) as e:
7622 self.logger.error("Exit Exception {}".format(e))
7623 exc = e
7624 except asyncio.CancelledError:
7625 self.logger.error("Cancelled Exception while '{}'".format(stage))
7626 exc = "Operation was cancelled"
7627 except Exception as e:
7628 exc = traceback.format_exc()
7629 self.logger.critical(
7630 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7631 )
7632 return "FAILED", "Error in operate VNF {}".format(exc)
7633
7634 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7635 """
7636 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7637
7638 :param: vim_account_id: VIM Account ID
7639
7640 :return: (cloud_name, cloud_credential)
7641 """
7642 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7643 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7644
7645 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7646 """
7647 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7648
7649 :param: vim_account_id: VIM Account ID
7650
7651 :return: (cloud_name, cloud_credential)
7652 """
7653 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7654 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7655
7656 async def migrate(self, nsr_id, nslcmop_id):
7657 """
7658 Migrate VNFs and VDUs instances in a NS
7659
7660 :param: nsr_id: NS Instance ID
7661 :param: nslcmop_id: nslcmop ID of migrate
7662
7663 """
7664 # Try to lock HA task here
7665 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7666 if not task_is_locked_by_me:
7667 return
7668 logging_text = "Task ns={} migrate ".format(nsr_id)
7669 self.logger.debug(logging_text + "Enter")
7670 # get all needed from database
7671 db_nslcmop = None
7672 db_nslcmop_update = {}
7673 nslcmop_operation_state = None
7674 db_nsr_update = {}
7675 target = {}
7676 exc = None
7677 # in case of error, indicates what part of scale was failed to put nsr at error status
7678 start_deploy = time()
7679
7680 try:
7681 # wait for any previous tasks in process
7682 step = "Waiting for previous operations to terminate"
7683 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7684
7685 self._write_ns_status(
7686 nsr_id=nsr_id,
7687 ns_state=None,
7688 current_operation="MIGRATING",
7689 current_operation_id=nslcmop_id,
7690 )
7691 step = "Getting nslcmop from database"
7692 self.logger.debug(
7693 step + " after having waited for previous tasks to be completed"
7694 )
7695 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7696 migrate_params = db_nslcmop.get("operationParams")
7697
7698 target = {}
7699 target.update(migrate_params)
7700 desc = await self.RO.migrate(nsr_id, target)
7701 self.logger.debug("RO return > {}".format(desc))
7702 action_id = desc["action_id"]
7703 await self._wait_ng_ro(
7704 nsr_id,
7705 action_id,
7706 nslcmop_id,
7707 start_deploy,
7708 self.timeout_migrate,
7709 operation="migrate",
7710 )
7711 except (ROclient.ROClientException, DbException, LcmException) as e:
7712 self.logger.error("Exit Exception {}".format(e))
7713 exc = e
7714 except asyncio.CancelledError:
7715 self.logger.error("Cancelled Exception while '{}'".format(step))
7716 exc = "Operation was cancelled"
7717 except Exception as e:
7718 exc = traceback.format_exc()
7719 self.logger.critical(
7720 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7721 )
7722 finally:
7723 self._write_ns_status(
7724 nsr_id=nsr_id,
7725 ns_state=None,
7726 current_operation="IDLE",
7727 current_operation_id=None,
7728 )
7729 if exc:
7730 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7731 nslcmop_operation_state = "FAILED"
7732 else:
7733 nslcmop_operation_state = "COMPLETED"
7734 db_nslcmop_update["detailed-status"] = "Done"
7735 db_nsr_update["detailed-status"] = "Done"
7736
7737 self._write_op_status(
7738 op_id=nslcmop_id,
7739 stage="",
7740 error_message="",
7741 operation_state=nslcmop_operation_state,
7742 other_update=db_nslcmop_update,
7743 )
7744 if nslcmop_operation_state:
7745 try:
7746 msg = {
7747 "nsr_id": nsr_id,
7748 "nslcmop_id": nslcmop_id,
7749 "operationState": nslcmop_operation_state,
7750 }
7751 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7752 except Exception as e:
7753 self.logger.error(
7754 logging_text + "kafka_write notification Exception {}".format(e)
7755 )
7756 self.logger.debug(logging_text + "Exit")
7757 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7758
7759 async def heal(self, nsr_id, nslcmop_id):
7760 """
7761 Heal NS
7762
7763 :param nsr_id: ns instance to heal
7764 :param nslcmop_id: operation to run
7765 :return:
7766 """
7767
7768 # Try to lock HA task here
7769 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7770 if not task_is_locked_by_me:
7771 return
7772
7773 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7774 stage = ["", "", ""]
7775 tasks_dict_info = {}
7776 # ^ stage, step, VIM progress
7777 self.logger.debug(logging_text + "Enter")
7778 # get all needed from database
7779 db_nsr = None
7780 db_nslcmop_update = {}
7781 db_nsr_update = {}
7782 db_vnfrs = {} # vnf's info indexed by _id
7783 exc = None
7784 old_operational_status = ""
7785 old_config_status = ""
7786 nsi_id = None
7787 try:
7788 # wait for any previous tasks in process
7789 step = "Waiting for previous operations to terminate"
7790 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7791 self._write_ns_status(
7792 nsr_id=nsr_id,
7793 ns_state=None,
7794 current_operation="HEALING",
7795 current_operation_id=nslcmop_id,
7796 )
7797
7798 step = "Getting nslcmop from database"
7799 self.logger.debug(
7800 step + " after having waited for previous tasks to be completed"
7801 )
7802 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7803
7804 step = "Getting nsr from database"
7805 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7806 old_operational_status = db_nsr["operational-status"]
7807 old_config_status = db_nsr["config-status"]
7808
7809 db_nsr_update = {
7810 "_admin.deployed.RO.operational-status": "healing",
7811 }
7812 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7813
7814 step = "Sending heal order to VIM"
7815 await self.heal_RO(
7816 logging_text=logging_text,
7817 nsr_id=nsr_id,
7818 db_nslcmop=db_nslcmop,
7819 stage=stage,
7820 )
7821 # VCA tasks
7822 # read from db: nsd
7823 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7824 self.logger.debug(logging_text + stage[1])
7825 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7826 self.fs.sync(db_nsr["nsd-id"])
7827 db_nsr["nsd"] = nsd
7828 # read from db: vnfr's of this ns
7829 step = "Getting vnfrs from db"
7830 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7831 for vnfr in db_vnfrs_list:
7832 db_vnfrs[vnfr["_id"]] = vnfr
7833 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7834
7835 # Check for each target VNF
7836 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7837 for target_vnf in target_list:
7838 # Find this VNF in the list from DB
7839 vnfr_id = target_vnf.get("vnfInstanceId", None)
7840 if vnfr_id:
7841 db_vnfr = db_vnfrs[vnfr_id]
7842 vnfd_id = db_vnfr.get("vnfd-id")
7843 vnfd_ref = db_vnfr.get("vnfd-ref")
7844 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7845 base_folder = vnfd["_admin"]["storage"]
7846 vdu_id = None
7847 vdu_index = 0
7848 vdu_name = None
7849 kdu_name = None
7850 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7851 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7852
7853 # Check each target VDU and deploy N2VC
7854 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7855 "vdu", []
7856 )
7857 if not target_vdu_list:
7858 # Codigo nuevo para crear diccionario
7859 target_vdu_list = []
7860 for existing_vdu in db_vnfr.get("vdur"):
7861 vdu_name = existing_vdu.get("vdu-name", None)
7862 vdu_index = existing_vdu.get("count-index", 0)
7863 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7864 "run-day1", False
7865 )
7866 vdu_to_be_healed = {
7867 "vdu-id": vdu_name,
7868 "count-index": vdu_index,
7869 "run-day1": vdu_run_day1,
7870 }
7871 target_vdu_list.append(vdu_to_be_healed)
7872 for target_vdu in target_vdu_list:
7873 deploy_params_vdu = target_vdu
7874 # Set run-day1 vnf level value if not vdu level value exists
7875 if not deploy_params_vdu.get("run-day1") and target_vnf[
7876 "additionalParams"
7877 ].get("run-day1"):
7878 deploy_params_vdu["run-day1"] = target_vnf[
7879 "additionalParams"
7880 ].get("run-day1")
7881 vdu_name = target_vdu.get("vdu-id", None)
7882 # TODO: Get vdu_id from vdud.
7883 vdu_id = vdu_name
7884 # For multi instance VDU count-index is mandatory
7885 # For single session VDU count-indes is 0
7886 vdu_index = target_vdu.get("count-index", 0)
7887
7888 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7889 stage[1] = "Deploying Execution Environments."
7890 self.logger.debug(logging_text + stage[1])
7891
7892 # VNF Level charm. Normal case when proxy charms.
7893 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7894 descriptor_config = get_configuration(vnfd, vnfd_ref)
7895 if descriptor_config:
7896 # Continue if healed machine is management machine
7897 vnf_ip_address = db_vnfr.get("ip-address")
7898 target_instance = None
7899 for instance in db_vnfr.get("vdur", None):
7900 if (
7901 instance["vdu-name"] == vdu_name
7902 and instance["count-index"] == vdu_index
7903 ):
7904 target_instance = instance
7905 break
7906 if vnf_ip_address == target_instance.get("ip-address"):
7907 self._heal_n2vc(
7908 logging_text=logging_text
7909 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7910 member_vnf_index, vdu_name, vdu_index
7911 ),
7912 db_nsr=db_nsr,
7913 db_vnfr=db_vnfr,
7914 nslcmop_id=nslcmop_id,
7915 nsr_id=nsr_id,
7916 nsi_id=nsi_id,
7917 vnfd_id=vnfd_ref,
7918 vdu_id=None,
7919 kdu_name=None,
7920 member_vnf_index=member_vnf_index,
7921 vdu_index=0,
7922 vdu_name=None,
7923 deploy_params=deploy_params_vdu,
7924 descriptor_config=descriptor_config,
7925 base_folder=base_folder,
7926 task_instantiation_info=tasks_dict_info,
7927 stage=stage,
7928 )
7929
7930 # VDU Level charm. Normal case with native charms.
7931 descriptor_config = get_configuration(vnfd, vdu_name)
7932 if descriptor_config:
7933 self._heal_n2vc(
7934 logging_text=logging_text
7935 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7936 member_vnf_index, vdu_name, vdu_index
7937 ),
7938 db_nsr=db_nsr,
7939 db_vnfr=db_vnfr,
7940 nslcmop_id=nslcmop_id,
7941 nsr_id=nsr_id,
7942 nsi_id=nsi_id,
7943 vnfd_id=vnfd_ref,
7944 vdu_id=vdu_id,
7945 kdu_name=kdu_name,
7946 member_vnf_index=member_vnf_index,
7947 vdu_index=vdu_index,
7948 vdu_name=vdu_name,
7949 deploy_params=deploy_params_vdu,
7950 descriptor_config=descriptor_config,
7951 base_folder=base_folder,
7952 task_instantiation_info=tasks_dict_info,
7953 stage=stage,
7954 )
7955
7956 except (
7957 ROclient.ROClientException,
7958 DbException,
7959 LcmException,
7960 NgRoException,
7961 ) as e:
7962 self.logger.error(logging_text + "Exit Exception {}".format(e))
7963 exc = e
7964 except asyncio.CancelledError:
7965 self.logger.error(
7966 logging_text + "Cancelled Exception while '{}'".format(step)
7967 )
7968 exc = "Operation was cancelled"
7969 except Exception as e:
7970 exc = traceback.format_exc()
7971 self.logger.critical(
7972 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7973 exc_info=True,
7974 )
7975 finally:
7976 if tasks_dict_info:
7977 stage[1] = "Waiting for healing pending tasks."
7978 self.logger.debug(logging_text + stage[1])
7979 exc = await self._wait_for_tasks(
7980 logging_text,
7981 tasks_dict_info,
7982 self.timeout_ns_deploy,
7983 stage,
7984 nslcmop_id,
7985 nsr_id=nsr_id,
7986 )
7987 if exc:
7988 db_nslcmop_update[
7989 "detailed-status"
7990 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7991 nslcmop_operation_state = "FAILED"
7992 if db_nsr:
7993 db_nsr_update["operational-status"] = old_operational_status
7994 db_nsr_update["config-status"] = old_config_status
7995 db_nsr_update[
7996 "detailed-status"
7997 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
7998 for task, task_name in tasks_dict_info.items():
7999 if not task.done() or task.cancelled() or task.exception():
8000 if task_name.startswith(self.task_name_deploy_vca):
8001 # A N2VC task is pending
8002 db_nsr_update["config-status"] = "failed"
8003 else:
8004 # RO task is pending
8005 db_nsr_update["operational-status"] = "failed"
8006 else:
8007 error_description_nslcmop = None
8008 nslcmop_operation_state = "COMPLETED"
8009 db_nslcmop_update["detailed-status"] = "Done"
8010 db_nsr_update["detailed-status"] = "Done"
8011 db_nsr_update["operational-status"] = "running"
8012 db_nsr_update["config-status"] = "configured"
8013
8014 self._write_op_status(
8015 op_id=nslcmop_id,
8016 stage="",
8017 error_message=error_description_nslcmop,
8018 operation_state=nslcmop_operation_state,
8019 other_update=db_nslcmop_update,
8020 )
8021 if db_nsr:
8022 self._write_ns_status(
8023 nsr_id=nsr_id,
8024 ns_state=None,
8025 current_operation="IDLE",
8026 current_operation_id=None,
8027 other_update=db_nsr_update,
8028 )
8029
8030 if nslcmop_operation_state:
8031 try:
8032 msg = {
8033 "nsr_id": nsr_id,
8034 "nslcmop_id": nslcmop_id,
8035 "operationState": nslcmop_operation_state,
8036 }
8037 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
8038 except Exception as e:
8039 self.logger.error(
8040 logging_text + "kafka_write notification Exception {}".format(e)
8041 )
8042 self.logger.debug(logging_text + "Exit")
8043 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8044
8045 async def heal_RO(
8046 self,
8047 logging_text,
8048 nsr_id,
8049 db_nslcmop,
8050 stage,
8051 ):
8052 """
8053 Heal at RO
8054 :param logging_text: preffix text to use at logging
8055 :param nsr_id: nsr identity
8056 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8057 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8058 :return: None or exception
8059 """
8060
8061 def get_vim_account(vim_account_id):
8062 nonlocal db_vims
8063 if vim_account_id in db_vims:
8064 return db_vims[vim_account_id]
8065 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8066 db_vims[vim_account_id] = db_vim
8067 return db_vim
8068
8069 try:
8070 start_heal = time()
8071 ns_params = db_nslcmop.get("operationParams")
8072 if ns_params and ns_params.get("timeout_ns_heal"):
8073 timeout_ns_heal = ns_params["timeout_ns_heal"]
8074 else:
8075 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
8076
8077 db_vims = {}
8078
8079 nslcmop_id = db_nslcmop["_id"]
8080 target = {
8081 "action_id": nslcmop_id,
8082 }
8083 self.logger.warning(
8084 "db_nslcmop={} and timeout_ns_heal={}".format(
8085 db_nslcmop, timeout_ns_heal
8086 )
8087 )
8088 target.update(db_nslcmop.get("operationParams", {}))
8089
8090 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8091 desc = await self.RO.recreate(nsr_id, target)
8092 self.logger.debug("RO return > {}".format(desc))
8093 action_id = desc["action_id"]
8094 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8095 await self._wait_ng_ro(
8096 nsr_id,
8097 action_id,
8098 nslcmop_id,
8099 start_heal,
8100 timeout_ns_heal,
8101 stage,
8102 operation="healing",
8103 )
8104
8105 # Updating NSR
8106 db_nsr_update = {
8107 "_admin.deployed.RO.operational-status": "running",
8108 "detailed-status": " ".join(stage),
8109 }
8110 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8111 self._write_op_status(nslcmop_id, stage)
8112 self.logger.debug(
8113 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8114 )
8115
8116 except Exception as e:
8117 stage[2] = "ERROR healing at VIM"
8118 # self.set_vnfr_at_error(db_vnfrs, str(e))
8119 self.logger.error(
8120 "Error healing at VIM {}".format(e),
8121 exc_info=not isinstance(
8122 e,
8123 (
8124 ROclient.ROClientException,
8125 LcmException,
8126 DbException,
8127 NgRoException,
8128 ),
8129 ),
8130 )
8131 raise
8132
8133 def _heal_n2vc(
8134 self,
8135 logging_text,
8136 db_nsr,
8137 db_vnfr,
8138 nslcmop_id,
8139 nsr_id,
8140 nsi_id,
8141 vnfd_id,
8142 vdu_id,
8143 kdu_name,
8144 member_vnf_index,
8145 vdu_index,
8146 vdu_name,
8147 deploy_params,
8148 descriptor_config,
8149 base_folder,
8150 task_instantiation_info,
8151 stage,
8152 ):
8153 # launch instantiate_N2VC in a asyncio task and register task object
8154 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8155 # if not found, create one entry and update database
8156 # fill db_nsr._admin.deployed.VCA.<index>
8157
8158 self.logger.debug(
8159 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
8160 )
8161
8162 charm_name = ""
8163 get_charm_name = False
8164 if "execution-environment-list" in descriptor_config:
8165 ee_list = descriptor_config.get("execution-environment-list", [])
8166 elif "juju" in descriptor_config:
8167 ee_list = [descriptor_config] # ns charms
8168 if "execution-environment-list" not in descriptor_config:
8169 # charm name is only required for ns charms
8170 get_charm_name = True
8171 else: # other types as script are not supported
8172 ee_list = []
8173
8174 for ee_item in ee_list:
8175 self.logger.debug(
8176 logging_text
8177 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8178 ee_item.get("juju"), ee_item.get("helm-chart")
8179 )
8180 )
8181 ee_descriptor_id = ee_item.get("id")
8182 if ee_item.get("juju"):
8183 vca_name = ee_item["juju"].get("charm")
8184 if get_charm_name:
8185 charm_name = self.find_charm_name(db_nsr, str(vca_name))
8186 vca_type = (
8187 "lxc_proxy_charm"
8188 if ee_item["juju"].get("charm") is not None
8189 else "native_charm"
8190 )
8191 if ee_item["juju"].get("cloud") == "k8s":
8192 vca_type = "k8s_proxy_charm"
8193 elif ee_item["juju"].get("proxy") is False:
8194 vca_type = "native_charm"
8195 elif ee_item.get("helm-chart"):
8196 vca_name = ee_item["helm-chart"]
8197 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
8198 vca_type = "helm"
8199 else:
8200 vca_type = "helm-v3"
8201 else:
8202 self.logger.debug(
8203 logging_text + "skipping non juju neither charm configuration"
8204 )
8205 continue
8206
8207 vca_index = -1
8208 for vca_index, vca_deployed in enumerate(
8209 db_nsr["_admin"]["deployed"]["VCA"]
8210 ):
8211 if not vca_deployed:
8212 continue
8213 if (
8214 vca_deployed.get("member-vnf-index") == member_vnf_index
8215 and vca_deployed.get("vdu_id") == vdu_id
8216 and vca_deployed.get("kdu_name") == kdu_name
8217 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8218 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8219 ):
8220 break
8221 else:
8222 # not found, create one.
8223 target = (
8224 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8225 )
8226 if vdu_id:
8227 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8228 elif kdu_name:
8229 target += "/kdu/{}".format(kdu_name)
8230 vca_deployed = {
8231 "target_element": target,
8232 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8233 "member-vnf-index": member_vnf_index,
8234 "vdu_id": vdu_id,
8235 "kdu_name": kdu_name,
8236 "vdu_count_index": vdu_index,
8237 "operational-status": "init", # TODO revise
8238 "detailed-status": "", # TODO revise
8239 "step": "initial-deploy", # TODO revise
8240 "vnfd_id": vnfd_id,
8241 "vdu_name": vdu_name,
8242 "type": vca_type,
8243 "ee_descriptor_id": ee_descriptor_id,
8244 "charm_name": charm_name,
8245 }
8246 vca_index += 1
8247
8248 # create VCA and configurationStatus in db
8249 db_dict = {
8250 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8251 "configurationStatus.{}".format(vca_index): dict(),
8252 }
8253 self.update_db_2("nsrs", nsr_id, db_dict)
8254
8255 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8256
8257 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8258 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8259 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8260
8261 # Launch task
8262 task_n2vc = asyncio.ensure_future(
8263 self.heal_N2VC(
8264 logging_text=logging_text,
8265 vca_index=vca_index,
8266 nsi_id=nsi_id,
8267 db_nsr=db_nsr,
8268 db_vnfr=db_vnfr,
8269 vdu_id=vdu_id,
8270 kdu_name=kdu_name,
8271 vdu_index=vdu_index,
8272 deploy_params=deploy_params,
8273 config_descriptor=descriptor_config,
8274 base_folder=base_folder,
8275 nslcmop_id=nslcmop_id,
8276 stage=stage,
8277 vca_type=vca_type,
8278 vca_name=vca_name,
8279 ee_config_descriptor=ee_item,
8280 )
8281 )
8282 self.lcm_tasks.register(
8283 "ns",
8284 nsr_id,
8285 nslcmop_id,
8286 "instantiate_N2VC-{}".format(vca_index),
8287 task_n2vc,
8288 )
8289 task_instantiation_info[
8290 task_n2vc
8291 ] = self.task_name_deploy_vca + " {}.{}".format(
8292 member_vnf_index or "", vdu_id or ""
8293 )
8294
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Heal one VCA (execution environment) of the NS.

        For native charms the execution environment is re-registered in the
        VCA and its configuration software re-installed; for proxy/helm types
        the existing ee_id is reused and, if required, the ssh public key is
        re-injected into the VM once RO finishes healing. If the 'run-day1'
        deploy parameter is set, the initial (Day-1) config primitives are
        executed again.

        Progress is written to <nsrs>._admin.deployed.VCA.<vca_index> and to
        configurationStatus.<vca_index>.

        :param logging_text: prefix text to use at logging
        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param nsi_id: NSI id or None; used to build the namespace
        :param db_nsr: nsr database content
        :param db_vnfr: vnfr database content (falsy for NS-level charms)
        :param vdu_id: vdu id or None
        :param kdu_name: kdu name or None
        :param vdu_index: count-index of the vdu
        :param config_descriptor: configuration section of the descriptor
        :param deploy_params: primitive parameters; 'rw_mgmt_ip' is added here
        :param base_folder: artifact base folder info ('folder', 'pkg-dir')
        :param nslcmop_id: ns operation id
        :param stage: 3-item stage list written to the operation status
        :param vca_type: native_charm/lxc_proxy_charm/k8s_proxy_charm/helm/helm-v3
        :param vca_name: charm or helm-chart name
        :param ee_config_descriptor: execution-environment item of the descriptor
        :raises LcmException: wrapping any error, after setting status BROKEN
        """
        nsr_id = db_nsr["_id"]
        # dotted database path prefix for this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # database location where the VCA connector writes its detailed status
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            # native charms are deployed on the machine itself: single unit
            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # narrow the element under configuration: NS -> VNF -> VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # wait for the healed VM and get its management IP
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

                # for compatibility with MON/POL modules, the need model and application name at database
                # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
                # Not sure if this need to be done when healing
                """
                ee_id_parts = ee_id.split(".")
                db_nsr_update = {db_update_entry + "ee_id": ee_id}
                if len(ee_id_parts) >= 2:
                    model_name = ee_id_parts[0]
                    application_name = ee_id_parts[1]
                    db_nsr_update[db_update_entry + "model"] = model_name
                    db_nsr_update[db_update_entry + "application"] = application_name
                """

                # n2vc_redesign STEP 3.3
                # Install configuration software. Only for native charms.
                step = "Install configuration Software"

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="INSTALLING SW",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                    # other_update=db_nsr_update,
                    other_update=None,
                )

                # TODO check if already done
                self.logger.debug(logging_text + step)
                config = None
                if vca_type == "native_charm":
                    # the 'config' primitive parameters are applied at install time
                    config_primitive = next(
                        (p for p in initial_config_primitive_list if p["name"] == "config"),
                        None,
                    )
                    if config_primitive:
                        config = self._map_primitive_params(
                            config_primitive, {}, deploy_params
                        )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # expected exception types were already logged at origin; only log
            # the traceback for unexpected ones
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8704
8705 async def _wait_heal_ro(
8706 self,
8707 nsr_id,
8708 timeout=600,
8709 ):
8710 start_time = time()
8711 while time() <= start_time + timeout:
8712 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8713 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8714 "operational-status"
8715 ]
8716 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8717 if operational_status_ro != "healing":
8718 break
8719 await asyncio.sleep(15, loop=self.loop)
8720 else: # timeout_ns_deploy
8721 raise NgRoException("Timeout waiting ns to deploy")
8722
8723 async def vertical_scale(self, nsr_id, nslcmop_id):
8724 """
8725 Vertical Scale the VDUs in a NS
8726
8727 :param: nsr_id: NS Instance ID
8728 :param: nslcmop_id: nslcmop ID of migrate
8729
8730 """
8731 # Try to lock HA task here
8732 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8733 if not task_is_locked_by_me:
8734 return
8735 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8736 self.logger.debug(logging_text + "Enter")
8737 # get all needed from database
8738 db_nslcmop = None
8739 db_nslcmop_update = {}
8740 nslcmop_operation_state = None
8741 db_nsr_update = {}
8742 target = {}
8743 exc = None
8744 # in case of error, indicates what part of scale was failed to put nsr at error status
8745 start_deploy = time()
8746
8747 try:
8748 # wait for any previous tasks in process
8749 step = "Waiting for previous operations to terminate"
8750 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8751
8752 self._write_ns_status(
8753 nsr_id=nsr_id,
8754 ns_state=None,
8755 current_operation="VerticalScale",
8756 current_operation_id=nslcmop_id,
8757 )
8758 step = "Getting nslcmop from database"
8759 self.logger.debug(
8760 step + " after having waited for previous tasks to be completed"
8761 )
8762 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8763 operationParams = db_nslcmop.get("operationParams")
8764 target = {}
8765 target.update(operationParams)
8766 desc = await self.RO.vertical_scale(nsr_id, target)
8767 self.logger.debug("RO return > {}".format(desc))
8768 action_id = desc["action_id"]
8769 await self._wait_ng_ro(
8770 nsr_id,
8771 action_id,
8772 nslcmop_id,
8773 start_deploy,
8774 self.timeout_verticalscale,
8775 operation="verticalscale",
8776 )
8777 except (ROclient.ROClientException, DbException, LcmException) as e:
8778 self.logger.error("Exit Exception {}".format(e))
8779 exc = e
8780 except asyncio.CancelledError:
8781 self.logger.error("Cancelled Exception while '{}'".format(step))
8782 exc = "Operation was cancelled"
8783 except Exception as e:
8784 exc = traceback.format_exc()
8785 self.logger.critical(
8786 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8787 )
8788 finally:
8789 self._write_ns_status(
8790 nsr_id=nsr_id,
8791 ns_state=None,
8792 current_operation="IDLE",
8793 current_operation_id=None,
8794 )
8795 if exc:
8796 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8797 nslcmop_operation_state = "FAILED"
8798 else:
8799 nslcmop_operation_state = "COMPLETED"
8800 db_nslcmop_update["detailed-status"] = "Done"
8801 db_nsr_update["detailed-status"] = "Done"
8802
8803 self._write_op_status(
8804 op_id=nslcmop_id,
8805 stage="",
8806 error_message="",
8807 operation_state=nslcmop_operation_state,
8808 other_update=db_nslcmop_update,
8809 )
8810 if nslcmop_operation_state:
8811 try:
8812 msg = {
8813 "nsr_id": nsr_id,
8814 "nslcmop_id": nslcmop_id,
8815 "operationState": nslcmop_operation_state,
8816 }
8817 await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8818 except Exception as e:
8819 self.logger.error(
8820 logging_text + "kafka_write notification Exception {}".format(e)
8821 )
8822 self.logger.debug(logging_text + "Exit")
8823 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")