614ff46f8ecc740e791d346819eda3bd71ba6559
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 )
34
35 from osm_lcm import ROclient
36 from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41 )
42 from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50 )
51 from osm_lcm.ng_ro import NgRoClient, NgRoException
52 from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
59 check_juju_bundle_existence,
60 get_charm_artifact_path,
61 )
62 from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66 )
67 from osm_lcm.data_utils.vnfd import (
68 get_kdu,
69 get_kdu_services,
70 get_relation_list,
71 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
83 get_kdu_resource_profile,
84 find_software_version,
85 )
86 from osm_lcm.data_utils.list_utils import find_in_list
87 from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92 )
93 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94 from osm_lcm.data_utils.database.vim_account import VimAccountDB
95 from n2vc.definitions import RelationEndpoint
96 from n2vc.k8s_helm_conn import K8sHelmConnector
97 from n2vc.k8s_helm3_conn import K8sHelm3Connector
98 from n2vc.k8s_juju_conn import K8sJujuConnector
99
100 from osm_common.dbbase import DbException
101 from osm_common.fsbase import FsException
102
103 from osm_lcm.data_utils.database.database import Database
104 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105 from osm_lcm.data_utils.wim import (
106 get_sdn_ports,
107 get_target_wim_attrs,
108 select_feasible_wim_account,
109 )
110
111 from n2vc.n2vc_juju_conn import N2VCJujuConnector
112 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
113
114 from osm_lcm.lcm_helm_conn import LCMHelmConn
115 from osm_lcm.osm_config import OsmConfigBuilder
116 from osm_lcm.prometheus import parse_job
117
118 from copy import copy, deepcopy
119 from time import time
120 from uuid import uuid4
121
122 from random import randint
123
124 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
125
126
class NsLcm(LcmBase):
    """Network Service (NS) lifecycle manager.

    Drives NS instantiation, termination, scaling, healing and update
    operations, coordinating the RO (resource orchestrator), N2VC/Juju and
    K8s (helm/juju) connectors and persisting state in the OSM database.
    """

    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate (start/stop) of vnfs
    timeout_verticalscale = 1800  # default global timeout for Vertical Scaling
    # Sentinel status codes for sub-operation lookups (negative so they can
    # never collide with a real list index).
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
147
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging.

        :param msg: message bus handler, forwarded to LcmBase
        :param lcm_tasks: registry of running LCM asyncio tasks
        :param config: two level dictionary with configuration. Top level should contain
            'timeout', 'ro_config' and 'VCA' sections (plus 'database'/'storage'
            consumed by the Database/Filesystem singletons)
        :param loop: asyncio event loop shared by all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # Database/Filesystem are process-wide singletons already initialized elsewhere
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local mutations do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (proxy/native charm execution environments)
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 KDU connector (no DB status callback)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 KDU connector (no DB status callback)
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
        )

        # juju-bundle KDU connector; pushes status updates into nsrs records
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: KDU model type -> k8s connector
        # note "chart" and "helm-chart-v3" both map to helm3
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: VCA (execution environment) type -> connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # dispatch table: operation type -> RO status polling coroutine
        # only "healing" uses the recreate_status endpoint
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
236
237 @staticmethod
238 def increment_ip_mac(ip_mac, vm_index=1):
239 if not isinstance(ip_mac, str):
240 return ip_mac
241 try:
242 # try with ipv4 look for last dot
243 i = ip_mac.rfind(".")
244 if i > 0:
245 i += 1
246 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
247 # try with ipv6 or mac look for last colon. Operate in hex
248 i = ip_mac.rfind(":")
249 if i > 0:
250 i += 1
251 # format in hex, len can be 2 for mac or 4 for ipv6
252 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
253 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
254 )
255 except Exception:
256 pass
257 return None
258
259 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
260 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
261
262 try:
263 # TODO filter RO descriptor fields...
264
265 # write to database
266 db_dict = dict()
267 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
268 db_dict["deploymentStatus"] = ro_descriptor
269 self.update_db_2("nsrs", nsrs_id, db_dict)
270
271 except Exception as e:
272 self.logger.warn(
273 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
274 )
275
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback fired by N2VC when juju data changes; refresh nsrs status.

        Reads the VCA status for the whole NS, stores it under 'vcaStatus' in
        the nsrs record, and derives a READY<->DEGRADED nsState transition from
        the juju machine/application statuses. Best-effort: any error (other
        than cancellation/timeout) is logged and swallowed.

        :param table: db table the update came from (unused beyond logging)
        :param filter: db filter; its '_id' is taken as the nsr_id
        :param path: dotted path of the updated element; its last component is
            expected to be the VCA index
        :param updated_data: the changed data (unused here)
        :param vca_id: optional VCA account id forwarded to n2vc calls
        """
        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:
            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of the update path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so both assignments below raise KeyError, which is
                # silently absorbed by the except clause -> the intended
                # READY/BROKEN reconciliation never reaches the DB. Confirm
                # intended key shape before fixing (e.g. a dotted
                # "configurationStatus.<i>.status" key for update_db_2).
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status: anything but "started" degrades the NS
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status: anything but "running" degrades the NS
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications: anything but "active" degrades the NS
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                # transition only in the two legitimate directions
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            # NOTE(review): if the failure happened before nsr_id was bound
            # (e.g. filter is None), this log line itself raises NameError
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
376
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id; its '_id' key is read unconditionally, so
            despite the None default a dict must be provided (TODO confirm all
            callers pass it)
        :param vca_id: optional VCA account id forwarded to the connectors
        :param cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            # fetch the full KDU status through the connector matching this cluster type
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus, keyed by nsr_id
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict["vcaStatus"],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            # best-effort: status refresh failures must not break the caller
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
425
426 @staticmethod
427 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
428 try:
429 env = Environment(undefined=StrictUndefined, autoescape=True)
430 template = env.from_string(cloud_init_text)
431 return template.render(additional_params or {})
432 except UndefinedError as e:
433 raise LcmException(
434 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
435 "file, must be provided in the instantiation parameters inside the "
436 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
437 )
438 except (TemplateError, TemplateNotFound) as e:
439 raise LcmException(
440 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
441 vnfd_id, vdu_id, e
442 )
443 )
444
445 def _get_vdu_cloud_init_content(self, vdu, vnfd):
446 cloud_init_content = cloud_init_file = None
447 try:
448 if vdu.get("cloud-init-file"):
449 base_folder = vnfd["_admin"]["storage"]
450 if base_folder["pkg-dir"]:
451 cloud_init_file = "{}/{}/cloud_init/{}".format(
452 base_folder["folder"],
453 base_folder["pkg-dir"],
454 vdu["cloud-init-file"],
455 )
456 else:
457 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
458 base_folder["folder"],
459 vdu["cloud-init-file"],
460 )
461 with self.fs.file_open(cloud_init_file, "r") as ci_file:
462 cloud_init_content = ci_file.read()
463 elif vdu.get("cloud-init"):
464 cloud_init_content = vdu["cloud-init"]
465
466 return cloud_init_content
467 except FsException as e:
468 raise LcmException(
469 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
470 vnfd["id"], vdu["id"], cloud_init_file, e
471 )
472 )
473
474 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
475 vdur = next(
476 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
477 )
478 additional_params = vdur.get("additionalParams")
479 return parse_yaml_strings(additional_params)
480
481 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
482 """
483 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
484 :param vnfd: input vnfd
485 :param new_id: overrides vnf id if provided
486 :param additionalParams: Instantiation params for VNFs provided
487 :param nsrId: Id of the NSR
488 :return: copy of vnfd
489 """
490 vnfd_RO = deepcopy(vnfd)
491 # remove unused by RO configuration, monitoring, scaling and internal keys
492 vnfd_RO.pop("_id", None)
493 vnfd_RO.pop("_admin", None)
494 vnfd_RO.pop("monitoring-param", None)
495 vnfd_RO.pop("scaling-group-descriptor", None)
496 vnfd_RO.pop("kdu", None)
497 vnfd_RO.pop("k8s-cluster", None)
498 if new_id:
499 vnfd_RO["id"] = new_id
500
501 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
502 for vdu in get_iterable(vnfd_RO, "vdu"):
503 vdu.pop("cloud-init-file", None)
504 vdu.pop("cloud-init", None)
505 return vnfd_RO
506
507 @staticmethod
508 def ip_profile_2_RO(ip_profile):
509 RO_ip_profile = deepcopy(ip_profile)
510 if "dns-server" in RO_ip_profile:
511 if isinstance(RO_ip_profile["dns-server"], list):
512 RO_ip_profile["dns-address"] = []
513 for ds in RO_ip_profile.pop("dns-server"):
514 RO_ip_profile["dns-address"].append(ds["address"])
515 else:
516 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
517 if RO_ip_profile.get("ip-version") == "ipv4":
518 RO_ip_profile["ip-version"] = "IPv4"
519 if RO_ip_profile.get("ip-version") == "ipv6":
520 RO_ip_profile["ip-version"] = "IPv6"
521 if "dhcp-params" in RO_ip_profile:
522 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
523 return RO_ip_profile
524
525 def _get_ro_vim_id_for_vim_account(self, vim_account):
526 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
527 if db_vim["_admin"]["operationalState"] != "ENABLED":
528 raise LcmException(
529 "VIM={} is not available. operationalState={}".format(
530 vim_account, db_vim["_admin"]["operationalState"]
531 )
532 )
533 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
534 return RO_vim_id
535
536 def get_ro_wim_id_for_wim_account(self, wim_account):
537 if isinstance(wim_account, str):
538 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
539 if db_wim["_admin"]["operationalState"] != "ENABLED":
540 raise LcmException(
541 "WIM={} is not available. operationalState={}".format(
542 wim_account, db_wim["_admin"]["operationalState"]
543 )
544 )
545 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
546 return RO_wim_id
547 else:
548 return wim_account
549
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale-out/scale-in to the vdur list of a vnfr record.

        Scale-out clones the newest existing vdur of each vdu (or the saved
        vdur-template when scaling up from 0 instances) and pushes the copies
        to the DB. Scale-in either marks vdurs as DELETING (mark_delete=True)
        or pulls them from the DB one by one; when the last instance is being
        removed, it is saved as vdur-template for a later scale from 0.
        db_vnfr is refreshed in place from the DB at the end.

        :param db_vnfr: vnfr record (modified in place at the end)
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: mark vdurs DELETING instead of removing them
        :raises LcmException: scaling out a vdu with no vdur and no template
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the newest (last) existing vdur of this vdu
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    # reset runtime fields; the new instance still has to be built
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are derived from the base by offset;
                        # dynamic ones are cleared so the VIM assigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark only the newest vdu_count instances as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
660
661 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
662 """
663 Updates database nsr with the RO info for the created vld
664 :param ns_update_nsr: dictionary to be filled with the updated info
665 :param db_nsr: content of db_nsr. This is also modified
666 :param nsr_desc_RO: nsr descriptor from RO
667 :return: Nothing, LcmException is raised on errors
668 """
669
670 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
671 for net_RO in get_iterable(nsr_desc_RO, "nets"):
672 if vld["id"] != net_RO.get("ns_net_osm_id"):
673 continue
674 vld["vim-id"] = net_RO.get("vim_net_id")
675 vld["name"] = net_RO.get("vim_name")
676 vld["status"] = net_RO.get("status")
677 vld["status-detailed"] = net_RO.get("error_msg")
678 ns_update_nsr["vld.{}".format(vld_index)] = vld
679 break
680 else:
681 raise LcmException(
682 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
683 )
684
685 def set_vnfr_at_error(self, db_vnfrs, error_text):
686 try:
687 for db_vnfr in db_vnfrs.values():
688 vnfr_update = {"status": "ERROR"}
689 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
690 if "status" not in vdur:
691 vdur["status"] = "ERROR"
692 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
693 if error_text:
694 vdur["status-detailed"] = str(error_text)
695 vnfr_update[
696 "vdur.{}.status-detailed".format(vdu_index)
697 ] = "ERROR"
698 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
699 except DbException as e:
700 self.logger.error("Cannot update vnf. {}".format(e))
701
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors

        Matching is done by member_vnf_index for vnfs, by (vdu_osm_id,
        count-index) for vdus, by internal_name for interfaces and by
        vnf_net_osm_id for vlds; every unmatched element is an error (the
        for/else clauses below raise when a loop exhausts without break).
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                # management IP: RO may report several separated by ';', keep the first
                if vnf_RO.get("ip_address"):
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # physical DUs are not deployed by RO; skip them
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # among RO vms with the same vdu id, pick the one whose
                        # ordinal matches this vdur's count-index
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                # no RO interface matched this vnfr interface
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        # no RO vm matched this vdur (id + count-index)
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        # no RO net matched this internal vld
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                # no RO vnf entry matched this member_vnf_index
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
798
799 def _get_ns_config_info(self, nsr_id):
800 """
801 Generates a mapping between vnf,vdu elements and the N2VC id
802 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
803 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
804 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
805 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
806 """
807 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
808 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
809 mapping = {}
810 ns_config_info = {"osm-config-mapping": mapping}
811 for vca in vca_deployed_list:
812 if not vca["member-vnf-index"]:
813 continue
814 if not vca["vdu_id"]:
815 mapping[vca["member-vnf-index"]] = vca["application"]
816 else:
817 mapping[
818 "{}.{}.{}".format(
819 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
820 )
821 ] = vca["application"]
822 return ns_config_info
823
824 async def _instantiate_ng_ro(
825 self,
826 logging_text,
827 nsr_id,
828 nsd,
829 db_nsr,
830 db_nslcmop,
831 db_vnfrs,
832 db_vnfds,
833 n2vc_key_list,
834 stage,
835 start_deploy,
836 timeout_ns_deploy,
837 ):
838 db_vims = {}
839
840 def get_vim_account(vim_account_id):
841 nonlocal db_vims
842 if vim_account_id in db_vims:
843 return db_vims[vim_account_id]
844 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
845 db_vims[vim_account_id] = db_vim
846 return db_vim
847
848 # modify target_vld info with instantiation parameters
849 def parse_vld_instantiation_params(
850 target_vim, target_vld, vld_params, target_sdn
851 ):
852 if vld_params.get("ip-profile"):
853 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
854 "ip-profile"
855 ]
856 if vld_params.get("provider-network"):
857 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
858 "provider-network"
859 ]
860 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
861 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
862 "provider-network"
863 ]["sdn-ports"]
864
865 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
866 # if wim_account_id is specified in vld_params, validate if it is feasible.
867 wim_account_id, db_wim = select_feasible_wim_account(
868 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
869 )
870
871 if wim_account_id:
872 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
873 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
874 # update vld_params with correct WIM account Id
875 vld_params["wimAccountId"] = wim_account_id
876
877 target_wim = "wim:{}".format(wim_account_id)
878 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
879 sdn_ports = get_sdn_ports(vld_params, db_wim)
880 if len(sdn_ports) > 0:
881 target_vld["vim_info"][target_wim] = target_wim_attrs
882 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
883
884 self.logger.debug(
885 "Target VLD with WIM data: {:s}".format(str(target_vld))
886 )
887
888 for param in ("vim-network-name", "vim-network-id"):
889 if vld_params.get(param):
890 if isinstance(vld_params[param], dict):
891 for vim, vim_net in vld_params[param].items():
892 other_target_vim = "vim:" + vim
893 populate_dict(
894 target_vld["vim_info"],
895 (other_target_vim, param.replace("-", "_")),
896 vim_net,
897 )
898 else: # isinstance str
899 target_vld["vim_info"][target_vim][
900 param.replace("-", "_")
901 ] = vld_params[param]
902 if vld_params.get("common_id"):
903 target_vld["common_id"] = vld_params.get("common_id")
904
905 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
906 def update_ns_vld_target(target, ns_params):
907 for vnf_params in ns_params.get("vnf", ()):
908 if vnf_params.get("vimAccountId"):
909 target_vnf = next(
910 (
911 vnfr
912 for vnfr in db_vnfrs.values()
913 if vnf_params["member-vnf-index"]
914 == vnfr["member-vnf-index-ref"]
915 ),
916 None,
917 )
918 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
919 for a_index, a_vld in enumerate(target["ns"]["vld"]):
920 target_vld = find_in_list(
921 get_iterable(vdur, "interfaces"),
922 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
923 )
924
925 vld_params = find_in_list(
926 get_iterable(ns_params, "vld"),
927 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
928 )
929 if target_vld:
930 if vnf_params.get("vimAccountId") not in a_vld.get(
931 "vim_info", {}
932 ):
933 target_vim_network_list = [
934 v for _, v in a_vld.get("vim_info").items()
935 ]
936 target_vim_network_name = next(
937 (
938 item.get("vim_network_name", "")
939 for item in target_vim_network_list
940 ),
941 "",
942 )
943
944 target["ns"]["vld"][a_index].get("vim_info").update(
945 {
946 "vim:{}".format(vnf_params["vimAccountId"]): {
947 "vim_network_name": target_vim_network_name,
948 }
949 }
950 )
951
952 if vld_params:
953 for param in ("vim-network-name", "vim-network-id"):
954 if vld_params.get(param) and isinstance(
955 vld_params[param], dict
956 ):
957 for vim, vim_net in vld_params[
958 param
959 ].items():
960 other_target_vim = "vim:" + vim
961 populate_dict(
962 target["ns"]["vld"][a_index].get(
963 "vim_info"
964 ),
965 (
966 other_target_vim,
967 param.replace("-", "_"),
968 ),
969 vim_net,
970 )
971
972 nslcmop_id = db_nslcmop["_id"]
973 target = {
974 "name": db_nsr["name"],
975 "ns": {"vld": []},
976 "vnf": [],
977 "image": deepcopy(db_nsr["image"]),
978 "flavor": deepcopy(db_nsr["flavor"]),
979 "action_id": nslcmop_id,
980 "cloud_init_content": {},
981 }
982 for image in target["image"]:
983 image["vim_info"] = {}
984 for flavor in target["flavor"]:
985 flavor["vim_info"] = {}
986 if db_nsr.get("affinity-or-anti-affinity-group"):
987 target["affinity-or-anti-affinity-group"] = deepcopy(
988 db_nsr["affinity-or-anti-affinity-group"]
989 )
990 for affinity_or_anti_affinity_group in target[
991 "affinity-or-anti-affinity-group"
992 ]:
993 affinity_or_anti_affinity_group["vim_info"] = {}
994
995 if db_nslcmop.get("lcmOperationType") != "instantiate":
996 # get parameters of instantiation:
997 db_nslcmop_instantiate = self.db.get_list(
998 "nslcmops",
999 {
1000 "nsInstanceId": db_nslcmop["nsInstanceId"],
1001 "lcmOperationType": "instantiate",
1002 },
1003 )[-1]
1004 ns_params = db_nslcmop_instantiate.get("operationParams")
1005 else:
1006 ns_params = db_nslcmop.get("operationParams")
1007 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
1008 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
1009
1010 cp2target = {}
1011 for vld_index, vld in enumerate(db_nsr.get("vld")):
1012 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1013 target_vld = {
1014 "id": vld["id"],
1015 "name": vld["name"],
1016 "mgmt-network": vld.get("mgmt-network", False),
1017 "type": vld.get("type"),
1018 "vim_info": {
1019 target_vim: {
1020 "vim_network_name": vld.get("vim-network-name"),
1021 "vim_account_id": ns_params["vimAccountId"],
1022 }
1023 },
1024 }
1025 # check if this network needs SDN assist
1026 if vld.get("pci-interfaces"):
1027 db_vim = get_vim_account(ns_params["vimAccountId"])
1028 if vim_config := db_vim.get("config"):
1029 if sdnc_id := vim_config.get("sdn-controller"):
1030 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1031 target_sdn = "sdn:{}".format(sdnc_id)
1032 target_vld["vim_info"][target_sdn] = {
1033 "sdn": True,
1034 "target_vim": target_vim,
1035 "vlds": [sdn_vld],
1036 "type": vld.get("type"),
1037 }
1038
1039 nsd_vnf_profiles = get_vnf_profiles(nsd)
1040 for nsd_vnf_profile in nsd_vnf_profiles:
1041 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1042 if cp["virtual-link-profile-id"] == vld["id"]:
1043 cp2target[
1044 "member_vnf:{}.{}".format(
1045 cp["constituent-cpd-id"][0][
1046 "constituent-base-element-id"
1047 ],
1048 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1049 )
1050 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1051
1052 # check at nsd descriptor, if there is an ip-profile
1053 vld_params = {}
1054 nsd_vlp = find_in_list(
1055 get_virtual_link_profiles(nsd),
1056 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1057 == vld["id"],
1058 )
1059 if (
1060 nsd_vlp
1061 and nsd_vlp.get("virtual-link-protocol-data")
1062 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1063 ):
1064 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1065 "l3-protocol-data"
1066 ]
1067 ip_profile_dest_data = {}
1068 if "ip-version" in ip_profile_source_data:
1069 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1070 "ip-version"
1071 ]
1072 if "cidr" in ip_profile_source_data:
1073 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1074 "cidr"
1075 ]
1076 if "gateway-ip" in ip_profile_source_data:
1077 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1078 "gateway-ip"
1079 ]
1080 if "dhcp-enabled" in ip_profile_source_data:
1081 ip_profile_dest_data["dhcp-params"] = {
1082 "enabled": ip_profile_source_data["dhcp-enabled"]
1083 }
1084 vld_params["ip-profile"] = ip_profile_dest_data
1085
1086 # update vld_params with instantiation params
1087 vld_instantiation_params = find_in_list(
1088 get_iterable(ns_params, "vld"),
1089 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1090 )
1091 if vld_instantiation_params:
1092 vld_params.update(vld_instantiation_params)
1093 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1094 target["ns"]["vld"].append(target_vld)
1095 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1096 update_ns_vld_target(target, ns_params)
1097
1098 for vnfr in db_vnfrs.values():
1099 vnfd = find_in_list(
1100 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1101 )
1102 vnf_params = find_in_list(
1103 get_iterable(ns_params, "vnf"),
1104 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1105 )
1106 target_vnf = deepcopy(vnfr)
1107 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1108 for vld in target_vnf.get("vld", ()):
1109 # check if connected to a ns.vld, to fill target'
1110 vnf_cp = find_in_list(
1111 vnfd.get("int-virtual-link-desc", ()),
1112 lambda cpd: cpd.get("id") == vld["id"],
1113 )
1114 if vnf_cp:
1115 ns_cp = "member_vnf:{}.{}".format(
1116 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1117 )
1118 if cp2target.get(ns_cp):
1119 vld["target"] = cp2target[ns_cp]
1120
1121 vld["vim_info"] = {
1122 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1123 }
1124 # check if this network needs SDN assist
1125 target_sdn = None
1126 if vld.get("pci-interfaces"):
1127 db_vim = get_vim_account(vnfr["vim-account-id"])
1128 sdnc_id = db_vim["config"].get("sdn-controller")
1129 if sdnc_id:
1130 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1131 target_sdn = "sdn:{}".format(sdnc_id)
1132 vld["vim_info"][target_sdn] = {
1133 "sdn": True,
1134 "target_vim": target_vim,
1135 "vlds": [sdn_vld],
1136 "type": vld.get("type"),
1137 }
1138
1139 # check at vnfd descriptor, if there is an ip-profile
1140 vld_params = {}
1141 vnfd_vlp = find_in_list(
1142 get_virtual_link_profiles(vnfd),
1143 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1144 )
1145 if (
1146 vnfd_vlp
1147 and vnfd_vlp.get("virtual-link-protocol-data")
1148 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1149 ):
1150 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1151 "l3-protocol-data"
1152 ]
1153 ip_profile_dest_data = {}
1154 if "ip-version" in ip_profile_source_data:
1155 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1156 "ip-version"
1157 ]
1158 if "cidr" in ip_profile_source_data:
1159 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1160 "cidr"
1161 ]
1162 if "gateway-ip" in ip_profile_source_data:
1163 ip_profile_dest_data[
1164 "gateway-address"
1165 ] = ip_profile_source_data["gateway-ip"]
1166 if "dhcp-enabled" in ip_profile_source_data:
1167 ip_profile_dest_data["dhcp-params"] = {
1168 "enabled": ip_profile_source_data["dhcp-enabled"]
1169 }
1170
1171 vld_params["ip-profile"] = ip_profile_dest_data
1172 # update vld_params with instantiation params
1173 if vnf_params:
1174 vld_instantiation_params = find_in_list(
1175 get_iterable(vnf_params, "internal-vld"),
1176 lambda i_vld: i_vld["name"] == vld["id"],
1177 )
1178 if vld_instantiation_params:
1179 vld_params.update(vld_instantiation_params)
1180 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1181
1182 vdur_list = []
1183 for vdur in target_vnf.get("vdur", ()):
1184 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1185 continue # This vdu must not be created
1186 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1187
1188 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1189
1190 if ssh_keys_all:
1191 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1192 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1193 if (
1194 vdu_configuration
1195 and vdu_configuration.get("config-access")
1196 and vdu_configuration.get("config-access").get("ssh-access")
1197 ):
1198 vdur["ssh-keys"] = ssh_keys_all
1199 vdur["ssh-access-required"] = vdu_configuration[
1200 "config-access"
1201 ]["ssh-access"]["required"]
1202 elif (
1203 vnf_configuration
1204 and vnf_configuration.get("config-access")
1205 and vnf_configuration.get("config-access").get("ssh-access")
1206 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1207 ):
1208 vdur["ssh-keys"] = ssh_keys_all
1209 vdur["ssh-access-required"] = vnf_configuration[
1210 "config-access"
1211 ]["ssh-access"]["required"]
1212 elif ssh_keys_instantiation and find_in_list(
1213 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1214 ):
1215 vdur["ssh-keys"] = ssh_keys_instantiation
1216
1217 self.logger.debug("NS > vdur > {}".format(vdur))
1218
1219 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1220 # cloud-init
1221 if vdud.get("cloud-init-file"):
1222 vdur["cloud-init"] = "{}:file:{}".format(
1223 vnfd["_id"], vdud.get("cloud-init-file")
1224 )
1225 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1226 if vdur["cloud-init"] not in target["cloud_init_content"]:
1227 base_folder = vnfd["_admin"]["storage"]
1228 if base_folder["pkg-dir"]:
1229 cloud_init_file = "{}/{}/cloud_init/{}".format(
1230 base_folder["folder"],
1231 base_folder["pkg-dir"],
1232 vdud.get("cloud-init-file"),
1233 )
1234 else:
1235 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1236 base_folder["folder"],
1237 vdud.get("cloud-init-file"),
1238 )
1239 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1240 target["cloud_init_content"][
1241 vdur["cloud-init"]
1242 ] = ci_file.read()
1243 elif vdud.get("cloud-init"):
1244 vdur["cloud-init"] = "{}:vdu:{}".format(
1245 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1246 )
1247 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1248 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1249 "cloud-init"
1250 ]
1251 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1252 deploy_params_vdu = self._format_additional_params(
1253 vdur.get("additionalParams") or {}
1254 )
1255 deploy_params_vdu["OSM"] = get_osm_params(
1256 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1257 )
1258 vdur["additionalParams"] = deploy_params_vdu
1259
1260 # flavor
1261 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1262 if target_vim not in ns_flavor["vim_info"]:
1263 ns_flavor["vim_info"][target_vim] = {}
1264
1265 # deal with images
1266 # in case alternative images are provided we must check if they should be applied
1267 # for the vim_type, modify the vim_type taking into account
1268 ns_image_id = int(vdur["ns-image-id"])
1269 if vdur.get("alt-image-ids"):
1270 db_vim = get_vim_account(vnfr["vim-account-id"])
1271 vim_type = db_vim["vim_type"]
1272 for alt_image_id in vdur.get("alt-image-ids"):
1273 ns_alt_image = target["image"][int(alt_image_id)]
1274 if vim_type == ns_alt_image.get("vim-type"):
1275 # must use alternative image
1276 self.logger.debug(
1277 "use alternative image id: {}".format(alt_image_id)
1278 )
1279 ns_image_id = alt_image_id
1280 vdur["ns-image-id"] = ns_image_id
1281 break
1282 ns_image = target["image"][int(ns_image_id)]
1283 if target_vim not in ns_image["vim_info"]:
1284 ns_image["vim_info"][target_vim] = {}
1285
1286 # Affinity groups
1287 if vdur.get("affinity-or-anti-affinity-group-id"):
1288 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1289 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1290 if target_vim not in ns_ags["vim_info"]:
1291 ns_ags["vim_info"][target_vim] = {}
1292
1293 vdur["vim_info"] = {target_vim: {}}
1294 # instantiation parameters
1295 if vnf_params:
1296 vdu_instantiation_params = find_in_list(
1297 get_iterable(vnf_params, "vdu"),
1298 lambda i_vdu: i_vdu["id"] == vdud["id"],
1299 )
1300 if vdu_instantiation_params:
1301 # Parse the vdu_volumes from the instantiation params
1302 vdu_volumes = get_volumes_from_instantiation_params(
1303 vdu_instantiation_params, vdud
1304 )
1305 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1306 vdur_list.append(vdur)
1307 target_vnf["vdur"] = vdur_list
1308 target["vnf"].append(target_vnf)
1309
1310 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1311 desc = await self.RO.deploy(nsr_id, target)
1312 self.logger.debug("RO return > {}".format(desc))
1313 action_id = desc["action_id"]
1314 await self._wait_ng_ro(
1315 nsr_id,
1316 action_id,
1317 nslcmop_id,
1318 start_deploy,
1319 timeout_ns_deploy,
1320 stage,
1321 operation="instantiation",
1322 )
1323
1324 # Updating NSR
1325 db_nsr_update = {
1326 "_admin.deployed.RO.operational-status": "running",
1327 "detailed-status": " ".join(stage),
1328 }
1329 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1330 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1331 self._write_op_status(nslcmop_id, stage)
1332 self.logger.debug(
1333 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1334 )
1335 return
1336
1337 async def _wait_ng_ro(
1338 self,
1339 nsr_id,
1340 action_id,
1341 nslcmop_id=None,
1342 start_time=None,
1343 timeout=600,
1344 stage=None,
1345 operation=None,
1346 ):
1347 detailed_status_old = None
1348 db_nsr_update = {}
1349 start_time = start_time or time()
1350 while time() <= start_time + timeout:
1351 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1352 self.logger.debug("Wait NG RO > {}".format(desc_status))
1353 if desc_status["status"] == "FAILED":
1354 raise NgRoException(desc_status["details"])
1355 elif desc_status["status"] == "BUILD":
1356 if stage:
1357 stage[2] = "VIM: ({})".format(desc_status["details"])
1358 elif desc_status["status"] == "DONE":
1359 if stage:
1360 stage[2] = "Deployed at VIM"
1361 break
1362 else:
1363 assert False, "ROclient.check_ns_status returns unknown {}".format(
1364 desc_status["status"]
1365 )
1366 if stage and nslcmop_id and stage[2] != detailed_status_old:
1367 detailed_status_old = stage[2]
1368 db_nsr_update["detailed-status"] = " ".join(stage)
1369 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1370 self._write_op_status(nslcmop_id, stage)
1371 await asyncio.sleep(15, loop=self.loop)
1372 else: # timeout_ns_deploy
1373 raise NgRoException("Timeout waiting ns to deploy")
1374
1375 async def _terminate_ng_ro(
1376 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1377 ):
1378 db_nsr_update = {}
1379 failed_detail = []
1380 action_id = None
1381 start_deploy = time()
1382 try:
1383 target = {
1384 "ns": {"vld": []},
1385 "vnf": [],
1386 "image": [],
1387 "flavor": [],
1388 "action_id": nslcmop_id,
1389 }
1390 desc = await self.RO.deploy(nsr_id, target)
1391 action_id = desc["action_id"]
1392 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1393 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1394 self.logger.debug(
1395 logging_text
1396 + "ns terminate action at RO. action_id={}".format(action_id)
1397 )
1398
1399 # wait until done
1400 delete_timeout = 20 * 60 # 20 minutes
1401 await self._wait_ng_ro(
1402 nsr_id,
1403 action_id,
1404 nslcmop_id,
1405 start_deploy,
1406 delete_timeout,
1407 stage,
1408 operation="termination",
1409 )
1410
1411 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1412 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1413 # delete all nsr
1414 await self.RO.delete(nsr_id)
1415 except Exception as e:
1416 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1417 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1418 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1419 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1420 self.logger.debug(
1421 logging_text + "RO_action_id={} already deleted".format(action_id)
1422 )
1423 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1424 failed_detail.append("delete conflict: {}".format(e))
1425 self.logger.debug(
1426 logging_text
1427 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1428 )
1429 else:
1430 failed_detail.append("delete error: {}".format(e))
1431 self.logger.error(
1432 logging_text
1433 + "RO_action_id={} delete error: {}".format(action_id, e)
1434 )
1435
1436 if failed_detail:
1437 stage[2] = "Error deleting from VIM"
1438 else:
1439 stage[2] = "Deleted from VIM"
1440 db_nsr_update["detailed-status"] = " ".join(stage)
1441 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1442 self._write_op_status(nslcmop_id, stage)
1443
1444 if failed_detail:
1445 raise LcmException("; ".join(failed_detail))
1446 return
1447
1448 async def instantiate_RO(
1449 self,
1450 logging_text,
1451 nsr_id,
1452 nsd,
1453 db_nsr,
1454 db_nslcmop,
1455 db_vnfrs,
1456 db_vnfds,
1457 n2vc_key_list,
1458 stage,
1459 ):
1460 """
1461 Instantiate at RO
1462 :param logging_text: preffix text to use at logging
1463 :param nsr_id: nsr identity
1464 :param nsd: database content of ns descriptor
1465 :param db_nsr: database content of ns record
1466 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1467 :param db_vnfrs:
1468 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1469 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1470 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1471 :return: None or exception
1472 """
1473 try:
1474 start_deploy = time()
1475 ns_params = db_nslcmop.get("operationParams")
1476 if ns_params and ns_params.get("timeout_ns_deploy"):
1477 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1478 else:
1479 timeout_ns_deploy = self.timeout.get(
1480 "ns_deploy", self.timeout_ns_deploy
1481 )
1482
1483 # Check for and optionally request placement optimization. Database will be updated if placement activated
1484 stage[2] = "Waiting for Placement."
1485 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1486 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1487 for vnfr in db_vnfrs.values():
1488 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1489 break
1490 else:
1491 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1492
1493 return await self._instantiate_ng_ro(
1494 logging_text,
1495 nsr_id,
1496 nsd,
1497 db_nsr,
1498 db_nslcmop,
1499 db_vnfrs,
1500 db_vnfds,
1501 n2vc_key_list,
1502 stage,
1503 start_deploy,
1504 timeout_ns_deploy,
1505 )
1506 except Exception as e:
1507 stage[2] = "ERROR deploying at VIM"
1508 self.set_vnfr_at_error(db_vnfrs, str(e))
1509 self.logger.error(
1510 "Error deploying at VIM {}".format(e),
1511 exc_info=not isinstance(
1512 e,
1513 (
1514 ROclient.ROClientException,
1515 LcmException,
1516 DbException,
1517 NgRoException,
1518 ),
1519 ),
1520 )
1521 raise
1522
1523 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1524 """
1525 Wait for kdu to be up, get ip address
1526 :param logging_text: prefix use for logging
1527 :param nsr_id:
1528 :param vnfr_id:
1529 :param kdu_name:
1530 :return: IP address, K8s services
1531 """
1532
1533 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1534 nb_tries = 0
1535
1536 while nb_tries < 360:
1537 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1538 kdur = next(
1539 (
1540 x
1541 for x in get_iterable(db_vnfr, "kdur")
1542 if x.get("kdu-name") == kdu_name
1543 ),
1544 None,
1545 )
1546 if not kdur:
1547 raise LcmException(
1548 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1549 )
1550 if kdur.get("status"):
1551 if kdur["status"] in ("READY", "ENABLED"):
1552 return kdur.get("ip-address"), kdur.get("services")
1553 else:
1554 raise LcmException(
1555 "target KDU={} is in error state".format(kdu_name)
1556 )
1557
1558 await asyncio.sleep(10, loop=self.loop)
1559 nb_tries += 1
1560 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1561
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: NS record identifier
        :param vnfr_id: VNF record identifier used to read the target vdur
        :param vdu_id: VDU id to target; None means the VNF management VDU
        :param vdu_index: count-index of the VDU (only used when vdu_id is set)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # retry counter for key injection via classic RO
        target_vdu_id = None
        ro_retries = 0  # overall loop counter (10 s per iteration)

        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # match both the vdu id and its count-index
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # a PDU, or an ACTIVE VM (classic or NG-RO status), is ready
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # PDUs are externally managed: key injection is not possible
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: inject through a deploy action and wait
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(
                            nsr_id, action_id, timeout=600, operation="instantiation"
                        )
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        # succeed as soon as any VM reports vim_result 200
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO may fail transiently; retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the IP address is all the caller needs
                break

        return ip_address
1739
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id, used to re-read configurationStatus from db
        :param vca_deployed_list: deployed VCA list; only the entry at vca_index
            is inspected to decide the dependency scope
        :param vca_index: index of the VCA whose dependencies must be waited for
        :raises LcmException: if a dependent charm is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): the counter decrements once per 10 s sleep, so the real
        # wall-clock limit is ~300 * 10 s = 50 minutes, not 300 seconds —
        # confirm whether this is intentional.
        timeout = 300
        while timeout >= 0:
            # refresh the configuration status from the database on each poll
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level VCA (no member-vnf-index) depends on everything;
                # VNF-level VCA depends only on entries of the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # dependency still in progress: stop scanning and sleep
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1777
1778 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1779 vca_id = None
1780 if db_vnfr:
1781 vca_id = deep_get(db_vnfr, ("vca-id",))
1782 elif db_nsr:
1783 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1784 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1785 return vca_id
1786
1787 async def instantiate_N2VC(
1788 self,
1789 logging_text,
1790 vca_index,
1791 nsi_id,
1792 db_nsr,
1793 db_vnfr,
1794 vdu_id,
1795 kdu_name,
1796 vdu_index,
1797 config_descriptor,
1798 deploy_params,
1799 base_folder,
1800 nslcmop_id,
1801 stage,
1802 vca_type,
1803 vca_name,
1804 ee_config_descriptor,
1805 ):
1806 nsr_id = db_nsr["_id"]
1807 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1808 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1809 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1810 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1811 db_dict = {
1812 "collection": "nsrs",
1813 "filter": {"_id": nsr_id},
1814 "path": db_update_entry,
1815 }
1816 step = ""
1817 try:
1818 element_type = "NS"
1819 element_under_configuration = nsr_id
1820
1821 vnfr_id = None
1822 if db_vnfr:
1823 vnfr_id = db_vnfr["_id"]
1824 osm_config["osm"]["vnf_id"] = vnfr_id
1825
1826 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1827
1828 if vca_type == "native_charm":
1829 index_number = 0
1830 else:
1831 index_number = vdu_index or 0
1832
1833 if vnfr_id:
1834 element_type = "VNF"
1835 element_under_configuration = vnfr_id
1836 namespace += ".{}-{}".format(vnfr_id, index_number)
1837 if vdu_id:
1838 namespace += ".{}-{}".format(vdu_id, index_number)
1839 element_type = "VDU"
1840 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1841 osm_config["osm"]["vdu_id"] = vdu_id
1842 elif kdu_name:
1843 namespace += ".{}".format(kdu_name)
1844 element_type = "KDU"
1845 element_under_configuration = kdu_name
1846 osm_config["osm"]["kdu_name"] = kdu_name
1847
1848 # Get artifact path
1849 if base_folder["pkg-dir"]:
1850 artifact_path = "{}/{}/{}/{}".format(
1851 base_folder["folder"],
1852 base_folder["pkg-dir"],
1853 "charms"
1854 if vca_type
1855 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1856 else "helm-charts",
1857 vca_name,
1858 )
1859 else:
1860 artifact_path = "{}/Scripts/{}/{}/".format(
1861 base_folder["folder"],
1862 "charms"
1863 if vca_type
1864 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1865 else "helm-charts",
1866 vca_name,
1867 )
1868
1869 self.logger.debug("Artifact path > {}".format(artifact_path))
1870
1871 # get initial_config_primitive_list that applies to this element
1872 initial_config_primitive_list = config_descriptor.get(
1873 "initial-config-primitive"
1874 )
1875
1876 self.logger.debug(
1877 "Initial config primitive list > {}".format(
1878 initial_config_primitive_list
1879 )
1880 )
1881
1882 # add config if not present for NS charm
1883 ee_descriptor_id = ee_config_descriptor.get("id")
1884 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1885 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1886 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1887 )
1888
1889 self.logger.debug(
1890 "Initial config primitive list #2 > {}".format(
1891 initial_config_primitive_list
1892 )
1893 )
1894 # n2vc_redesign STEP 3.1
1895 # find old ee_id if exists
1896 ee_id = vca_deployed.get("ee_id")
1897
1898 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1899 # create or register execution environment in VCA
1900 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1901 self._write_configuration_status(
1902 nsr_id=nsr_id,
1903 vca_index=vca_index,
1904 status="CREATING",
1905 element_under_configuration=element_under_configuration,
1906 element_type=element_type,
1907 )
1908
1909 step = "create execution environment"
1910 self.logger.debug(logging_text + step)
1911
1912 ee_id = None
1913 credentials = None
1914 if vca_type == "k8s_proxy_charm":
1915 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1916 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1917 namespace=namespace,
1918 artifact_path=artifact_path,
1919 db_dict=db_dict,
1920 vca_id=vca_id,
1921 )
1922 elif vca_type == "helm" or vca_type == "helm-v3":
1923 ee_id, credentials = await self.vca_map[
1924 vca_type
1925 ].create_execution_environment(
1926 namespace=namespace,
1927 reuse_ee_id=ee_id,
1928 db_dict=db_dict,
1929 config=osm_config,
1930 artifact_path=artifact_path,
1931 vca_type=vca_type,
1932 )
1933 else:
1934 ee_id, credentials = await self.vca_map[
1935 vca_type
1936 ].create_execution_environment(
1937 namespace=namespace,
1938 reuse_ee_id=ee_id,
1939 db_dict=db_dict,
1940 vca_id=vca_id,
1941 )
1942
1943 elif vca_type == "native_charm":
1944 step = "Waiting to VM being up and getting IP address"
1945 self.logger.debug(logging_text + step)
1946 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1947 logging_text,
1948 nsr_id,
1949 vnfr_id,
1950 vdu_id,
1951 vdu_index,
1952 user=None,
1953 pub_key=None,
1954 )
1955 credentials = {"hostname": rw_mgmt_ip}
1956 # get username
1957 username = deep_get(
1958 config_descriptor, ("config-access", "ssh-access", "default-user")
1959 )
1960 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1961 # merged. Meanwhile let's get username from initial-config-primitive
1962 if not username and initial_config_primitive_list:
1963 for config_primitive in initial_config_primitive_list:
1964 for param in config_primitive.get("parameter", ()):
1965 if param["name"] == "ssh-username":
1966 username = param["value"]
1967 break
1968 if not username:
1969 raise LcmException(
1970 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1971 "'config-access.ssh-access.default-user'"
1972 )
1973 credentials["username"] = username
1974 # n2vc_redesign STEP 3.2
1975
1976 self._write_configuration_status(
1977 nsr_id=nsr_id,
1978 vca_index=vca_index,
1979 status="REGISTERING",
1980 element_under_configuration=element_under_configuration,
1981 element_type=element_type,
1982 )
1983
1984 step = "register execution environment {}".format(credentials)
1985 self.logger.debug(logging_text + step)
1986 ee_id = await self.vca_map[vca_type].register_execution_environment(
1987 credentials=credentials,
1988 namespace=namespace,
1989 db_dict=db_dict,
1990 vca_id=vca_id,
1991 )
1992
1993 # for compatibility with MON/POL modules, the need model and application name at database
1994 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1995 ee_id_parts = ee_id.split(".")
1996 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1997 if len(ee_id_parts) >= 2:
1998 model_name = ee_id_parts[0]
1999 application_name = ee_id_parts[1]
2000 db_nsr_update[db_update_entry + "model"] = model_name
2001 db_nsr_update[db_update_entry + "application"] = application_name
2002
2003 # n2vc_redesign STEP 3.3
2004 step = "Install configuration Software"
2005
2006 self._write_configuration_status(
2007 nsr_id=nsr_id,
2008 vca_index=vca_index,
2009 status="INSTALLING SW",
2010 element_under_configuration=element_under_configuration,
2011 element_type=element_type,
2012 other_update=db_nsr_update,
2013 )
2014
2015 # TODO check if already done
2016 self.logger.debug(logging_text + step)
2017 config = None
2018 if vca_type == "native_charm":
2019 config_primitive = next(
2020 (p for p in initial_config_primitive_list if p["name"] == "config"),
2021 None,
2022 )
2023 if config_primitive:
2024 config = self._map_primitive_params(
2025 config_primitive, {}, deploy_params
2026 )
2027 num_units = 1
2028 if vca_type == "lxc_proxy_charm":
2029 if element_type == "NS":
2030 num_units = db_nsr.get("config-units") or 1
2031 elif element_type == "VNF":
2032 num_units = db_vnfr.get("config-units") or 1
2033 elif element_type == "VDU":
2034 for v in db_vnfr["vdur"]:
2035 if vdu_id == v["vdu-id-ref"]:
2036 num_units = v.get("config-units") or 1
2037 break
2038 if vca_type != "k8s_proxy_charm":
2039 await self.vca_map[vca_type].install_configuration_sw(
2040 ee_id=ee_id,
2041 artifact_path=artifact_path,
2042 db_dict=db_dict,
2043 config=config,
2044 num_units=num_units,
2045 vca_id=vca_id,
2046 vca_type=vca_type,
2047 )
2048
2049 # write in db flag of configuration_sw already installed
2050 self.update_db_2(
2051 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2052 )
2053
2054 # add relations for this VCA (wait for other peers related with this VCA)
2055 await self._add_vca_relations(
2056 logging_text=logging_text,
2057 nsr_id=nsr_id,
2058 vca_type=vca_type,
2059 vca_index=vca_index,
2060 )
2061
2062 # if SSH access is required, then get execution environment SSH public
2063 # if native charm we have waited already to VM be UP
2064 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2065 pub_key = None
2066 user = None
2067 # self.logger.debug("get ssh key block")
2068 if deep_get(
2069 config_descriptor, ("config-access", "ssh-access", "required")
2070 ):
2071 # self.logger.debug("ssh key needed")
2072 # Needed to inject a ssh key
2073 user = deep_get(
2074 config_descriptor,
2075 ("config-access", "ssh-access", "default-user"),
2076 )
2077 step = "Install configuration Software, getting public ssh key"
2078 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2079 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2080 )
2081
2082 step = "Insert public key into VM user={} ssh_key={}".format(
2083 user, pub_key
2084 )
2085 else:
2086 # self.logger.debug("no need to get ssh key")
2087 step = "Waiting to VM being up and getting IP address"
2088 self.logger.debug(logging_text + step)
2089
2090 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2091 rw_mgmt_ip = None
2092
2093 # n2vc_redesign STEP 5.1
2094 # wait for RO (ip-address) Insert pub_key into VM
2095 if vnfr_id:
2096 if kdu_name:
2097 rw_mgmt_ip, services = await self.wait_kdu_up(
2098 logging_text, nsr_id, vnfr_id, kdu_name
2099 )
2100 vnfd = self.db.get_one(
2101 "vnfds_revisions",
2102 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2103 )
2104 kdu = get_kdu(vnfd, kdu_name)
2105 kdu_services = [
2106 service["name"] for service in get_kdu_services(kdu)
2107 ]
2108 exposed_services = []
2109 for service in services:
2110 if any(s in service["name"] for s in kdu_services):
2111 exposed_services.append(service)
2112 await self.vca_map[vca_type].exec_primitive(
2113 ee_id=ee_id,
2114 primitive_name="config",
2115 params_dict={
2116 "osm-config": json.dumps(
2117 OsmConfigBuilder(
2118 k8s={"services": exposed_services}
2119 ).build()
2120 )
2121 },
2122 vca_id=vca_id,
2123 )
2124
2125 # This verification is needed in order to avoid trying to add a public key
2126 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2127 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2128 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2129 # or it is a KNF)
2130 elif db_vnfr.get("vdur"):
2131 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2132 logging_text,
2133 nsr_id,
2134 vnfr_id,
2135 vdu_id,
2136 vdu_index,
2137 user=user,
2138 pub_key=pub_key,
2139 )
2140
2141 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2142
2143 # store rw_mgmt_ip in deploy params for later replacement
2144 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2145
2146 # n2vc_redesign STEP 6 Execute initial config primitive
2147 step = "execute initial config primitive"
2148
2149 # wait for dependent primitives execution (NS -> VNF -> VDU)
2150 if initial_config_primitive_list:
2151 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2152
2153 # stage, in function of element type: vdu, kdu, vnf or ns
2154 my_vca = vca_deployed_list[vca_index]
2155 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2156 # VDU or KDU
2157 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2158 elif my_vca.get("member-vnf-index"):
2159 # VNF
2160 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2161 else:
2162 # NS
2163 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2164
2165 self._write_configuration_status(
2166 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2167 )
2168
2169 self._write_op_status(op_id=nslcmop_id, stage=stage)
2170
2171 check_if_terminated_needed = True
2172 for initial_config_primitive in initial_config_primitive_list:
2173 # adding information on the vca_deployed if it is a NS execution environment
2174 if not vca_deployed["member-vnf-index"]:
2175 deploy_params["ns_config_info"] = json.dumps(
2176 self._get_ns_config_info(nsr_id)
2177 )
2178 # TODO check if already done
2179 primitive_params_ = self._map_primitive_params(
2180 initial_config_primitive, {}, deploy_params
2181 )
2182
2183 step = "execute primitive '{}' params '{}'".format(
2184 initial_config_primitive["name"], primitive_params_
2185 )
2186 self.logger.debug(logging_text + step)
2187 await self.vca_map[vca_type].exec_primitive(
2188 ee_id=ee_id,
2189 primitive_name=initial_config_primitive["name"],
2190 params_dict=primitive_params_,
2191 db_dict=db_dict,
2192 vca_id=vca_id,
2193 vca_type=vca_type,
2194 )
2195 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2196 if check_if_terminated_needed:
2197 if config_descriptor.get("terminate-config-primitive"):
2198 self.update_db_2(
2199 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2200 )
2201 check_if_terminated_needed = False
2202
2203 # TODO register in database that primitive is done
2204
2205 # STEP 7 Configure metrics
2206 if vca_type == "helm" or vca_type == "helm-v3":
2207 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2208 ee_id=ee_id,
2209 artifact_path=artifact_path,
2210 ee_config_descriptor=ee_config_descriptor,
2211 vnfr_id=vnfr_id,
2212 nsr_id=nsr_id,
2213 target_ip=rw_mgmt_ip,
2214 )
2215 if prometheus_jobs:
2216 self.update_db_2(
2217 "nsrs",
2218 nsr_id,
2219 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2220 )
2221
2222 for job in prometheus_jobs:
2223 self.db.set_one(
2224 "prometheus_jobs",
2225 {"job_name": job["job_name"]},
2226 job,
2227 upsert=True,
2228 fail_on_empty=False,
2229 )
2230
2231 step = "instantiated at VCA"
2232 self.logger.debug(logging_text + step)
2233
2234 self._write_configuration_status(
2235 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2236 )
2237
2238 except Exception as e: # TODO not use Exception but N2VC exception
2239 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2240 if not isinstance(
2241 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2242 ):
2243 self.logger.error(
2244 "Exception while {} : {}".format(step, e), exc_info=True
2245 )
2246 self._write_configuration_status(
2247 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2248 )
2249 raise LcmException("{} {}".format(step, e)) from e
2250
2251 def _write_ns_status(
2252 self,
2253 nsr_id: str,
2254 ns_state: str,
2255 current_operation: str,
2256 current_operation_id: str,
2257 error_description: str = None,
2258 error_detail: str = None,
2259 other_update: dict = None,
2260 ):
2261 """
2262 Update db_nsr fields.
2263 :param nsr_id:
2264 :param ns_state:
2265 :param current_operation:
2266 :param current_operation_id:
2267 :param error_description:
2268 :param error_detail:
2269 :param other_update: Other required changes at database if provided, will be cleared
2270 :return:
2271 """
2272 try:
2273 db_dict = other_update or {}
2274 db_dict[
2275 "_admin.nslcmop"
2276 ] = current_operation_id # for backward compatibility
2277 db_dict["_admin.current-operation"] = current_operation_id
2278 db_dict["_admin.operation-type"] = (
2279 current_operation if current_operation != "IDLE" else None
2280 )
2281 db_dict["currentOperation"] = current_operation
2282 db_dict["currentOperationID"] = current_operation_id
2283 db_dict["errorDescription"] = error_description
2284 db_dict["errorDetail"] = error_detail
2285
2286 if ns_state:
2287 db_dict["nsState"] = ns_state
2288 self.update_db_2("nsrs", nsr_id, db_dict)
2289 except DbException as e:
2290 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2291
2292 def _write_op_status(
2293 self,
2294 op_id: str,
2295 stage: list = None,
2296 error_message: str = None,
2297 queuePosition: int = 0,
2298 operation_state: str = None,
2299 other_update: dict = None,
2300 ):
2301 try:
2302 db_dict = other_update or {}
2303 db_dict["queuePosition"] = queuePosition
2304 if isinstance(stage, list):
2305 db_dict["stage"] = stage[0]
2306 db_dict["detailed-status"] = " ".join(stage)
2307 elif stage is not None:
2308 db_dict["stage"] = str(stage)
2309
2310 if error_message is not None:
2311 db_dict["errorMessage"] = error_message
2312 if operation_state is not None:
2313 db_dict["operationState"] = operation_state
2314 db_dict["statusEnteredTime"] = time()
2315 self.update_db_2("nslcmops", op_id, db_dict)
2316 except DbException as e:
2317 self.logger.warn(
2318 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2319 )
2320
2321 def _write_all_config_status(self, db_nsr: dict, status: str):
2322 try:
2323 nsr_id = db_nsr["_id"]
2324 # configurationStatus
2325 config_status = db_nsr.get("configurationStatus")
2326 if config_status:
2327 db_nsr_update = {
2328 "configurationStatus.{}.status".format(index): status
2329 for index, v in enumerate(config_status)
2330 if v
2331 }
2332 # update status
2333 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2334
2335 except DbException as e:
2336 self.logger.warn(
2337 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2338 )
2339
2340 def _write_configuration_status(
2341 self,
2342 nsr_id: str,
2343 vca_index: int,
2344 status: str = None,
2345 element_under_configuration: str = None,
2346 element_type: str = None,
2347 other_update: dict = None,
2348 ):
2349 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2350 # .format(vca_index, status))
2351
2352 try:
2353 db_path = "configurationStatus.{}.".format(vca_index)
2354 db_dict = other_update or {}
2355 if status:
2356 db_dict[db_path + "status"] = status
2357 if element_under_configuration:
2358 db_dict[
2359 db_path + "elementUnderConfiguration"
2360 ] = element_under_configuration
2361 if element_type:
2362 db_dict[db_path + "elementType"] = element_type
2363 self.update_db_2("nsrs", nsr_id, db_dict)
2364 except DbException as e:
2365 self.logger.warn(
2366 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2367 status, nsr_id, vca_index, e
2368 )
2369 )
2370
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # Request the placement computation via kafka; the reply is written
            # (possibly by a different LCM worker) into nslcmops "_admin.pla".
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # Poll the database every 5 seconds until the result appears.
            # NOTE: the "wait >= 0" condition lets the loop run one extra time
            # after the budget is consumed (up to 11 polls, ~55s worst case).
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # Apply the computed vim account to each referenced vnfr, both at
            # database and at the in-memory db_vnfrs used by the caller.
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2419
2420 def update_nsrs_with_pla_result(self, params):
2421 try:
2422 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2423 self.update_db_2(
2424 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2425 )
2426 except Exception as e:
2427 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2428
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: reads nsr/nslcmop/vnfr/vnfd records,
        deploys KDUs, launches the RO deployment task and the per-VNF/VDU/KDU/NS
        execution environments (N2VC), then waits for all launched tasks and
        writes the final operation result to database and kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Progress and result are persisted at nsrs/nslcmops.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdu additionalParams is stored JSON-encoded; decode in place
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so this membership test
                # against a plain id string never matches — the vnfd is re-read
                # (and re-appended) once per vnfr; confirm whether this is intended.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with the N2VC deployments below;
            # all are gathered at the finally block through tasks_dict_info.
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm (if the vnfd declares a configuration for itself)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            # NOTE(review): "as exc" rebinds (and afterwards unsets) the outer
            # "exc" variable; harmless today because "exc" is not read again below.
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            # notify the result via kafka so NBI/other modules learn the outcome
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2914
2915 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2916 if vnfd_id not in cached_vnfds:
2917 cached_vnfds[vnfd_id] = self.db.get_one(
2918 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2919 )
2920 return cached_vnfds[vnfd_id]
2921
2922 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2923 if vnf_profile_id not in cached_vnfrs:
2924 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2925 "vnfrs",
2926 {
2927 "member-vnf-index-ref": vnf_profile_id,
2928 "nsr-id-ref": nsr_id,
2929 },
2930 )
2931 return cached_vnfrs[vnf_profile_id]
2932
2933 def _is_deployed_vca_in_relation(
2934 self, vca: DeployedVCA, relation: Relation
2935 ) -> bool:
2936 found = False
2937 for endpoint in (relation.provider, relation.requirer):
2938 if endpoint["kdu-resource-profile-id"]:
2939 continue
2940 found = (
2941 vca.vnf_profile_id == endpoint.vnf_profile_id
2942 and vca.vdu_profile_id == endpoint.vdu_profile_id
2943 and vca.execution_environment_ref == endpoint.execution_environment_ref
2944 )
2945 if found:
2946 break
2947 return found
2948
2949 def _update_ee_relation_data_with_implicit_data(
2950 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2951 ):
2952 ee_relation_data = safe_get_ee_relation(
2953 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2954 )
2955 ee_relation_level = EELevel.get_level(ee_relation_data)
2956 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2957 "execution-environment-ref"
2958 ]:
2959 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2960 vnfd_id = vnf_profile["vnfd-id"]
2961 project = nsd["_admin"]["projects_read"][0]
2962 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2963 entity_id = (
2964 vnfd_id
2965 if ee_relation_level == EELevel.VNF
2966 else ee_relation_data["vdu-profile-id"]
2967 )
2968 ee = get_juju_ee_ref(db_vnfd, entity_id)
2969 if not ee:
2970 raise Exception(
2971 f"not execution environments found for ee_relation {ee_relation_data}"
2972 )
2973 ee_relation_data["execution-environment-ref"] = ee["id"]
2974 return ee_relation_data
2975
2976 def _get_ns_relations(
2977 self,
2978 nsr_id: str,
2979 nsd: Dict[str, Any],
2980 vca: DeployedVCA,
2981 cached_vnfds: Dict[str, Any],
2982 ) -> List[Relation]:
2983 relations = []
2984 db_ns_relations = get_ns_configuration_relation_list(nsd)
2985 for r in db_ns_relations:
2986 provider_dict = None
2987 requirer_dict = None
2988 if all(key in r for key in ("provider", "requirer")):
2989 provider_dict = r["provider"]
2990 requirer_dict = r["requirer"]
2991 elif "entities" in r:
2992 provider_id = r["entities"][0]["id"]
2993 provider_dict = {
2994 "nsr-id": nsr_id,
2995 "endpoint": r["entities"][0]["endpoint"],
2996 }
2997 if provider_id != nsd["id"]:
2998 provider_dict["vnf-profile-id"] = provider_id
2999 requirer_id = r["entities"][1]["id"]
3000 requirer_dict = {
3001 "nsr-id": nsr_id,
3002 "endpoint": r["entities"][1]["endpoint"],
3003 }
3004 if requirer_id != nsd["id"]:
3005 requirer_dict["vnf-profile-id"] = requirer_id
3006 else:
3007 raise Exception(
3008 "provider/requirer or entities must be included in the relation."
3009 )
3010 relation_provider = self._update_ee_relation_data_with_implicit_data(
3011 nsr_id, nsd, provider_dict, cached_vnfds
3012 )
3013 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3014 nsr_id, nsd, requirer_dict, cached_vnfds
3015 )
3016 provider = EERelation(relation_provider)
3017 requirer = EERelation(relation_requirer)
3018 relation = Relation(r["name"], provider, requirer)
3019 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3020 if vca_in_relation:
3021 relations.append(relation)
3022 return relations
3023
3024 def _get_vnf_relations(
3025 self,
3026 nsr_id: str,
3027 nsd: Dict[str, Any],
3028 vca: DeployedVCA,
3029 cached_vnfds: Dict[str, Any],
3030 ) -> List[Relation]:
3031 relations = []
3032 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3033 vnf_profile_id = vnf_profile["id"]
3034 vnfd_id = vnf_profile["vnfd-id"]
3035 project = nsd["_admin"]["projects_read"][0]
3036 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3037 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3038 for r in db_vnf_relations:
3039 provider_dict = None
3040 requirer_dict = None
3041 if all(key in r for key in ("provider", "requirer")):
3042 provider_dict = r["provider"]
3043 requirer_dict = r["requirer"]
3044 elif "entities" in r:
3045 provider_id = r["entities"][0]["id"]
3046 provider_dict = {
3047 "nsr-id": nsr_id,
3048 "vnf-profile-id": vnf_profile_id,
3049 "endpoint": r["entities"][0]["endpoint"],
3050 }
3051 if provider_id != vnfd_id:
3052 provider_dict["vdu-profile-id"] = provider_id
3053 requirer_id = r["entities"][1]["id"]
3054 requirer_dict = {
3055 "nsr-id": nsr_id,
3056 "vnf-profile-id": vnf_profile_id,
3057 "endpoint": r["entities"][1]["endpoint"],
3058 }
3059 if requirer_id != vnfd_id:
3060 requirer_dict["vdu-profile-id"] = requirer_id
3061 else:
3062 raise Exception(
3063 "provider/requirer or entities must be included in the relation."
3064 )
3065 relation_provider = self._update_ee_relation_data_with_implicit_data(
3066 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3067 )
3068 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3069 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3070 )
3071 provider = EERelation(relation_provider)
3072 requirer = EERelation(relation_requirer)
3073 relation = Relation(r["name"], provider, requirer)
3074 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3075 if vca_in_relation:
3076 relations.append(relation)
3077 return relations
3078
3079 def _get_kdu_resource_data(
3080 self,
3081 ee_relation: EERelation,
3082 db_nsr: Dict[str, Any],
3083 cached_vnfds: Dict[str, Any],
3084 ) -> DeployedK8sResource:
3085 nsd = get_nsd(db_nsr)
3086 vnf_profiles = get_vnf_profiles(nsd)
3087 vnfd_id = find_in_list(
3088 vnf_profiles,
3089 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3090 )["vnfd-id"]
3091 project = nsd["_admin"]["projects_read"][0]
3092 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3093 kdu_resource_profile = get_kdu_resource_profile(
3094 db_vnfd, ee_relation.kdu_resource_profile_id
3095 )
3096 kdu_name = kdu_resource_profile["kdu-name"]
3097 deployed_kdu, _ = get_deployed_kdu(
3098 db_nsr.get("_admin", ()).get("deployed", ()),
3099 kdu_name,
3100 ee_relation.vnf_profile_id,
3101 )
3102 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3103 return deployed_kdu
3104
3105 def _get_deployed_component(
3106 self,
3107 ee_relation: EERelation,
3108 db_nsr: Dict[str, Any],
3109 cached_vnfds: Dict[str, Any],
3110 ) -> DeployedComponent:
3111 nsr_id = db_nsr["_id"]
3112 deployed_component = None
3113 ee_level = EELevel.get_level(ee_relation)
3114 if ee_level == EELevel.NS:
3115 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3116 if vca:
3117 deployed_component = DeployedVCA(nsr_id, vca)
3118 elif ee_level == EELevel.VNF:
3119 vca = get_deployed_vca(
3120 db_nsr,
3121 {
3122 "vdu_id": None,
3123 "member-vnf-index": ee_relation.vnf_profile_id,
3124 "ee_descriptor_id": ee_relation.execution_environment_ref,
3125 },
3126 )
3127 if vca:
3128 deployed_component = DeployedVCA(nsr_id, vca)
3129 elif ee_level == EELevel.VDU:
3130 vca = get_deployed_vca(
3131 db_nsr,
3132 {
3133 "vdu_id": ee_relation.vdu_profile_id,
3134 "member-vnf-index": ee_relation.vnf_profile_id,
3135 "ee_descriptor_id": ee_relation.execution_environment_ref,
3136 },
3137 )
3138 if vca:
3139 deployed_component = DeployedVCA(nsr_id, vca)
3140 elif ee_level == EELevel.KDU:
3141 kdu_resource_data = self._get_kdu_resource_data(
3142 ee_relation, db_nsr, cached_vnfds
3143 )
3144 if kdu_resource_data:
3145 deployed_component = DeployedK8sResource(kdu_resource_data)
3146 return deployed_component
3147
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Establish *relation* if both of its endpoints are already deployed.

        :param vca_type: key into self.vca_map selecting the VCA connector
        :param cached_vnfds: memoization dict for VNFD lookups
        :param cached_vnfrs: memoization dict for VNFR lookups
        :return: True when the relation was added (caller can drop it from its
            pending list); False when an endpoint is not ready yet.
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # both sides must exist and have their configuration sw installed
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints have no vnf_profile_id, hence no VNFR
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
            # remove entry from relations list
            return True
        return False
3205
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Add all NS- and VNF-level relations of the VCA at *vca_index*.

        Polls every 5 seconds, re-reading the nsr record, until all peer
        endpoints are deployed and every relation has been added, or until
        *timeout* seconds have elapsed.

        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :return: True when all relations were added (or none exist); False on
            timeout or on any error (errors are logged, not raised).
        """
        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # iterate over a copy: successfully added relations are removed
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3277
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its k8s cluster and update the nsrs/vnfrs records.

        Generates (or reuses) the kdu-instance name, triggers the install via
        the proper k8s connector (self.k8scluster_map), stores the exposed
        services and the management IP in the VNFR, and runs the KDU
        initial-config-primitives when no juju execution environment handles
        them.

        :param nsr_db_path: dot-path inside the nsr record holding this KDU
            deployment info (e.g. _admin.deployed.K8s.<index>)
        :param kdu_index: index of this kdur inside the VNFR "kdur" list
        :param k8params: KDU instantiation parameters, already parsed
        :param timeout: seconds allowed for the install and for each primitive
        :param vca_id: VCA id to use, if any
        :return: the kdu_instance name
        :raises: re-raises any failure after recording the error in nsrs/vnfrs
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honor a user-provided deployment name, otherwise generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # deployed service names are prefixed by the
                        # descriptor mgmt-service name
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for-else: no deployed service matched this mgmt-service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial-config-primitives only when no juju EE handles them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3474
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU present in the VNFRs.

        For each kdur of each VNFR: resolves the k8s cluster uuid (initializing
        helm-v3 data for clusters created before helm v3 support), synchronizes
        helm repos once per cluster, records the deployment under
        _admin.deployed.K8s.<index> in the nsr, and spawns one _install_kdu
        task per KDU, registered in lcm_tasks and reported through
        *task_instantiation_info*.

        :param db_vnfrs: dict of VNFR records (values are iterated)
        :param db_vnfds: list of VNFD records, looked up by "_id"
        :param task_instantiation_info: dict filled with task -> description
        :raises LcmException: on descriptor or cluster errors; any other
            exception is wrapped into LcmException.
        """
        # Launch kdus if present in the descriptor

        # memoization: cluster-id -> cluster-uuid, per connector type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the internal uuid of a k8s cluster,
            # waiting for any in-progress k8scluster task first.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3746
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create/reuse VCA entries for each execution environment of
        *descriptor_config* and launch one instantiate_N2VC task per EE.

        Determines the vca_type (lxc/native/k8s proxy charm, helm, helm-v3)
        from each execution-environment item, reuses a matching entry in
        _admin.deployed.VCA or appends a new one, and registers the spawned
        task in lcm_tasks and *task_instantiation_info*.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive the VCA type from the execution environment descriptor
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an existing deployed-VCA entry for this EE
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3899
3900 @staticmethod
3901 def _create_nslcmop(nsr_id, operation, params):
3902 """
3903 Creates a ns-lcm-opp content to be stored at database.
3904 :param nsr_id: internal id of the instance
3905 :param operation: instantiate, terminate, scale, action, ...
3906 :param params: user parameters for the operation
3907 :return: dictionary following SOL005 format
3908 """
3909 # Raise exception if invalid arguments
3910 if not (nsr_id and operation and params):
3911 raise LcmException(
3912 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3913 )
3914 now = time()
3915 _id = str(uuid4())
3916 nslcmop = {
3917 "id": _id,
3918 "_id": _id,
3919 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3920 "operationState": "PROCESSING",
3921 "statusEnteredTime": now,
3922 "nsInstanceId": nsr_id,
3923 "lcmOperationType": operation,
3924 "startTime": now,
3925 "isAutomaticInvocation": False,
3926 "operationParams": params,
3927 "isCancelPending": False,
3928 "links": {
3929 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3930 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3931 },
3932 }
3933 return nslcmop
3934
3935 def _format_additional_params(self, params):
3936 params = params or {}
3937 for key, value in params.items():
3938 if str(value).startswith("!!yaml "):
3939 params[key] = yaml.safe_load(value[7:])
3940 return params
3941
3942 def _get_terminate_primitive_params(self, seq, vnf_index):
3943 primitive = seq.get("name")
3944 primitive_params = {}
3945 params = {
3946 "member_vnf_index": vnf_index,
3947 "primitive": primitive,
3948 "primitive_params": primitive_params,
3949 }
3950 desc_params = {}
3951 return self._map_primitive_params(seq, params, desc_params)
3952
3953 # sub-operations
3954
3955 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3956 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3957 if op.get("operationState") == "COMPLETED":
3958 # b. Skip sub-operation
3959 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3960 return self.SUBOPERATION_STATUS_SKIP
3961 else:
3962 # c. retry executing sub-operation
3963 # The sub-operation exists, and operationState != 'COMPLETED'
3964 # Update operationState = 'PROCESSING' to indicate a retry.
3965 operationState = "PROCESSING"
3966 detailed_status = "In progress"
3967 self._update_suboperation_status(
3968 db_nslcmop, op_index, operationState, detailed_status
3969 )
3970 # Return the sub-operation index
3971 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3972 # with arguments extracted from the sub-operation
3973 return op_index
3974
3975 # Find a sub-operation where all keys in a matching dictionary must match
3976 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3977 def _find_suboperation(self, db_nslcmop, match):
3978 if db_nslcmop and match:
3979 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3980 for i, op in enumerate(op_list):
3981 if all(op.get(k) == match[k] for k in match):
3982 return i
3983 return self.SUBOPERATION_STATUS_NOT_FOUND
3984
3985 # Update status for a sub-operation given its index
3986 def _update_suboperation_status(
3987 self, db_nslcmop, op_index, operationState, detailed_status
3988 ):
3989 # Update DB for HA tasks
3990 q_filter = {"_id": db_nslcmop["_id"]}
3991 update_dict = {
3992 "_admin.operations.{}.operationState".format(op_index): operationState,
3993 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3994 }
3995 self.db.set_one(
3996 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3997 )
3998
3999 # Add sub-operation, return the index of the added sub-operation
4000 # Optionally, set operationState, detailed-status, and operationType
4001 # Status and type are currently set for 'scale' sub-operations:
4002 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4003 # 'detailed-status' : status message
4004 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4005 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4006 def _add_suboperation(
4007 self,
4008 db_nslcmop,
4009 vnf_index,
4010 vdu_id,
4011 vdu_count_index,
4012 vdu_name,
4013 primitive,
4014 mapped_primitive_params,
4015 operationState=None,
4016 detailed_status=None,
4017 operationType=None,
4018 RO_nsr_id=None,
4019 RO_scaling_info=None,
4020 ):
4021 if not db_nslcmop:
4022 return self.SUBOPERATION_STATUS_NOT_FOUND
4023 # Get the "_admin.operations" list, if it exists
4024 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4025 op_list = db_nslcmop_admin.get("operations")
4026 # Create or append to the "_admin.operations" list
4027 new_op = {
4028 "member_vnf_index": vnf_index,
4029 "vdu_id": vdu_id,
4030 "vdu_count_index": vdu_count_index,
4031 "primitive": primitive,
4032 "primitive_params": mapped_primitive_params,
4033 }
4034 if operationState:
4035 new_op["operationState"] = operationState
4036 if detailed_status:
4037 new_op["detailed-status"] = detailed_status
4038 if operationType:
4039 new_op["lcmOperationType"] = operationType
4040 if RO_nsr_id:
4041 new_op["RO_nsr_id"] = RO_nsr_id
4042 if RO_scaling_info:
4043 new_op["RO_scaling_info"] = RO_scaling_info
4044 if not op_list:
4045 # No existing operations, create key 'operations' with current operation as first list element
4046 db_nslcmop_admin.update({"operations": [new_op]})
4047 op_list = db_nslcmop_admin.get("operations")
4048 else:
4049 # Existing operations, append operation to list
4050 op_list.append(new_op)
4051
4052 db_nslcmop_update = {"_admin.operations": op_list}
4053 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4054 op_index = len(op_list) - 1
4055 return op_index
4056
4057 # Helper methods for scale() sub-operations
4058
4059 # pre-scale/post-scale:
4060 # Check for 3 different cases:
4061 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4062 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4063 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4064 def _check_or_add_scale_suboperation(
4065 self,
4066 db_nslcmop,
4067 vnf_index,
4068 vnf_config_primitive,
4069 primitive_params,
4070 operationType,
4071 RO_nsr_id=None,
4072 RO_scaling_info=None,
4073 ):
4074 # Find this sub-operation
4075 if RO_nsr_id and RO_scaling_info:
4076 operationType = "SCALE-RO"
4077 match = {
4078 "member_vnf_index": vnf_index,
4079 "RO_nsr_id": RO_nsr_id,
4080 "RO_scaling_info": RO_scaling_info,
4081 }
4082 else:
4083 match = {
4084 "member_vnf_index": vnf_index,
4085 "primitive": vnf_config_primitive,
4086 "primitive_params": primitive_params,
4087 "lcmOperationType": operationType,
4088 }
4089 op_index = self._find_suboperation(db_nslcmop, match)
4090 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4091 # a. New sub-operation
4092 # The sub-operation does not exist, add it.
4093 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4094 # The following parameters are set to None for all kind of scaling:
4095 vdu_id = None
4096 vdu_count_index = None
4097 vdu_name = None
4098 if RO_nsr_id and RO_scaling_info:
4099 vnf_config_primitive = None
4100 primitive_params = None
4101 else:
4102 RO_nsr_id = None
4103 RO_scaling_info = None
4104 # Initial status for sub-operation
4105 operationState = "PROCESSING"
4106 detailed_status = "In progress"
4107 # Add sub-operation for pre/post-scaling (zero or more operations)
4108 self._add_suboperation(
4109 db_nslcmop,
4110 vnf_index,
4111 vdu_id,
4112 vdu_count_index,
4113 vdu_name,
4114 vnf_config_primitive,
4115 primitive_params,
4116 operationState,
4117 detailed_status,
4118 operationType,
4119 RO_nsr_id,
4120 RO_scaling_info,
4121 )
4122 return self.SUBOPERATION_STATUS_NEW
4123 else:
4124 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4125 # or op_index (operationState != 'COMPLETED')
4126 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4127
4128 # Function to return execution_environment id
4129
4130 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4131 # TODO vdu_index_count
4132 for vca in vca_deployed_list:
4133 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4134 return vca["ee_id"]
4135
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for every log line written by this coroutine
        :param db_nslcmop: nslcmop database record, used for sub-operation bookkeeping
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not
            completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier, forwarded to primitive execution and EE deletion
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value kept for backward compatibility with older VCA records
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so an HA retry can resume from here
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4241
4242 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4243 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4244 namespace = "." + db_nsr["_id"]
4245 try:
4246 await self.n2vc.delete_namespace(
4247 namespace=namespace,
4248 total_timeout=self.timeout_charm_delete,
4249 vca_id=vca_id,
4250 )
4251 except N2VCNotFound: # already deleted. Skip
4252 pass
4253 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4254
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text: prefix for log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation record id
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None; raises LcmException when any deletion step failed
        """
        db_nsr_update = {}
        failed_detail = []  # accumulated error strings; raised joined at the end
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                # action_id is persisted so an interrupted termination can resume waiting
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                # Poll RO every 5s until the delete action finishes or times out
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # delete action completed at the VIM
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write status to db only when it changed, to avoid db churn
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    # NOTE(review): asyncio.sleep's "loop" argument is removed in
                    # Python 3.10+ — confirm the target runtime still accepts it
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only attempted when the ns deletion itself succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the per-VNF descriptors pushed to RO, one by one
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4454
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance.

        Runs in three stages tracked in ``stage`` ([stage, step, VIM-status]):
        1) prepare and read the nsr/nslcmop records, 2) execute the terminate
        primitives of each deployed VCA, 3) delete all execution environments,
        KDU instances and the RO/VIM deployment. The final operation state is
        written to the database and published on kafka ("ns"/"terminated").

        :param nsr_id: NS record id to terminate
        :param nslcmop_id: NS LCM operation record id driving the termination
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps each created task -> human-readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so concurrent db changes do not affect this task
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed; finalization still runs in "finally"
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each VNFD so it is fetched only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching this VCA's level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # stop here; finalization in "finally" reports the errors
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (new-generation RO client when available)
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    # propagate the final state to every VNFR of this NS
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) about the termination result
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4783
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Await a set of asyncio tasks, reporting progress and collecting errors.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping each task to a human-readable description
        :param timeout: overall timeout in seconds for the whole set of tasks
        :param stage: 3-element status list; index 1 is updated with "done/total"
        :param nslcmop_id: operation record id where progress is written
        :param nsr_id: when provided, errors are also written to this nsr record
        :return: list of error-detail strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining budget shrinks as time passes
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/domain exceptions are logged briefly; anything else
                    # gets its full traceback logged for debugging
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4860
4861 @staticmethod
4862 def _map_primitive_params(primitive_desc, params, instantiation_params):
4863 """
4864 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4865 The default-value is used. If it is between < > it look for a value at instantiation_params
4866 :param primitive_desc: portion of VNFD/NSD that describes primitive
4867 :param params: Params provided by user
4868 :param instantiation_params: Instantiation params provided by user
4869 :return: a dictionary with the calculated params
4870 """
4871 calculated_params = {}
4872 for parameter in primitive_desc.get("parameter", ()):
4873 param_name = parameter["name"]
4874 if param_name in params:
4875 calculated_params[param_name] = params[param_name]
4876 elif "default-value" in parameter or "value" in parameter:
4877 if "value" in parameter:
4878 calculated_params[param_name] = parameter["value"]
4879 else:
4880 calculated_params[param_name] = parameter["default-value"]
4881 if (
4882 isinstance(calculated_params[param_name], str)
4883 and calculated_params[param_name].startswith("<")
4884 and calculated_params[param_name].endswith(">")
4885 ):
4886 if calculated_params[param_name][1:-1] in instantiation_params:
4887 calculated_params[param_name] = instantiation_params[
4888 calculated_params[param_name][1:-1]
4889 ]
4890 else:
4891 raise LcmException(
4892 "Parameter {} needed to execute primitive {} not provided".format(
4893 calculated_params[param_name], primitive_desc["name"]
4894 )
4895 )
4896 else:
4897 raise LcmException(
4898 "Parameter {} needed to execute primitive {} not provided".format(
4899 param_name, primitive_desc["name"]
4900 )
4901 )
4902
4903 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4904 calculated_params[param_name] = yaml.safe_dump(
4905 calculated_params[param_name], default_flow_style=True, width=256
4906 )
4907 elif isinstance(calculated_params[param_name], str) and calculated_params[
4908 param_name
4909 ].startswith("!!yaml "):
4910 calculated_params[param_name] = calculated_params[param_name][7:]
4911 if parameter.get("data-type") == "INTEGER":
4912 try:
4913 calculated_params[param_name] = int(calculated_params[param_name])
4914 except ValueError: # error converting string to int
4915 raise LcmException(
4916 "Parameter {} of primitive {} must be integer".format(
4917 param_name, primitive_desc["name"]
4918 )
4919 )
4920 elif parameter.get("data-type") == "BOOLEAN":
4921 calculated_params[param_name] = not (
4922 (str(calculated_params[param_name])).lower() == "false"
4923 )
4924
4925 # add always ns_config_info if primitive name is config
4926 if primitive_desc["name"] == "config":
4927 if "ns_config_info" in instantiation_params:
4928 calculated_params["ns_config_info"] = instantiation_params[
4929 "ns_config_info"
4930 ]
4931 return calculated_params
4932
4933 def _look_for_deployed_vca(
4934 self,
4935 deployed_vca,
4936 member_vnf_index,
4937 vdu_id,
4938 vdu_count_index,
4939 kdu_name=None,
4940 ee_descriptor_id=None,
4941 ):
4942 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4943 for vca in deployed_vca:
4944 if not vca:
4945 continue
4946 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4947 continue
4948 if (
4949 vdu_count_index is not None
4950 and vdu_count_index != vca["vdu_count_index"]
4951 ):
4952 continue
4953 if kdu_name and kdu_name != vca["kdu_name"]:
4954 continue
4955 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4956 continue
4957 break
4958 else:
4959 # vca_deployed not found
4960 raise LcmException(
4961 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4962 " is not deployed".format(
4963 member_vnf_index,
4964 vdu_id,
4965 vdu_count_index,
4966 kdu_name,
4967 ee_descriptor_id,
4968 )
4969 )
4970 # get ee_id
4971 ee_id = vca.get("ee_id")
4972 vca_type = vca.get(
4973 "type", "lxc_proxy_charm"
4974 ) # default value for backward compatibility - proxy charm
4975 if not ee_id:
4976 raise LcmException(
4977 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4978 "execution environment".format(
4979 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4980 )
4981 )
4982 return ee_id, vca_type
4983
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive on a VCA execution environment, with optional retries.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; "config" gets its params wrapped as {"params": ...}
        :param primitive_params: dict of parameters passed to the primitive
        :param retries: number of extra attempts after a failed one (0 = single attempt)
        :param retries_interval: seconds to wait between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
        :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: db location where the connector reports execution status
        :param vca_id: VCA identifier
        :return: ("COMPLETED", output) on success; ("FAILED", error) when retries are
            exhausted; ("FAIL", error) on an unexpected outer exception
        """
        try:
            # the "config" primitive expects its parameters nested under "params"
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            # attempt the primitive up to (retries + 1) times
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    # normalize the timeout into a readable message
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # retries exhausted: report the last error
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # NOTE(review): returns "FAIL" here but "FAILED" above — confirm
            # whether callers rely on this distinction before unifying
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
5042
5043 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5044 """
5045 Updating the vca_status with latest juju information in nsrs record
5046 :param: nsr_id: Id of the nsr
5047 :param: nslcmop_id: Id of the nslcmop
5048 :return: None
5049 """
5050
5051 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5052 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5053 vca_id = self.get_vca_id({}, db_nsr)
5054 if db_nsr["_admin"]["deployed"]["K8s"]:
5055 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5056 cluster_uuid, kdu_instance, cluster_type = (
5057 k8s["k8scluster-uuid"],
5058 k8s["kdu-instance"],
5059 k8s["k8scluster-type"],
5060 )
5061 await self._on_update_k8s_db(
5062 cluster_uuid=cluster_uuid,
5063 kdu_instance=kdu_instance,
5064 filter={"_id": nsr_id},
5065 vca_id=vca_id,
5066 cluster_type=cluster_type,
5067 )
5068 else:
5069 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5070 table, filter = "nsrs", {"_id": nsr_id}
5071 path = "_admin.deployed.VCA.{}.".format(vca_index)
5072 await self._on_update_n2vc_db(table, filter, path, {})
5073
5074 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5075 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5076
    async def action(self, nsr_id, nslcmop_id):
        """Execute a day-2 primitive (action) on an NS, VNF, VDU or KDU.

        Reads the nslcmop operationParams to locate the target (member_vnf_index,
        vdu_id, kdu_name, vdu_count_index) and the primitive to run, dispatches
        it either to the K8s cluster connector (for KDU actions) or to the VCA
        execution environment, and records the result in the nslcmop/nsr records.

        :param nsr_id: NS instance id
        :param nslcmop_id: NS LCM operation id describing the action
        :return: (nslcmop_operation_state, detailed_status)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params is stored JSON-encoded in the nslcmop record
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # decode the JSON-encoded additionalParams of each kdur
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound when vnf_index is truthy; for an
            # NS-level action this line would raise NameError — confirm whether
            # NS-level actions reach this path in practice
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU-native operations do not need a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # gather the additionalParams of the addressed target as descriptor params
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops reuse the name "primitive", clobbering
                # the operation's primitive read from operationParams above
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound inside the block above; when
            # kdu_name is set but get_configuration() is falsy and primitive_name
            # is not upgrade/rollback/status, this condition would raise NameError
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model may be overridden via the action params
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        # strip a ":version" suffix when the model is not a full URL
                        if kdu_model.count("/") < 2:
                            parts = kdu_model.split(sep=":")
                            if len(parts) == 2:
                                kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # generic KDU action declared in the descriptor
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # VCA (charm) primitive execution
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist outcome and release the task regardless of how we exited
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the result over kafka; best-effort, never masks the outcome
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5448
5449 async def terminate_vdus(
5450 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5451 ):
5452 """This method terminates VDUs
5453
5454 Args:
5455 db_vnfr: VNF instance record
5456 member_vnf_index: VNF index to identify the VDUs to be removed
5457 db_nsr: NS instance record
5458 update_db_nslcmops: Nslcmop update record
5459 """
5460 vca_scaling_info = []
5461 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5462 scaling_info["scaling_direction"] = "IN"
5463 scaling_info["vdu-delete"] = {}
5464 scaling_info["kdu-delete"] = {}
5465 db_vdur = db_vnfr.get("vdur")
5466 vdur_list = copy(db_vdur)
5467 count_index = 0
5468 for index, vdu in enumerate(vdur_list):
5469 vca_scaling_info.append(
5470 {
5471 "osm_vdu_id": vdu["vdu-id-ref"],
5472 "member-vnf-index": member_vnf_index,
5473 "type": "delete",
5474 "vdu_index": count_index,
5475 }
5476 )
5477 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5478 scaling_info["vdu"].append(
5479 {
5480 "name": vdu.get("name") or vdu.get("vdu-name"),
5481 "vdu_id": vdu["vdu-id-ref"],
5482 "interface": [],
5483 }
5484 )
5485 for interface in vdu["interfaces"]:
5486 scaling_info["vdu"][index]["interface"].append(
5487 {
5488 "name": interface["name"],
5489 "ip_address": interface["ip-address"],
5490 "mac_address": interface.get("mac-address"),
5491 }
5492 )
5493 self.logger.info("NS update scaling info{}".format(scaling_info))
5494 stage[2] = "Terminating VDUs"
5495 if scaling_info.get("vdu-delete"):
5496 # scale_process = "RO"
5497 if self.ro_config.get("ng"):
5498 await self._scale_ng_ro(
5499 logging_text,
5500 db_nsr,
5501 update_db_nslcmops,
5502 db_vnfr,
5503 scaling_info,
5504 stage,
5505 )
5506
5507 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5508 """This method is to Remove VNF instances from NS.
5509
5510 Args:
5511 nsr_id: NS instance id
5512 nslcmop_id: nslcmop id of update
5513 vnf_instance_id: id of the VNF instance to be removed
5514
5515 Returns:
5516 result: (str, str) COMPLETED/FAILED, details
5517 """
5518 try:
5519 db_nsr_update = {}
5520 logging_text = "Task ns={} update ".format(nsr_id)
5521 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5522 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5523 if check_vnfr_count > 1:
5524 stage = ["", "", ""]
5525 step = "Getting nslcmop from database"
5526 self.logger.debug(
5527 step + " after having waited for previous tasks to be completed"
5528 )
5529 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5530 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5531 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5532 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5533 """ db_vnfr = self.db.get_one(
5534 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5535
5536 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5537 await self.terminate_vdus(
5538 db_vnfr,
5539 member_vnf_index,
5540 db_nsr,
5541 update_db_nslcmops,
5542 stage,
5543 logging_text,
5544 )
5545
5546 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5547 constituent_vnfr.remove(db_vnfr.get("_id"))
5548 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5549 "constituent-vnfr-ref"
5550 )
5551 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5552 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5553 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5554 return "COMPLETED", "Done"
5555 else:
5556 step = "Terminate VNF Failed with"
5557 raise LcmException(
5558 "{} Cannot terminate the last VNF in this NS.".format(
5559 vnf_instance_id
5560 )
5561 )
5562 except (LcmException, asyncio.CancelledError):
5563 raise
5564 except Exception as e:
5565 self.logger.debug("Error removing VNF {}".format(e))
5566 return "FAILED", "Error removing VNF {}".format(e)
5567
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the current VDUs of the VNF, rewrites the vnfr record
        (revision, connection points, vdur from operationParams.newVdur) and
        asks the NG-RO to instantiate the new resources.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index is never incremented, so every new VDU
            # is registered with index 0 — confirm intended
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur list is precomputed by the caller in operationParams
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the vnfr so the scaling step sees the updated record
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5693
5694 async def _ns_charm_upgrade(
5695 self,
5696 ee_id,
5697 charm_id,
5698 charm_type,
5699 path,
5700 timeout: float = None,
5701 ) -> (str, str):
5702 """This method upgrade charms in VNF instances
5703
5704 Args:
5705 ee_id: Execution environment id
5706 path: Local path to the charm
5707 charm_id: charm-id
5708 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5709 timeout: (Float) Timeout for the ns update operation
5710
5711 Returns:
5712 result: (str, str) COMPLETED/FAILED, details
5713 """
5714 try:
5715 charm_type = charm_type or "lxc_proxy_charm"
5716 output = await self.vca_map[charm_type].upgrade_charm(
5717 ee_id=ee_id,
5718 path=path,
5719 charm_id=charm_id,
5720 charm_type=charm_type,
5721 timeout=timeout or self.timeout_ns_update,
5722 )
5723
5724 if output:
5725 return "COMPLETED", output
5726
5727 except (LcmException, asyncio.CancelledError):
5728 raise
5729
5730 except Exception as e:
5731 self.logger.debug("Error upgrading charm {}".format(path))
5732
5733 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5734
5735 async def update(self, nsr_id, nslcmop_id):
5736 """Update NS according to different update types
5737
5738 This method performs upgrade of VNF instances then updates the revision
5739 number in VNF record
5740
5741 Args:
5742 nsr_id: Network service will be updated
5743 nslcmop_id: ns lcm operation id
5744
5745 Returns:
5746 It may raise DbException, LcmException, N2VCException, K8sException
5747
5748 """
5749 # Try to lock HA task here
5750 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5751 if not task_is_locked_by_me:
5752 return
5753
5754 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5755 self.logger.debug(logging_text + "Enter")
5756
5757 # Set the required variables to be filled up later
5758 db_nsr = None
5759 db_nslcmop_update = {}
5760 vnfr_update = {}
5761 nslcmop_operation_state = None
5762 db_nsr_update = {}
5763 error_description_nslcmop = ""
5764 exc = None
5765 change_type = "updated"
5766 detailed_status = ""
5767
5768 try:
5769 # wait for any previous tasks in process
5770 step = "Waiting for previous operations to terminate"
5771 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5772 self._write_ns_status(
5773 nsr_id=nsr_id,
5774 ns_state=None,
5775 current_operation="UPDATING",
5776 current_operation_id=nslcmop_id,
5777 )
5778
5779 step = "Getting nslcmop from database"
5780 db_nslcmop = self.db.get_one(
5781 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5782 )
5783 update_type = db_nslcmop["operationParams"]["updateType"]
5784
5785 step = "Getting nsr from database"
5786 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5787 old_operational_status = db_nsr["operational-status"]
5788 db_nsr_update["operational-status"] = "updating"
5789 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5790 nsr_deployed = db_nsr["_admin"].get("deployed")
5791
5792 if update_type == "CHANGE_VNFPKG":
5793 # Get the input parameters given through update request
5794 vnf_instance_id = db_nslcmop["operationParams"][
5795 "changeVnfPackageData"
5796 ].get("vnfInstanceId")
5797
5798 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5799 "vnfdId"
5800 )
5801 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5802
5803 step = "Getting vnfr from database"
5804 db_vnfr = self.db.get_one(
5805 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5806 )
5807
5808 step = "Getting vnfds from database"
5809 # Latest VNFD
5810 latest_vnfd = self.db.get_one(
5811 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5812 )
5813 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5814
5815 # Current VNFD
5816 current_vnf_revision = db_vnfr.get("revision", 1)
5817 current_vnfd = self.db.get_one(
5818 "vnfds_revisions",
5819 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5820 fail_on_empty=False,
5821 )
5822 # Charm artifact paths will be filled up later
5823 (
5824 current_charm_artifact_path,
5825 target_charm_artifact_path,
5826 charm_artifact_paths,
5827 ) = ([], [], [])
5828
5829 step = "Checking if revision has changed in VNFD"
5830 if current_vnf_revision != latest_vnfd_revision:
5831 change_type = "policy_updated"
5832
5833 # There is new revision of VNFD, update operation is required
5834 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5835 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5836
5837 step = "Removing the VNFD packages if they exist in the local path"
5838 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5839 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5840
5841 step = "Get the VNFD packages from FSMongo"
5842 self.fs.sync(from_path=latest_vnfd_path)
5843 self.fs.sync(from_path=current_vnfd_path)
5844
5845 step = (
5846 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5847 )
5848 base_folder = latest_vnfd["_admin"]["storage"]
5849
5850 for charm_index, charm_deployed in enumerate(
5851 get_iterable(nsr_deployed, "VCA")
5852 ):
5853 vnf_index = db_vnfr.get("member-vnf-index-ref")
5854
5855 # Getting charm-id and charm-type
5856 if charm_deployed.get("member-vnf-index") == vnf_index:
5857 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5858 charm_type = charm_deployed.get("type")
5859
5860 # Getting ee-id
5861 ee_id = charm_deployed.get("ee_id")
5862
5863 step = "Getting descriptor config"
5864 descriptor_config = get_configuration(
5865 current_vnfd, current_vnfd["id"]
5866 )
5867
5868 if "execution-environment-list" in descriptor_config:
5869 ee_list = descriptor_config.get(
5870 "execution-environment-list", []
5871 )
5872 else:
5873 ee_list = []
5874
5875 # There could be several charm used in the same VNF
5876 for ee_item in ee_list:
5877 if ee_item.get("juju"):
5878 step = "Getting charm name"
5879 charm_name = ee_item["juju"].get("charm")
5880
5881 step = "Setting Charm artifact paths"
5882 current_charm_artifact_path.append(
5883 get_charm_artifact_path(
5884 base_folder,
5885 charm_name,
5886 charm_type,
5887 current_vnf_revision,
5888 )
5889 )
5890 target_charm_artifact_path.append(
5891 get_charm_artifact_path(
5892 base_folder,
5893 charm_name,
5894 charm_type,
5895 latest_vnfd_revision,
5896 )
5897 )
5898
5899 charm_artifact_paths = zip(
5900 current_charm_artifact_path, target_charm_artifact_path
5901 )
5902
5903 step = "Checking if software version has changed in VNFD"
5904 if find_software_version(current_vnfd) != find_software_version(
5905 latest_vnfd
5906 ):
5907 step = "Checking if existing VNF has charm"
5908 for current_charm_path, target_charm_path in list(
5909 charm_artifact_paths
5910 ):
5911 if current_charm_path:
5912 raise LcmException(
5913 "Software version change is not supported as VNF instance {} has charm.".format(
5914 vnf_instance_id
5915 )
5916 )
5917
5918 # There is no change in the charm package, then redeploy the VNF
5919 # based on new descriptor
5920 step = "Redeploying VNF"
5921 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5922 (result, detailed_status) = await self._ns_redeploy_vnf(
5923 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5924 )
5925 if result == "FAILED":
5926 nslcmop_operation_state = result
5927 error_description_nslcmop = detailed_status
5928 db_nslcmop_update["detailed-status"] = detailed_status
5929 self.logger.debug(
5930 logging_text
5931 + " step {} Done with result {} {}".format(
5932 step, nslcmop_operation_state, detailed_status
5933 )
5934 )
5935
5936 else:
5937 step = "Checking if any charm package has changed or not"
5938 for current_charm_path, target_charm_path in list(
5939 charm_artifact_paths
5940 ):
5941 if (
5942 current_charm_path
5943 and target_charm_path
5944 and self.check_charm_hash_changed(
5945 current_charm_path, target_charm_path
5946 )
5947 ):
5948 step = "Checking whether VNF uses juju bundle"
5949 if check_juju_bundle_existence(current_vnfd):
5950 raise LcmException(
5951 "Charm upgrade is not supported for the instance which"
5952 " uses juju-bundle: {}".format(
5953 check_juju_bundle_existence(current_vnfd)
5954 )
5955 )
5956
5957 step = "Upgrading Charm"
5958 (
5959 result,
5960 detailed_status,
5961 ) = await self._ns_charm_upgrade(
5962 ee_id=ee_id,
5963 charm_id=charm_id,
5964 charm_type=charm_type,
5965 path=self.fs.path + target_charm_path,
5966 timeout=timeout_seconds,
5967 )
5968
5969 if result == "FAILED":
5970 nslcmop_operation_state = result
5971 error_description_nslcmop = detailed_status
5972
5973 db_nslcmop_update["detailed-status"] = detailed_status
5974 self.logger.debug(
5975 logging_text
5976 + " step {} Done with result {} {}".format(
5977 step, nslcmop_operation_state, detailed_status
5978 )
5979 )
5980
5981 step = "Updating policies"
5982 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5983 result = "COMPLETED"
5984 detailed_status = "Done"
5985 db_nslcmop_update["detailed-status"] = "Done"
5986
5987 # If nslcmop_operation_state is None, so any operation is not failed.
5988 if not nslcmop_operation_state:
5989 nslcmop_operation_state = "COMPLETED"
5990
5991 # If update CHANGE_VNFPKG nslcmop_operation is successful
5992 # vnf revision need to be updated
5993 vnfr_update["revision"] = latest_vnfd_revision
5994 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5995
5996 self.logger.debug(
5997 logging_text
5998 + " task Done with result {} {}".format(
5999 nslcmop_operation_state, detailed_status
6000 )
6001 )
6002 elif update_type == "REMOVE_VNF":
6003 # This part is included in https://osm.etsi.org/gerrit/11876
6004 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6005 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6006 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6007 step = "Removing VNF"
6008 (result, detailed_status) = await self.remove_vnf(
6009 nsr_id, nslcmop_id, vnf_instance_id
6010 )
6011 if result == "FAILED":
6012 nslcmop_operation_state = result
6013 error_description_nslcmop = detailed_status
6014 db_nslcmop_update["detailed-status"] = detailed_status
6015 change_type = "vnf_terminated"
6016 if not nslcmop_operation_state:
6017 nslcmop_operation_state = "COMPLETED"
6018 self.logger.debug(
6019 logging_text
6020 + " task Done with result {} {}".format(
6021 nslcmop_operation_state, detailed_status
6022 )
6023 )
6024
6025 elif update_type == "OPERATE_VNF":
6026 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6027 "vnfInstanceId"
6028 ]
6029 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6030 "changeStateTo"
6031 ]
6032 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6033 "additionalParam"
6034 ]
6035 (result, detailed_status) = await self.rebuild_start_stop(
6036 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6037 )
6038 if result == "FAILED":
6039 nslcmop_operation_state = result
6040 error_description_nslcmop = detailed_status
6041 db_nslcmop_update["detailed-status"] = detailed_status
6042 if not nslcmop_operation_state:
6043 nslcmop_operation_state = "COMPLETED"
6044 self.logger.debug(
6045 logging_text
6046 + " task Done with result {} {}".format(
6047 nslcmop_operation_state, detailed_status
6048 )
6049 )
6050
6051 # If nslcmop_operation_state is None, so any operation is not failed.
6052 # All operations are executed in overall.
6053 if not nslcmop_operation_state:
6054 nslcmop_operation_state = "COMPLETED"
6055 db_nsr_update["operational-status"] = old_operational_status
6056
6057 except (DbException, LcmException, N2VCException, K8sException) as e:
6058 self.logger.error(logging_text + "Exit Exception {}".format(e))
6059 exc = e
6060 except asyncio.CancelledError:
6061 self.logger.error(
6062 logging_text + "Cancelled Exception while '{}'".format(step)
6063 )
6064 exc = "Operation was cancelled"
6065 except asyncio.TimeoutError:
6066 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6067 exc = "Timeout"
6068 except Exception as e:
6069 exc = traceback.format_exc()
6070 self.logger.critical(
6071 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6072 exc_info=True,
6073 )
6074 finally:
6075 if exc:
6076 db_nslcmop_update[
6077 "detailed-status"
6078 ] = (
6079 detailed_status
6080 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6081 nslcmop_operation_state = "FAILED"
6082 db_nsr_update["operational-status"] = old_operational_status
6083 if db_nsr:
6084 self._write_ns_status(
6085 nsr_id=nsr_id,
6086 ns_state=db_nsr["nsState"],
6087 current_operation="IDLE",
6088 current_operation_id=None,
6089 other_update=db_nsr_update,
6090 )
6091
6092 self._write_op_status(
6093 op_id=nslcmop_id,
6094 stage="",
6095 error_message=error_description_nslcmop,
6096 operation_state=nslcmop_operation_state,
6097 other_update=db_nslcmop_update,
6098 )
6099
6100 if nslcmop_operation_state:
6101 try:
6102 msg = {
6103 "nsr_id": nsr_id,
6104 "nslcmop_id": nslcmop_id,
6105 "operationState": nslcmop_operation_state,
6106 }
6107 if change_type in ("vnf_terminated", "policy_updated"):
6108 msg.update({"vnf_member_index": member_vnf_index})
6109 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6110 except Exception as e:
6111 self.logger.error(
6112 logging_text + "kafka_write notification Exception {}".format(e)
6113 )
6114 self.logger.debug(logging_text + "Exit")
6115 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6116 return nslcmop_operation_state, detailed_status
6117
6118 async def scale(self, nsr_id, nslcmop_id):
6119 # Try to lock HA task here
6120 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6121 if not task_is_locked_by_me:
6122 return
6123
6124 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6125 stage = ["", "", ""]
6126 tasks_dict_info = {}
6127 # ^ stage, step, VIM progress
6128 self.logger.debug(logging_text + "Enter")
6129 # get all needed from database
6130 db_nsr = None
6131 db_nslcmop_update = {}
6132 db_nsr_update = {}
6133 exc = None
6134 # in case of error, indicates what part of scale was failed to put nsr at error status
6135 scale_process = None
6136 old_operational_status = ""
6137 old_config_status = ""
6138 nsi_id = None
6139 try:
6140 # wait for any previous tasks in process
6141 step = "Waiting for previous operations to terminate"
6142 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6143 self._write_ns_status(
6144 nsr_id=nsr_id,
6145 ns_state=None,
6146 current_operation="SCALING",
6147 current_operation_id=nslcmop_id,
6148 )
6149
6150 step = "Getting nslcmop from database"
6151 self.logger.debug(
6152 step + " after having waited for previous tasks to be completed"
6153 )
6154 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6155
6156 step = "Getting nsr from database"
6157 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6158 old_operational_status = db_nsr["operational-status"]
6159 old_config_status = db_nsr["config-status"]
6160
6161 step = "Parsing scaling parameters"
6162 db_nsr_update["operational-status"] = "scaling"
6163 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6164 nsr_deployed = db_nsr["_admin"].get("deployed")
6165
6166 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6167 "scaleByStepData"
6168 ]["member-vnf-index"]
6169 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6170 "scaleByStepData"
6171 ]["scaling-group-descriptor"]
6172 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6173 # for backward compatibility
6174 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6175 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6176 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6177 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6178
6179 step = "Getting vnfr from database"
6180 db_vnfr = self.db.get_one(
6181 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6182 )
6183
6184 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6185
6186 step = "Getting vnfd from database"
6187 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6188
6189 base_folder = db_vnfd["_admin"]["storage"]
6190
6191 step = "Getting scaling-group-descriptor"
6192 scaling_descriptor = find_in_list(
6193 get_scaling_aspect(db_vnfd),
6194 lambda scale_desc: scale_desc["name"] == scaling_group,
6195 )
6196 if not scaling_descriptor:
6197 raise LcmException(
6198 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6199 "at vnfd:scaling-group-descriptor".format(scaling_group)
6200 )
6201
6202 step = "Sending scale order to VIM"
6203 # TODO check if ns is in a proper status
6204 nb_scale_op = 0
6205 if not db_nsr["_admin"].get("scaling-group"):
6206 self.update_db_2(
6207 "nsrs",
6208 nsr_id,
6209 {
6210 "_admin.scaling-group": [
6211 {"name": scaling_group, "nb-scale-op": 0}
6212 ]
6213 },
6214 )
6215 admin_scale_index = 0
6216 else:
6217 for admin_scale_index, admin_scale_info in enumerate(
6218 db_nsr["_admin"]["scaling-group"]
6219 ):
6220 if admin_scale_info["name"] == scaling_group:
6221 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6222 break
6223 else: # not found, set index one plus last element and add new entry with the name
6224 admin_scale_index += 1
6225 db_nsr_update[
6226 "_admin.scaling-group.{}.name".format(admin_scale_index)
6227 ] = scaling_group
6228
6229 vca_scaling_info = []
6230 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6231 if scaling_type == "SCALE_OUT":
6232 if "aspect-delta-details" not in scaling_descriptor:
6233 raise LcmException(
6234 "Aspect delta details not fount in scaling descriptor {}".format(
6235 scaling_descriptor["name"]
6236 )
6237 )
6238 # count if max-instance-count is reached
6239 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6240
6241 scaling_info["scaling_direction"] = "OUT"
6242 scaling_info["vdu-create"] = {}
6243 scaling_info["kdu-create"] = {}
6244 for delta in deltas:
6245 for vdu_delta in delta.get("vdu-delta", {}):
6246 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6247 # vdu_index also provides the number of instance of the targeted vdu
6248 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6249 cloud_init_text = self._get_vdu_cloud_init_content(
6250 vdud, db_vnfd
6251 )
6252 if cloud_init_text:
6253 additional_params = (
6254 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6255 or {}
6256 )
6257 cloud_init_list = []
6258
6259 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6260 max_instance_count = 10
6261 if vdu_profile and "max-number-of-instances" in vdu_profile:
6262 max_instance_count = vdu_profile.get(
6263 "max-number-of-instances", 10
6264 )
6265
6266 default_instance_num = get_number_of_instances(
6267 db_vnfd, vdud["id"]
6268 )
6269 instances_number = vdu_delta.get("number-of-instances", 1)
6270 nb_scale_op += instances_number
6271
6272 new_instance_count = nb_scale_op + default_instance_num
6273 # Control if new count is over max and vdu count is less than max.
6274 # Then assign new instance count
6275 if new_instance_count > max_instance_count > vdu_count:
6276 instances_number = new_instance_count - max_instance_count
6277 else:
6278 instances_number = instances_number
6279
6280 if new_instance_count > max_instance_count:
6281 raise LcmException(
6282 "reached the limit of {} (max-instance-count) "
6283 "scaling-out operations for the "
6284 "scaling-group-descriptor '{}'".format(
6285 nb_scale_op, scaling_group
6286 )
6287 )
6288 for x in range(vdu_delta.get("number-of-instances", 1)):
6289 if cloud_init_text:
6290 # TODO Information of its own ip is not available because db_vnfr is not updated.
6291 additional_params["OSM"] = get_osm_params(
6292 db_vnfr, vdu_delta["id"], vdu_index + x
6293 )
6294 cloud_init_list.append(
6295 self._parse_cloud_init(
6296 cloud_init_text,
6297 additional_params,
6298 db_vnfd["id"],
6299 vdud["id"],
6300 )
6301 )
6302 vca_scaling_info.append(
6303 {
6304 "osm_vdu_id": vdu_delta["id"],
6305 "member-vnf-index": vnf_index,
6306 "type": "create",
6307 "vdu_index": vdu_index + x,
6308 }
6309 )
6310 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6311 for kdu_delta in delta.get("kdu-resource-delta", {}):
6312 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6313 kdu_name = kdu_profile["kdu-name"]
6314 resource_name = kdu_profile.get("resource-name", "")
6315
6316 # Might have different kdus in the same delta
6317 # Should have list for each kdu
6318 if not scaling_info["kdu-create"].get(kdu_name, None):
6319 scaling_info["kdu-create"][kdu_name] = []
6320
6321 kdur = get_kdur(db_vnfr, kdu_name)
6322 if kdur.get("helm-chart"):
6323 k8s_cluster_type = "helm-chart-v3"
6324 self.logger.debug("kdur: {}".format(kdur))
6325 if (
6326 kdur.get("helm-version")
6327 and kdur.get("helm-version") == "v2"
6328 ):
6329 k8s_cluster_type = "helm-chart"
6330 elif kdur.get("juju-bundle"):
6331 k8s_cluster_type = "juju-bundle"
6332 else:
6333 raise LcmException(
6334 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6335 "juju-bundle. Maybe an old NBI version is running".format(
6336 db_vnfr["member-vnf-index-ref"], kdu_name
6337 )
6338 )
6339
6340 max_instance_count = 10
6341 if kdu_profile and "max-number-of-instances" in kdu_profile:
6342 max_instance_count = kdu_profile.get(
6343 "max-number-of-instances", 10
6344 )
6345
6346 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6347 deployed_kdu, _ = get_deployed_kdu(
6348 nsr_deployed, kdu_name, vnf_index
6349 )
6350 if deployed_kdu is None:
6351 raise LcmException(
6352 "KDU '{}' for vnf '{}' not deployed".format(
6353 kdu_name, vnf_index
6354 )
6355 )
6356 kdu_instance = deployed_kdu.get("kdu-instance")
6357 instance_num = await self.k8scluster_map[
6358 k8s_cluster_type
6359 ].get_scale_count(
6360 resource_name,
6361 kdu_instance,
6362 vca_id=vca_id,
6363 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6364 kdu_model=deployed_kdu.get("kdu-model"),
6365 )
6366 kdu_replica_count = instance_num + kdu_delta.get(
6367 "number-of-instances", 1
6368 )
6369
6370 # Control if new count is over max and instance_num is less than max.
6371 # Then assign max instance number to kdu replica count
6372 if kdu_replica_count > max_instance_count > instance_num:
6373 kdu_replica_count = max_instance_count
6374 if kdu_replica_count > max_instance_count:
6375 raise LcmException(
6376 "reached the limit of {} (max-instance-count) "
6377 "scaling-out operations for the "
6378 "scaling-group-descriptor '{}'".format(
6379 instance_num, scaling_group
6380 )
6381 )
6382
6383 for x in range(kdu_delta.get("number-of-instances", 1)):
6384 vca_scaling_info.append(
6385 {
6386 "osm_kdu_id": kdu_name,
6387 "member-vnf-index": vnf_index,
6388 "type": "create",
6389 "kdu_index": instance_num + x - 1,
6390 }
6391 )
6392 scaling_info["kdu-create"][kdu_name].append(
6393 {
6394 "member-vnf-index": vnf_index,
6395 "type": "create",
6396 "k8s-cluster-type": k8s_cluster_type,
6397 "resource-name": resource_name,
6398 "scale": kdu_replica_count,
6399 }
6400 )
6401 elif scaling_type == "SCALE_IN":
6402 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6403
6404 scaling_info["scaling_direction"] = "IN"
6405 scaling_info["vdu-delete"] = {}
6406 scaling_info["kdu-delete"] = {}
6407
6408 for delta in deltas:
6409 for vdu_delta in delta.get("vdu-delta", {}):
6410 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6411 min_instance_count = 0
6412 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6413 if vdu_profile and "min-number-of-instances" in vdu_profile:
6414 min_instance_count = vdu_profile["min-number-of-instances"]
6415
6416 default_instance_num = get_number_of_instances(
6417 db_vnfd, vdu_delta["id"]
6418 )
6419 instance_num = vdu_delta.get("number-of-instances", 1)
6420 nb_scale_op -= instance_num
6421
6422 new_instance_count = nb_scale_op + default_instance_num
6423
6424 if new_instance_count < min_instance_count < vdu_count:
6425 instances_number = min_instance_count - new_instance_count
6426 else:
6427 instances_number = instance_num
6428
6429 if new_instance_count < min_instance_count:
6430 raise LcmException(
6431 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6432 "scaling-group-descriptor '{}'".format(
6433 nb_scale_op, scaling_group
6434 )
6435 )
6436 for x in range(vdu_delta.get("number-of-instances", 1)):
6437 vca_scaling_info.append(
6438 {
6439 "osm_vdu_id": vdu_delta["id"],
6440 "member-vnf-index": vnf_index,
6441 "type": "delete",
6442 "vdu_index": vdu_index - 1 - x,
6443 }
6444 )
6445 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6446 for kdu_delta in delta.get("kdu-resource-delta", {}):
6447 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6448 kdu_name = kdu_profile["kdu-name"]
6449 resource_name = kdu_profile.get("resource-name", "")
6450
6451 if not scaling_info["kdu-delete"].get(kdu_name, None):
6452 scaling_info["kdu-delete"][kdu_name] = []
6453
6454 kdur = get_kdur(db_vnfr, kdu_name)
6455 if kdur.get("helm-chart"):
6456 k8s_cluster_type = "helm-chart-v3"
6457 self.logger.debug("kdur: {}".format(kdur))
6458 if (
6459 kdur.get("helm-version")
6460 and kdur.get("helm-version") == "v2"
6461 ):
6462 k8s_cluster_type = "helm-chart"
6463 elif kdur.get("juju-bundle"):
6464 k8s_cluster_type = "juju-bundle"
6465 else:
6466 raise LcmException(
6467 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6468 "juju-bundle. Maybe an old NBI version is running".format(
6469 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6470 )
6471 )
6472
6473 min_instance_count = 0
6474 if kdu_profile and "min-number-of-instances" in kdu_profile:
6475 min_instance_count = kdu_profile["min-number-of-instances"]
6476
6477 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6478 deployed_kdu, _ = get_deployed_kdu(
6479 nsr_deployed, kdu_name, vnf_index
6480 )
6481 if deployed_kdu is None:
6482 raise LcmException(
6483 "KDU '{}' for vnf '{}' not deployed".format(
6484 kdu_name, vnf_index
6485 )
6486 )
6487 kdu_instance = deployed_kdu.get("kdu-instance")
6488 instance_num = await self.k8scluster_map[
6489 k8s_cluster_type
6490 ].get_scale_count(
6491 resource_name,
6492 kdu_instance,
6493 vca_id=vca_id,
6494 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6495 kdu_model=deployed_kdu.get("kdu-model"),
6496 )
6497 kdu_replica_count = instance_num - kdu_delta.get(
6498 "number-of-instances", 1
6499 )
6500
6501 if kdu_replica_count < min_instance_count < instance_num:
6502 kdu_replica_count = min_instance_count
6503 if kdu_replica_count < min_instance_count:
6504 raise LcmException(
6505 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6506 "scaling-group-descriptor '{}'".format(
6507 instance_num, scaling_group
6508 )
6509 )
6510
6511 for x in range(kdu_delta.get("number-of-instances", 1)):
6512 vca_scaling_info.append(
6513 {
6514 "osm_kdu_id": kdu_name,
6515 "member-vnf-index": vnf_index,
6516 "type": "delete",
6517 "kdu_index": instance_num - x - 1,
6518 }
6519 )
6520 scaling_info["kdu-delete"][kdu_name].append(
6521 {
6522 "member-vnf-index": vnf_index,
6523 "type": "delete",
6524 "k8s-cluster-type": k8s_cluster_type,
6525 "resource-name": resource_name,
6526 "scale": kdu_replica_count,
6527 }
6528 )
6529
6530 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6531 vdu_delete = copy(scaling_info.get("vdu-delete"))
6532 if scaling_info["scaling_direction"] == "IN":
6533 for vdur in reversed(db_vnfr["vdur"]):
6534 if vdu_delete.get(vdur["vdu-id-ref"]):
6535 vdu_delete[vdur["vdu-id-ref"]] -= 1
6536 scaling_info["vdu"].append(
6537 {
6538 "name": vdur.get("name") or vdur.get("vdu-name"),
6539 "vdu_id": vdur["vdu-id-ref"],
6540 "interface": [],
6541 }
6542 )
6543 for interface in vdur["interfaces"]:
6544 scaling_info["vdu"][-1]["interface"].append(
6545 {
6546 "name": interface["name"],
6547 "ip_address": interface["ip-address"],
6548 "mac_address": interface.get("mac-address"),
6549 }
6550 )
6551 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6552
6553 # PRE-SCALE BEGIN
6554 step = "Executing pre-scale vnf-config-primitive"
6555 if scaling_descriptor.get("scaling-config-action"):
6556 for scaling_config_action in scaling_descriptor[
6557 "scaling-config-action"
6558 ]:
6559 if (
6560 scaling_config_action.get("trigger") == "pre-scale-in"
6561 and scaling_type == "SCALE_IN"
6562 ) or (
6563 scaling_config_action.get("trigger") == "pre-scale-out"
6564 and scaling_type == "SCALE_OUT"
6565 ):
6566 vnf_config_primitive = scaling_config_action[
6567 "vnf-config-primitive-name-ref"
6568 ]
6569 step = db_nslcmop_update[
6570 "detailed-status"
6571 ] = "executing pre-scale scaling-config-action '{}'".format(
6572 vnf_config_primitive
6573 )
6574
6575 # look for primitive
6576 for config_primitive in (
6577 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6578 ).get("config-primitive", ()):
6579 if config_primitive["name"] == vnf_config_primitive:
6580 break
6581 else:
6582 raise LcmException(
6583 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6584 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6585 "primitive".format(scaling_group, vnf_config_primitive)
6586 )
6587
6588 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6589 if db_vnfr.get("additionalParamsForVnf"):
6590 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6591
6592 scale_process = "VCA"
6593 db_nsr_update["config-status"] = "configuring pre-scaling"
6594 primitive_params = self._map_primitive_params(
6595 config_primitive, {}, vnfr_params
6596 )
6597
6598 # Pre-scale retry check: Check if this sub-operation has been executed before
6599 op_index = self._check_or_add_scale_suboperation(
6600 db_nslcmop,
6601 vnf_index,
6602 vnf_config_primitive,
6603 primitive_params,
6604 "PRE-SCALE",
6605 )
6606 if op_index == self.SUBOPERATION_STATUS_SKIP:
6607 # Skip sub-operation
6608 result = "COMPLETED"
6609 result_detail = "Done"
6610 self.logger.debug(
6611 logging_text
6612 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6613 vnf_config_primitive, result, result_detail
6614 )
6615 )
6616 else:
6617 if op_index == self.SUBOPERATION_STATUS_NEW:
6618 # New sub-operation: Get index of this sub-operation
6619 op_index = (
6620 len(db_nslcmop.get("_admin", {}).get("operations"))
6621 - 1
6622 )
6623 self.logger.debug(
6624 logging_text
6625 + "vnf_config_primitive={} New sub-operation".format(
6626 vnf_config_primitive
6627 )
6628 )
6629 else:
6630 # retry: Get registered params for this existing sub-operation
6631 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6632 op_index
6633 ]
6634 vnf_index = op.get("member_vnf_index")
6635 vnf_config_primitive = op.get("primitive")
6636 primitive_params = op.get("primitive_params")
6637 self.logger.debug(
6638 logging_text
6639 + "vnf_config_primitive={} Sub-operation retry".format(
6640 vnf_config_primitive
6641 )
6642 )
6643 # Execute the primitive, either with new (first-time) or registered (reintent) args
6644 ee_descriptor_id = config_primitive.get(
6645 "execution-environment-ref"
6646 )
6647 primitive_name = config_primitive.get(
6648 "execution-environment-primitive", vnf_config_primitive
6649 )
6650 ee_id, vca_type = self._look_for_deployed_vca(
6651 nsr_deployed["VCA"],
6652 member_vnf_index=vnf_index,
6653 vdu_id=None,
6654 vdu_count_index=None,
6655 ee_descriptor_id=ee_descriptor_id,
6656 )
6657 result, result_detail = await self._ns_execute_primitive(
6658 ee_id,
6659 primitive_name,
6660 primitive_params,
6661 vca_type=vca_type,
6662 vca_id=vca_id,
6663 )
6664 self.logger.debug(
6665 logging_text
6666 + "vnf_config_primitive={} Done with result {} {}".format(
6667 vnf_config_primitive, result, result_detail
6668 )
6669 )
6670 # Update operationState = COMPLETED | FAILED
6671 self._update_suboperation_status(
6672 db_nslcmop, op_index, result, result_detail
6673 )
6674
6675 if result == "FAILED":
6676 raise LcmException(result_detail)
6677 db_nsr_update["config-status"] = old_config_status
6678 scale_process = None
6679 # PRE-SCALE END
6680
6681 db_nsr_update[
6682 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6683 ] = nb_scale_op
6684 db_nsr_update[
6685 "_admin.scaling-group.{}.time".format(admin_scale_index)
6686 ] = time()
6687
6688 # SCALE-IN VCA - BEGIN
6689 if vca_scaling_info:
6690 step = db_nslcmop_update[
6691 "detailed-status"
6692 ] = "Deleting the execution environments"
6693 scale_process = "VCA"
6694 for vca_info in vca_scaling_info:
6695 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6696 member_vnf_index = str(vca_info["member-vnf-index"])
6697 self.logger.debug(
6698 logging_text + "vdu info: {}".format(vca_info)
6699 )
6700 if vca_info.get("osm_vdu_id"):
6701 vdu_id = vca_info["osm_vdu_id"]
6702 vdu_index = int(vca_info["vdu_index"])
6703 stage[
6704 1
6705 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6706 member_vnf_index, vdu_id, vdu_index
6707 )
6708 stage[2] = step = "Scaling in VCA"
6709 self._write_op_status(op_id=nslcmop_id, stage=stage)
6710 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6711 config_update = db_nsr["configurationStatus"]
6712 for vca_index, vca in enumerate(vca_update):
6713 if (
6714 (vca or vca.get("ee_id"))
6715 and vca["member-vnf-index"] == member_vnf_index
6716 and vca["vdu_count_index"] == vdu_index
6717 ):
6718 if vca.get("vdu_id"):
6719 config_descriptor = get_configuration(
6720 db_vnfd, vca.get("vdu_id")
6721 )
6722 elif vca.get("kdu_name"):
6723 config_descriptor = get_configuration(
6724 db_vnfd, vca.get("kdu_name")
6725 )
6726 else:
6727 config_descriptor = get_configuration(
6728 db_vnfd, db_vnfd["id"]
6729 )
6730 operation_params = (
6731 db_nslcmop.get("operationParams") or {}
6732 )
6733 exec_terminate_primitives = not operation_params.get(
6734 "skip_terminate_primitives"
6735 ) and vca.get("needed_terminate")
6736 task = asyncio.ensure_future(
6737 asyncio.wait_for(
6738 self.destroy_N2VC(
6739 logging_text,
6740 db_nslcmop,
6741 vca,
6742 config_descriptor,
6743 vca_index,
6744 destroy_ee=True,
6745 exec_primitives=exec_terminate_primitives,
6746 scaling_in=True,
6747 vca_id=vca_id,
6748 ),
6749 timeout=self.timeout_charm_delete,
6750 )
6751 )
6752 tasks_dict_info[task] = "Terminating VCA {}".format(
6753 vca.get("ee_id")
6754 )
6755 del vca_update[vca_index]
6756 del config_update[vca_index]
6757 # wait for pending tasks of terminate primitives
6758 if tasks_dict_info:
6759 self.logger.debug(
6760 logging_text
6761 + "Waiting for tasks {}".format(
6762 list(tasks_dict_info.keys())
6763 )
6764 )
6765 error_list = await self._wait_for_tasks(
6766 logging_text,
6767 tasks_dict_info,
6768 min(
6769 self.timeout_charm_delete, self.timeout_ns_terminate
6770 ),
6771 stage,
6772 nslcmop_id,
6773 )
6774 tasks_dict_info.clear()
6775 if error_list:
6776 raise LcmException("; ".join(error_list))
6777
6778 db_vca_and_config_update = {
6779 "_admin.deployed.VCA": vca_update,
6780 "configurationStatus": config_update,
6781 }
6782 self.update_db_2(
6783 "nsrs", db_nsr["_id"], db_vca_and_config_update
6784 )
6785 scale_process = None
6786 # SCALE-IN VCA - END
6787
6788 # SCALE RO - BEGIN
6789 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6790 scale_process = "RO"
6791 if self.ro_config.get("ng"):
6792 await self._scale_ng_ro(
6793 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6794 )
6795 scaling_info.pop("vdu-create", None)
6796 scaling_info.pop("vdu-delete", None)
6797
6798 scale_process = None
6799 # SCALE RO - END
6800
6801 # SCALE KDU - BEGIN
6802 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6803 scale_process = "KDU"
6804 await self._scale_kdu(
6805 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6806 )
6807 scaling_info.pop("kdu-create", None)
6808 scaling_info.pop("kdu-delete", None)
6809
6810 scale_process = None
6811 # SCALE KDU - END
6812
6813 if db_nsr_update:
6814 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6815
6816 # SCALE-UP VCA - BEGIN
6817 if vca_scaling_info:
6818 step = db_nslcmop_update[
6819 "detailed-status"
6820 ] = "Creating new execution environments"
6821 scale_process = "VCA"
6822 for vca_info in vca_scaling_info:
6823 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6824 member_vnf_index = str(vca_info["member-vnf-index"])
6825 self.logger.debug(
6826 logging_text + "vdu info: {}".format(vca_info)
6827 )
6828 vnfd_id = db_vnfr["vnfd-ref"]
6829 if vca_info.get("osm_vdu_id"):
6830 vdu_index = int(vca_info["vdu_index"])
6831 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6832 if db_vnfr.get("additionalParamsForVnf"):
6833 deploy_params.update(
6834 parse_yaml_strings(
6835 db_vnfr["additionalParamsForVnf"].copy()
6836 )
6837 )
6838 descriptor_config = get_configuration(
6839 db_vnfd, db_vnfd["id"]
6840 )
6841 if descriptor_config:
6842 vdu_id = None
6843 vdu_name = None
6844 kdu_name = None
6845 self._deploy_n2vc(
6846 logging_text=logging_text
6847 + "member_vnf_index={} ".format(member_vnf_index),
6848 db_nsr=db_nsr,
6849 db_vnfr=db_vnfr,
6850 nslcmop_id=nslcmop_id,
6851 nsr_id=nsr_id,
6852 nsi_id=nsi_id,
6853 vnfd_id=vnfd_id,
6854 vdu_id=vdu_id,
6855 kdu_name=kdu_name,
6856 member_vnf_index=member_vnf_index,
6857 vdu_index=vdu_index,
6858 vdu_name=vdu_name,
6859 deploy_params=deploy_params,
6860 descriptor_config=descriptor_config,
6861 base_folder=base_folder,
6862 task_instantiation_info=tasks_dict_info,
6863 stage=stage,
6864 )
6865 vdu_id = vca_info["osm_vdu_id"]
6866 vdur = find_in_list(
6867 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6868 )
6869 descriptor_config = get_configuration(db_vnfd, vdu_id)
6870 if vdur.get("additionalParams"):
6871 deploy_params_vdu = parse_yaml_strings(
6872 vdur["additionalParams"]
6873 )
6874 else:
6875 deploy_params_vdu = deploy_params
6876 deploy_params_vdu["OSM"] = get_osm_params(
6877 db_vnfr, vdu_id, vdu_count_index=vdu_index
6878 )
6879 if descriptor_config:
6880 vdu_name = None
6881 kdu_name = None
6882 stage[
6883 1
6884 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6885 member_vnf_index, vdu_id, vdu_index
6886 )
6887 stage[2] = step = "Scaling out VCA"
6888 self._write_op_status(op_id=nslcmop_id, stage=stage)
6889 self._deploy_n2vc(
6890 logging_text=logging_text
6891 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6892 member_vnf_index, vdu_id, vdu_index
6893 ),
6894 db_nsr=db_nsr,
6895 db_vnfr=db_vnfr,
6896 nslcmop_id=nslcmop_id,
6897 nsr_id=nsr_id,
6898 nsi_id=nsi_id,
6899 vnfd_id=vnfd_id,
6900 vdu_id=vdu_id,
6901 kdu_name=kdu_name,
6902 member_vnf_index=member_vnf_index,
6903 vdu_index=vdu_index,
6904 vdu_name=vdu_name,
6905 deploy_params=deploy_params_vdu,
6906 descriptor_config=descriptor_config,
6907 base_folder=base_folder,
6908 task_instantiation_info=tasks_dict_info,
6909 stage=stage,
6910 )
6911 # SCALE-UP VCA - END
6912 scale_process = None
6913
6914 # POST-SCALE BEGIN
6915 # execute primitive service POST-SCALING
6916 step = "Executing post-scale vnf-config-primitive"
6917 if scaling_descriptor.get("scaling-config-action"):
6918 for scaling_config_action in scaling_descriptor[
6919 "scaling-config-action"
6920 ]:
6921 if (
6922 scaling_config_action.get("trigger") == "post-scale-in"
6923 and scaling_type == "SCALE_IN"
6924 ) or (
6925 scaling_config_action.get("trigger") == "post-scale-out"
6926 and scaling_type == "SCALE_OUT"
6927 ):
6928 vnf_config_primitive = scaling_config_action[
6929 "vnf-config-primitive-name-ref"
6930 ]
6931 step = db_nslcmop_update[
6932 "detailed-status"
6933 ] = "executing post-scale scaling-config-action '{}'".format(
6934 vnf_config_primitive
6935 )
6936
6937 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6938 if db_vnfr.get("additionalParamsForVnf"):
6939 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6940
6941 # look for primitive
6942 for config_primitive in (
6943 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6944 ).get("config-primitive", ()):
6945 if config_primitive["name"] == vnf_config_primitive:
6946 break
6947 else:
6948 raise LcmException(
6949 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6950 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6951 "config-primitive".format(
6952 scaling_group, vnf_config_primitive
6953 )
6954 )
6955 scale_process = "VCA"
6956 db_nsr_update["config-status"] = "configuring post-scaling"
6957 primitive_params = self._map_primitive_params(
6958 config_primitive, {}, vnfr_params
6959 )
6960
6961 # Post-scale retry check: Check if this sub-operation has been executed before
6962 op_index = self._check_or_add_scale_suboperation(
6963 db_nslcmop,
6964 vnf_index,
6965 vnf_config_primitive,
6966 primitive_params,
6967 "POST-SCALE",
6968 )
6969 if op_index == self.SUBOPERATION_STATUS_SKIP:
6970 # Skip sub-operation
6971 result = "COMPLETED"
6972 result_detail = "Done"
6973 self.logger.debug(
6974 logging_text
6975 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6976 vnf_config_primitive, result, result_detail
6977 )
6978 )
6979 else:
6980 if op_index == self.SUBOPERATION_STATUS_NEW:
6981 # New sub-operation: Get index of this sub-operation
6982 op_index = (
6983 len(db_nslcmop.get("_admin", {}).get("operations"))
6984 - 1
6985 )
6986 self.logger.debug(
6987 logging_text
6988 + "vnf_config_primitive={} New sub-operation".format(
6989 vnf_config_primitive
6990 )
6991 )
6992 else:
6993 # retry: Get registered params for this existing sub-operation
6994 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6995 op_index
6996 ]
6997 vnf_index = op.get("member_vnf_index")
6998 vnf_config_primitive = op.get("primitive")
6999 primitive_params = op.get("primitive_params")
7000 self.logger.debug(
7001 logging_text
7002 + "vnf_config_primitive={} Sub-operation retry".format(
7003 vnf_config_primitive
7004 )
7005 )
7006 # Execute the primitive, either with new (first-time) or registered (reintent) args
7007 ee_descriptor_id = config_primitive.get(
7008 "execution-environment-ref"
7009 )
7010 primitive_name = config_primitive.get(
7011 "execution-environment-primitive", vnf_config_primitive
7012 )
7013 ee_id, vca_type = self._look_for_deployed_vca(
7014 nsr_deployed["VCA"],
7015 member_vnf_index=vnf_index,
7016 vdu_id=None,
7017 vdu_count_index=None,
7018 ee_descriptor_id=ee_descriptor_id,
7019 )
7020 result, result_detail = await self._ns_execute_primitive(
7021 ee_id,
7022 primitive_name,
7023 primitive_params,
7024 vca_type=vca_type,
7025 vca_id=vca_id,
7026 )
7027 self.logger.debug(
7028 logging_text
7029 + "vnf_config_primitive={} Done with result {} {}".format(
7030 vnf_config_primitive, result, result_detail
7031 )
7032 )
7033 # Update operationState = COMPLETED | FAILED
7034 self._update_suboperation_status(
7035 db_nslcmop, op_index, result, result_detail
7036 )
7037
7038 if result == "FAILED":
7039 raise LcmException(result_detail)
7040 db_nsr_update["config-status"] = old_config_status
7041 scale_process = None
7042 # POST-SCALE END
7043
7044 db_nsr_update[
7045 "detailed-status"
7046 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7047 db_nsr_update["operational-status"] = (
7048 "running"
7049 if old_operational_status == "failed"
7050 else old_operational_status
7051 )
7052 db_nsr_update["config-status"] = old_config_status
7053 return
7054 except (
7055 ROclient.ROClientException,
7056 DbException,
7057 LcmException,
7058 NgRoException,
7059 ) as e:
7060 self.logger.error(logging_text + "Exit Exception {}".format(e))
7061 exc = e
7062 except asyncio.CancelledError:
7063 self.logger.error(
7064 logging_text + "Cancelled Exception while '{}'".format(step)
7065 )
7066 exc = "Operation was cancelled"
7067 except Exception as e:
7068 exc = traceback.format_exc()
7069 self.logger.critical(
7070 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7071 exc_info=True,
7072 )
7073 finally:
7074 self._write_ns_status(
7075 nsr_id=nsr_id,
7076 ns_state=None,
7077 current_operation="IDLE",
7078 current_operation_id=None,
7079 )
7080 if tasks_dict_info:
7081 stage[1] = "Waiting for instantiate pending tasks."
7082 self.logger.debug(logging_text + stage[1])
7083 exc = await self._wait_for_tasks(
7084 logging_text,
7085 tasks_dict_info,
7086 self.timeout_ns_deploy,
7087 stage,
7088 nslcmop_id,
7089 nsr_id=nsr_id,
7090 )
7091 if exc:
7092 db_nslcmop_update[
7093 "detailed-status"
7094 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7095 nslcmop_operation_state = "FAILED"
7096 if db_nsr:
7097 db_nsr_update["operational-status"] = old_operational_status
7098 db_nsr_update["config-status"] = old_config_status
7099 db_nsr_update["detailed-status"] = ""
7100 if scale_process:
7101 if "VCA" in scale_process:
7102 db_nsr_update["config-status"] = "failed"
7103 if "RO" in scale_process:
7104 db_nsr_update["operational-status"] = "failed"
7105 db_nsr_update[
7106 "detailed-status"
7107 ] = "FAILED scaling nslcmop={} {}: {}".format(
7108 nslcmop_id, step, exc
7109 )
7110 else:
7111 error_description_nslcmop = None
7112 nslcmop_operation_state = "COMPLETED"
7113 db_nslcmop_update["detailed-status"] = "Done"
7114
7115 self._write_op_status(
7116 op_id=nslcmop_id,
7117 stage="",
7118 error_message=error_description_nslcmop,
7119 operation_state=nslcmop_operation_state,
7120 other_update=db_nslcmop_update,
7121 )
7122 if db_nsr:
7123 self._write_ns_status(
7124 nsr_id=nsr_id,
7125 ns_state=None,
7126 current_operation="IDLE",
7127 current_operation_id=None,
7128 other_update=db_nsr_update,
7129 )
7130
7131 if nslcmop_operation_state:
7132 try:
7133 msg = {
7134 "nsr_id": nsr_id,
7135 "nslcmop_id": nslcmop_id,
7136 "operationState": nslcmop_operation_state,
7137 }
7138 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7139 except Exception as e:
7140 self.logger.error(
7141 logging_text + "kafka_write notification Exception {}".format(e)
7142 )
7143 self.logger.debug(logging_text + "Exit")
7144 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7145
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale deployed KDUs and run the related config primitives.

        For each KDU listed in scaling_info, scales the K8s application through
        the matching K8s connector. For "delete" operations the KDU
        terminate-config-primitives are executed first; for "create" operations
        the initial-config-primitives are executed after scaling. Primitives
        are only run directly (outside Juju) when no juju execution-environment
        is referenced for the KDU.

        :param logging_text: prefix text for log messages
        :param nsr_id: NS record _id, used to address the nsrs DB document
        :param nsr_deployed: content of nsr _admin.deployed, to locate deployed KDUs
        :param db_vnfd: VNF descriptor holding the KDU configuration
        :param vca_id: VCA id, passed through to the K8s connector calls
        :param scaling_info: dict with "kdu-create" or "kdu-delete" entries,
            each mapping kdu_name -> list of per-instance scaling data
        """
        # NOTE(review): only one of kdu-create/kdu-delete is iterated; if a
        # caller ever provided both, kdu-delete would be silently ignored —
        # confirm callers always pass exactly one.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # DB location where the K8s connector reports status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # Scale-in: run terminate primitives BEFORE removing replicas
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # hard-coded 10-minute cap per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # Perform the actual scale on the K8s cluster (atomic: rollback
                # on failure is delegated to the connector)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # Scale-out: run initial primitives AFTER the new replicas exist
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # hard-coded 10-minute cap per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7251
7252 async def _scale_ng_ro(
7253 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7254 ):
7255 nsr_id = db_nslcmop["nsInstanceId"]
7256 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7257 db_vnfrs = {}
7258
7259 # read from db: vnfd's for every vnf
7260 db_vnfds = []
7261
7262 # for each vnf in ns, read vnfd
7263 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7264 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7265 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7266 # if we haven't this vnfd, read it from db
7267 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7268 # read from db
7269 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7270 db_vnfds.append(vnfd)
7271 n2vc_key = self.n2vc.get_public_key()
7272 n2vc_key_list = [n2vc_key]
7273 self.scale_vnfr(
7274 db_vnfr,
7275 vdu_scaling_info.get("vdu-create"),
7276 vdu_scaling_info.get("vdu-delete"),
7277 mark_delete=True,
7278 )
7279 # db_vnfr has been updated, update db_vnfrs to use it
7280 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7281 await self._instantiate_ng_ro(
7282 logging_text,
7283 nsr_id,
7284 db_nsd,
7285 db_nsr,
7286 db_nslcmop,
7287 db_vnfrs,
7288 db_vnfds,
7289 n2vc_key_list,
7290 stage=stage,
7291 start_deploy=time(),
7292 timeout_ns_deploy=self.timeout_ns_deploy,
7293 )
7294 if vdu_scaling_info.get("vdu-delete"):
7295 self.scale_vnfr(
7296 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7297 )
7298
7299 async def extract_prometheus_scrape_jobs(
7300 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7301 ):
7302 # look if exist a file called 'prometheus*.j2' and
7303 artifact_content = self.fs.dir_ls(artifact_path)
7304 job_file = next(
7305 (
7306 f
7307 for f in artifact_content
7308 if f.startswith("prometheus") and f.endswith(".j2")
7309 ),
7310 None,
7311 )
7312 if not job_file:
7313 return
7314 with self.fs.file_open((artifact_path, job_file), "r") as f:
7315 job_data = f.read()
7316
7317 # TODO get_service
7318 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7319 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7320 host_port = "80"
7321 vnfr_id = vnfr_id.replace("-", "")
7322 variables = {
7323 "JOB_NAME": vnfr_id,
7324 "TARGET_IP": target_ip,
7325 "EXPORTER_POD_IP": host_name,
7326 "EXPORTER_POD_PORT": host_port,
7327 }
7328 job_list = parse_job(job_data, variables)
7329 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7330 for job in job_list:
7331 if (
7332 not isinstance(job.get("job_name"), str)
7333 or vnfr_id not in job["job_name"]
7334 ):
7335 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7336 job["nsr_id"] = nsr_id
7337 job["vnfr_id"] = vnfr_id
7338 return job_list
7339
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Operate (start/stop/rebuild) a single VDU of a VNF through RO.

        Locates the target vdur by vdu_id and count-index, sends the request to
        RO via self.RO.operate and waits for RO completion.

        :param nsr_id: NS instance id, used for logging and status records
        :param nslcmop_id: id of the nslcmop driving this operation
        :param vnf_id: _id of the vnfr containing the target VDU
        :param additional_param: dict with "vdu_id" and "count-index" selecting the VDU
        :param operation_type: operation name as expected by RO.operate
            (presumably start/stop/rebuild — confirm exact values with callers)
        :return: ("COMPLETED", "Done") on success, ("FAILED", <detail>) on error
        :raises: nothing — all exceptions are converted to a FAILED result
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # select the vdur matching both the vdu id and the count-index
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first vim_info key identifies the target VIM for this vdur
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached when one of the except branches set exc
        return "FAILED", "Error in operate VNF {}".format(exc)
7426
7427 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7428 """
7429 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7430
7431 :param: vim_account_id: VIM Account ID
7432
7433 :return: (cloud_name, cloud_credential)
7434 """
7435 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7436 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7437
7438 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7439 """
7440 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7441
7442 :param: vim_account_id: VIM Account ID
7443
7444 :return: (cloud_name, cloud_credential)
7445 """
7446 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7447 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7448
7449 async def migrate(self, nsr_id, nslcmop_id):
7450 """
7451 Migrate VNFs and VDUs instances in a NS
7452
7453 :param: nsr_id: NS Instance ID
7454 :param: nslcmop_id: nslcmop ID of migrate
7455
7456 """
7457 # Try to lock HA task here
7458 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7459 if not task_is_locked_by_me:
7460 return
7461 logging_text = "Task ns={} migrate ".format(nsr_id)
7462 self.logger.debug(logging_text + "Enter")
7463 # get all needed from database
7464 db_nslcmop = None
7465 db_nslcmop_update = {}
7466 nslcmop_operation_state = None
7467 db_nsr_update = {}
7468 target = {}
7469 exc = None
7470 # in case of error, indicates what part of scale was failed to put nsr at error status
7471 start_deploy = time()
7472
7473 try:
7474 # wait for any previous tasks in process
7475 step = "Waiting for previous operations to terminate"
7476 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7477
7478 self._write_ns_status(
7479 nsr_id=nsr_id,
7480 ns_state=None,
7481 current_operation="MIGRATING",
7482 current_operation_id=nslcmop_id,
7483 )
7484 step = "Getting nslcmop from database"
7485 self.logger.debug(
7486 step + " after having waited for previous tasks to be completed"
7487 )
7488 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7489 migrate_params = db_nslcmop.get("operationParams")
7490
7491 target = {}
7492 target.update(migrate_params)
7493 desc = await self.RO.migrate(nsr_id, target)
7494 self.logger.debug("RO return > {}".format(desc))
7495 action_id = desc["action_id"]
7496 await self._wait_ng_ro(
7497 nsr_id,
7498 action_id,
7499 nslcmop_id,
7500 start_deploy,
7501 self.timeout_migrate,
7502 operation="migrate",
7503 )
7504 except (ROclient.ROClientException, DbException, LcmException) as e:
7505 self.logger.error("Exit Exception {}".format(e))
7506 exc = e
7507 except asyncio.CancelledError:
7508 self.logger.error("Cancelled Exception while '{}'".format(step))
7509 exc = "Operation was cancelled"
7510 except Exception as e:
7511 exc = traceback.format_exc()
7512 self.logger.critical(
7513 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7514 )
7515 finally:
7516 self._write_ns_status(
7517 nsr_id=nsr_id,
7518 ns_state=None,
7519 current_operation="IDLE",
7520 current_operation_id=None,
7521 )
7522 if exc:
7523 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7524 nslcmop_operation_state = "FAILED"
7525 else:
7526 nslcmop_operation_state = "COMPLETED"
7527 db_nslcmop_update["detailed-status"] = "Done"
7528 db_nsr_update["detailed-status"] = "Done"
7529
7530 self._write_op_status(
7531 op_id=nslcmop_id,
7532 stage="",
7533 error_message="",
7534 operation_state=nslcmop_operation_state,
7535 other_update=db_nslcmop_update,
7536 )
7537 if nslcmop_operation_state:
7538 try:
7539 msg = {
7540 "nsr_id": nsr_id,
7541 "nslcmop_id": nslcmop_id,
7542 "operationState": nslcmop_operation_state,
7543 }
7544 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7545 except Exception as e:
7546 self.logger.error(
7547 logging_text + "kafka_write notification Exception {}".format(e)
7548 )
7549 self.logger.debug(logging_text + "Exit")
7550 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7551
7552 async def heal(self, nsr_id, nslcmop_id):
7553 """
7554 Heal NS
7555
7556 :param nsr_id: ns instance to heal
7557 :param nslcmop_id: operation to run
7558 :return:
7559 """
7560
7561 # Try to lock HA task here
7562 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7563 if not task_is_locked_by_me:
7564 return
7565
7566 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7567 stage = ["", "", ""]
7568 tasks_dict_info = {}
7569 # ^ stage, step, VIM progress
7570 self.logger.debug(logging_text + "Enter")
7571 # get all needed from database
7572 db_nsr = None
7573 db_nslcmop_update = {}
7574 db_nsr_update = {}
7575 db_vnfrs = {} # vnf's info indexed by _id
7576 exc = None
7577 old_operational_status = ""
7578 old_config_status = ""
7579 nsi_id = None
7580 try:
7581 # wait for any previous tasks in process
7582 step = "Waiting for previous operations to terminate"
7583 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7584 self._write_ns_status(
7585 nsr_id=nsr_id,
7586 ns_state=None,
7587 current_operation="HEALING",
7588 current_operation_id=nslcmop_id,
7589 )
7590
7591 step = "Getting nslcmop from database"
7592 self.logger.debug(
7593 step + " after having waited for previous tasks to be completed"
7594 )
7595 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7596
7597 step = "Getting nsr from database"
7598 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7599 old_operational_status = db_nsr["operational-status"]
7600 old_config_status = db_nsr["config-status"]
7601
7602 db_nsr_update = {
7603 "_admin.deployed.RO.operational-status": "healing",
7604 }
7605 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7606
7607 step = "Sending heal order to VIM"
7608 # task_ro = asyncio.ensure_future(
7609 # self.heal_RO(
7610 # logging_text=logging_text,
7611 # nsr_id=nsr_id,
7612 # db_nslcmop=db_nslcmop,
7613 # stage=stage,
7614 # )
7615 # )
7616 # self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
7617 # tasks_dict_info[task_ro] = "Healing at VIM"
7618 await self.heal_RO(
7619 logging_text=logging_text,
7620 nsr_id=nsr_id,
7621 db_nslcmop=db_nslcmop,
7622 stage=stage,
7623 )
7624 # VCA tasks
7625 # read from db: nsd
7626 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7627 self.logger.debug(logging_text + stage[1])
7628 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7629 self.fs.sync(db_nsr["nsd-id"])
7630 db_nsr["nsd"] = nsd
7631 # read from db: vnfr's of this ns
7632 step = "Getting vnfrs from db"
7633 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7634 for vnfr in db_vnfrs_list:
7635 db_vnfrs[vnfr["_id"]] = vnfr
7636 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7637
7638 # Check for each target VNF
7639 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7640 for target_vnf in target_list:
7641 # Find this VNF in the list from DB
7642 vnfr_id = target_vnf.get("vnfInstanceId", None)
7643 if vnfr_id:
7644 db_vnfr = db_vnfrs[vnfr_id]
7645 vnfd_id = db_vnfr.get("vnfd-id")
7646 vnfd_ref = db_vnfr.get("vnfd-ref")
7647 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7648 base_folder = vnfd["_admin"]["storage"]
7649 vdu_id = None
7650 vdu_index = 0
7651 vdu_name = None
7652 kdu_name = None
7653 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7654 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7655
7656 # Check each target VDU and deploy N2VC
7657 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7658 "vdu", []
7659 )
7660 if not target_vdu_list:
7661 # Codigo nuevo para crear diccionario
7662 target_vdu_list = []
7663 for existing_vdu in db_vnfr.get("vdur"):
7664 vdu_name = existing_vdu.get("vdu-name", None)
7665 vdu_index = existing_vdu.get("count-index", 0)
7666 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7667 "run-day1", False
7668 )
7669 vdu_to_be_healed = {
7670 "vdu-id": vdu_name,
7671 "count-index": vdu_index,
7672 "run-day1": vdu_run_day1,
7673 }
7674 target_vdu_list.append(vdu_to_be_healed)
7675 for target_vdu in target_vdu_list:
7676 deploy_params_vdu = target_vdu
7677 # Set run-day1 vnf level value if not vdu level value exists
7678 if not deploy_params_vdu.get("run-day1") and target_vnf[
7679 "additionalParams"
7680 ].get("run-day1"):
7681 deploy_params_vdu["run-day1"] = target_vnf[
7682 "additionalParams"
7683 ].get("run-day1")
7684 vdu_name = target_vdu.get("vdu-id", None)
7685 # TODO: Get vdu_id from vdud.
7686 vdu_id = vdu_name
7687 # For multi instance VDU count-index is mandatory
7688 # For single session VDU count-indes is 0
7689 vdu_index = target_vdu.get("count-index", 0)
7690
7691 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7692 stage[1] = "Deploying Execution Environments."
7693 self.logger.debug(logging_text + stage[1])
7694
7695 # VNF Level charm. Normal case when proxy charms.
7696 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7697 descriptor_config = get_configuration(vnfd, vnfd_ref)
7698 if descriptor_config:
7699 # Continue if healed machine is management machine
7700 vnf_ip_address = db_vnfr.get("ip-address")
7701 target_instance = None
7702 for instance in db_vnfr.get("vdur", None):
7703 if (
7704 instance["vdu-name"] == vdu_name
7705 and instance["count-index"] == vdu_index
7706 ):
7707 target_instance = instance
7708 break
7709 if vnf_ip_address == target_instance.get("ip-address"):
7710 self._heal_n2vc(
7711 logging_text=logging_text
7712 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7713 member_vnf_index, vdu_name, vdu_index
7714 ),
7715 db_nsr=db_nsr,
7716 db_vnfr=db_vnfr,
7717 nslcmop_id=nslcmop_id,
7718 nsr_id=nsr_id,
7719 nsi_id=nsi_id,
7720 vnfd_id=vnfd_ref,
7721 vdu_id=None,
7722 kdu_name=None,
7723 member_vnf_index=member_vnf_index,
7724 vdu_index=0,
7725 vdu_name=None,
7726 deploy_params=deploy_params_vdu,
7727 descriptor_config=descriptor_config,
7728 base_folder=base_folder,
7729 task_instantiation_info=tasks_dict_info,
7730 stage=stage,
7731 )
7732
7733 # VDU Level charm. Normal case with native charms.
7734 descriptor_config = get_configuration(vnfd, vdu_name)
7735 if descriptor_config:
7736 self._heal_n2vc(
7737 logging_text=logging_text
7738 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7739 member_vnf_index, vdu_name, vdu_index
7740 ),
7741 db_nsr=db_nsr,
7742 db_vnfr=db_vnfr,
7743 nslcmop_id=nslcmop_id,
7744 nsr_id=nsr_id,
7745 nsi_id=nsi_id,
7746 vnfd_id=vnfd_ref,
7747 vdu_id=vdu_id,
7748 kdu_name=kdu_name,
7749 member_vnf_index=member_vnf_index,
7750 vdu_index=vdu_index,
7751 vdu_name=vdu_name,
7752 deploy_params=deploy_params_vdu,
7753 descriptor_config=descriptor_config,
7754 base_folder=base_folder,
7755 task_instantiation_info=tasks_dict_info,
7756 stage=stage,
7757 )
7758
7759 except (
7760 ROclient.ROClientException,
7761 DbException,
7762 LcmException,
7763 NgRoException,
7764 ) as e:
7765 self.logger.error(logging_text + "Exit Exception {}".format(e))
7766 exc = e
7767 except asyncio.CancelledError:
7768 self.logger.error(
7769 logging_text + "Cancelled Exception while '{}'".format(step)
7770 )
7771 exc = "Operation was cancelled"
7772 except Exception as e:
7773 exc = traceback.format_exc()
7774 self.logger.critical(
7775 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7776 exc_info=True,
7777 )
7778 finally:
7779 if tasks_dict_info:
7780 stage[1] = "Waiting for healing pending tasks."
7781 self.logger.debug(logging_text + stage[1])
7782 exc = await self._wait_for_tasks(
7783 logging_text,
7784 tasks_dict_info,
7785 self.timeout_ns_deploy,
7786 stage,
7787 nslcmop_id,
7788 nsr_id=nsr_id,
7789 )
7790 if exc:
7791 db_nslcmop_update[
7792 "detailed-status"
7793 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7794 nslcmop_operation_state = "FAILED"
7795 if db_nsr:
7796 db_nsr_update["operational-status"] = old_operational_status
7797 db_nsr_update["config-status"] = old_config_status
7798 db_nsr_update[
7799 "detailed-status"
7800 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
7801 for task, task_name in tasks_dict_info.items():
7802 if not task.done() or task.cancelled() or task.exception():
7803 if task_name.startswith(self.task_name_deploy_vca):
7804 # A N2VC task is pending
7805 db_nsr_update["config-status"] = "failed"
7806 else:
7807 # RO task is pending
7808 db_nsr_update["operational-status"] = "failed"
7809 else:
7810 error_description_nslcmop = None
7811 nslcmop_operation_state = "COMPLETED"
7812 db_nslcmop_update["detailed-status"] = "Done"
7813 db_nsr_update["detailed-status"] = "Done"
7814 db_nsr_update["operational-status"] = "running"
7815 db_nsr_update["config-status"] = "configured"
7816
7817 self._write_op_status(
7818 op_id=nslcmop_id,
7819 stage="",
7820 error_message=error_description_nslcmop,
7821 operation_state=nslcmop_operation_state,
7822 other_update=db_nslcmop_update,
7823 )
7824 if db_nsr:
7825 self._write_ns_status(
7826 nsr_id=nsr_id,
7827 ns_state=None,
7828 current_operation="IDLE",
7829 current_operation_id=None,
7830 other_update=db_nsr_update,
7831 )
7832
7833 if nslcmop_operation_state:
7834 try:
7835 msg = {
7836 "nsr_id": nsr_id,
7837 "nslcmop_id": nslcmop_id,
7838 "operationState": nslcmop_operation_state,
7839 }
7840 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
7841 except Exception as e:
7842 self.logger.error(
7843 logging_text + "kafka_write notification Exception {}".format(e)
7844 )
7845 self.logger.debug(logging_text + "Exit")
7846 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7847
7848 async def heal_RO(
7849 self,
7850 logging_text,
7851 nsr_id,
7852 db_nslcmop,
7853 stage,
7854 ):
7855 """
7856 Heal at RO
7857 :param logging_text: preffix text to use at logging
7858 :param nsr_id: nsr identity
7859 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7860 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7861 :return: None or exception
7862 """
7863
7864 def get_vim_account(vim_account_id):
7865 nonlocal db_vims
7866 if vim_account_id in db_vims:
7867 return db_vims[vim_account_id]
7868 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7869 db_vims[vim_account_id] = db_vim
7870 return db_vim
7871
7872 try:
7873 start_heal = time()
7874 ns_params = db_nslcmop.get("operationParams")
7875 if ns_params and ns_params.get("timeout_ns_heal"):
7876 timeout_ns_heal = ns_params["timeout_ns_heal"]
7877 else:
7878 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
7879
7880 db_vims = {}
7881
7882 nslcmop_id = db_nslcmop["_id"]
7883 target = {
7884 "action_id": nslcmop_id,
7885 }
7886 self.logger.warning(
7887 "db_nslcmop={} and timeout_ns_heal={}".format(
7888 db_nslcmop, timeout_ns_heal
7889 )
7890 )
7891 target.update(db_nslcmop.get("operationParams", {}))
7892
7893 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7894 desc = await self.RO.recreate(nsr_id, target)
7895 self.logger.debug("RO return > {}".format(desc))
7896 action_id = desc["action_id"]
7897 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7898 await self._wait_ng_ro(
7899 nsr_id,
7900 action_id,
7901 nslcmop_id,
7902 start_heal,
7903 timeout_ns_heal,
7904 stage,
7905 operation="healing",
7906 )
7907
7908 # Updating NSR
7909 db_nsr_update = {
7910 "_admin.deployed.RO.operational-status": "running",
7911 "detailed-status": " ".join(stage),
7912 }
7913 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7914 self._write_op_status(nslcmop_id, stage)
7915 self.logger.debug(
7916 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7917 )
7918
7919 except Exception as e:
7920 stage[2] = "ERROR healing at VIM"
7921 # self.set_vnfr_at_error(db_vnfrs, str(e))
7922 self.logger.error(
7923 "Error healing at VIM {}".format(e),
7924 exc_info=not isinstance(
7925 e,
7926 (
7927 ROclient.ROClientException,
7928 LcmException,
7929 DbException,
7930 NgRoException,
7931 ),
7932 ),
7933 )
7934 raise
7935
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment of a descriptor.

        For each execution-environment item in descriptor_config, find (or create)
        the matching entry in db_nsr._admin.deployed.VCA, then schedule heal_N2VC
        for it and register the task in self.lcm_tasks / task_instantiation_info.
        Mutates db_nsr in memory and updates the "nsrs" collection when a new VCA
        entry is created.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        # Normalize the descriptor into a list of execution-environment items
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the ee item (juju charm vs helm chart)
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an already-deployed VCA entry matching this element; the
            # for/else below creates a new entry only when no match breaks out.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                # vca_index is -1 (empty list) or the last index; advance to append
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8088
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal one VCA execution environment after RO has recreated the VM.

        Steps (mirroring instantiate_N2VC): for native charms, wait for the VM,
        register a new execution environment and install the configuration
        software; for proxy/helm types, optionally obtain the EE SSH public key
        and wait for RO healing to finish before injecting it; finally, if the
        operation requested run-day1, re-execute the initial config primitives.
        Writes progress into nsrs.configurationStatus.<vca_index> and raises
        LcmException (status BROKEN) on any failure.
        """
        nsr_id = db_nsr["_id"]
        # dotted db prefix for this VCA entry inside the nsr record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace: NS -> VNF -> VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive, if present, is applied at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

                # store rw_mgmt_ip in deploy params for later replacement
                deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                " Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            # step carries the last attempted action, giving the failure context
            raise LcmException("{} {}".format(step, e)) from e
8497
8498 async def _wait_heal_ro(
8499 self,
8500 nsr_id,
8501 timeout=600,
8502 ):
8503 start_time = time()
8504 while time() <= start_time + timeout:
8505 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8506 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8507 "operational-status"
8508 ]
8509 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8510 if operational_status_ro != "healing":
8511 break
8512 await asyncio.sleep(15, loop=self.loop)
8513 else: # timeout_ns_deploy
8514 raise NgRoException("Timeout waiting ns to deploy")
8515
8516 async def vertical_scale(self, nsr_id, nslcmop_id):
8517 """
8518 Vertical Scale the VDUs in a NS
8519
8520 :param: nsr_id: NS Instance ID
8521 :param: nslcmop_id: nslcmop ID of migrate
8522
8523 """
8524 # Try to lock HA task here
8525 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8526 if not task_is_locked_by_me:
8527 return
8528 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8529 self.logger.debug(logging_text + "Enter")
8530 # get all needed from database
8531 db_nslcmop = None
8532 db_nslcmop_update = {}
8533 nslcmop_operation_state = None
8534 db_nsr_update = {}
8535 target = {}
8536 exc = None
8537 # in case of error, indicates what part of scale was failed to put nsr at error status
8538 start_deploy = time()
8539
8540 try:
8541 # wait for any previous tasks in process
8542 step = "Waiting for previous operations to terminate"
8543 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8544
8545 self._write_ns_status(
8546 nsr_id=nsr_id,
8547 ns_state=None,
8548 current_operation="VerticalScale",
8549 current_operation_id=nslcmop_id,
8550 )
8551 step = "Getting nslcmop from database"
8552 self.logger.debug(
8553 step + " after having waited for previous tasks to be completed"
8554 )
8555 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8556 operationParams = db_nslcmop.get("operationParams")
8557 target = {}
8558 target.update(operationParams)
8559 desc = await self.RO.vertical_scale(nsr_id, target)
8560 self.logger.debug("RO return > {}".format(desc))
8561 action_id = desc["action_id"]
8562 await self._wait_ng_ro(
8563 nsr_id,
8564 action_id,
8565 nslcmop_id,
8566 start_deploy,
8567 self.timeout_verticalscale,
8568 operation="verticalscale",
8569 )
8570 except (ROclient.ROClientException, DbException, LcmException) as e:
8571 self.logger.error("Exit Exception {}".format(e))
8572 exc = e
8573 except asyncio.CancelledError:
8574 self.logger.error("Cancelled Exception while '{}'".format(step))
8575 exc = "Operation was cancelled"
8576 except Exception as e:
8577 exc = traceback.format_exc()
8578 self.logger.critical(
8579 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8580 )
8581 finally:
8582 self._write_ns_status(
8583 nsr_id=nsr_id,
8584 ns_state=None,
8585 current_operation="IDLE",
8586 current_operation_id=None,
8587 )
8588 if exc:
8589 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8590 nslcmop_operation_state = "FAILED"
8591 else:
8592 nslcmop_operation_state = "COMPLETED"
8593 db_nslcmop_update["detailed-status"] = "Done"
8594 db_nsr_update["detailed-status"] = "Done"
8595
8596 self._write_op_status(
8597 op_id=nslcmop_id,
8598 stage="",
8599 error_message="",
8600 operation_state=nslcmop_operation_state,
8601 other_update=db_nslcmop_update,
8602 )
8603 if nslcmop_operation_state:
8604 try:
8605 msg = {
8606 "nsr_id": nsr_id,
8607 "nslcmop_id": nslcmop_id,
8608 "operationState": nslcmop_operation_state,
8609 }
8610 await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8611 except Exception as e:
8612 self.logger.error(
8613 logging_text + "kafka_write notification Exception {}".format(e)
8614 )
8615 self.logger.debug(logging_text + "Exit")
8616 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")