Heal: Await heal operation in RO before N2VC part to avoid errors
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 )
34
35 from osm_lcm import ROclient
36 from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41 )
42 from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50 )
51 from osm_lcm.ng_ro import NgRoClient, NgRoException
52 from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
59 check_juju_bundle_existence,
60 get_charm_artifact_path,
61 )
62 from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66 )
67 from osm_lcm.data_utils.vnfd import (
68 get_kdu,
69 get_kdu_services,
70 get_relation_list,
71 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
83 get_kdu_resource_profile,
84 find_software_version,
85 )
86 from osm_lcm.data_utils.list_utils import find_in_list
87 from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92 )
93 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94 from osm_lcm.data_utils.database.vim_account import VimAccountDB
95 from n2vc.definitions import RelationEndpoint
96 from n2vc.k8s_helm_conn import K8sHelmConnector
97 from n2vc.k8s_helm3_conn import K8sHelm3Connector
98 from n2vc.k8s_juju_conn import K8sJujuConnector
99
100 from osm_common.dbbase import DbException
101 from osm_common.fsbase import FsException
102
103 from osm_lcm.data_utils.database.database import Database
104 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105
106 from n2vc.n2vc_juju_conn import N2VCJujuConnector
107 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
108
109 from osm_lcm.lcm_helm_conn import LCMHelmConn
110 from osm_lcm.osm_config import OsmConfigBuilder
111 from osm_lcm.prometheus import parse_job
112
113 from copy import copy, deepcopy
114 from time import time
115 from uuid import uuid4
116
117 from random import randint
118
119 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
120
121
class NsLcm(LcmBase):
    """Network Service lifecycle manager.

    Drives NS LCM operations (instantiate, terminate, scale, heal, migrate,
    update, ...) by coordinating the RO (resource orchestrator) client, the
    N2VC/Juju and K8s (helm2/helm3/juju-bundle) connectors, and persisting
    operation state into the common database.
    """

    timeout_vca_on_error = (
        5 * 60
    )  # time from a charm's first blocked/error status until it is marked as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment of a ns
    timeout_ns_terminate = 1800  # default global timeout for termination of a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate (start/stop) actions
    timeout_verticalscale = 1800  # default global timeout for vertical scaling
    # sentinel results when searching for an existing sub-operation
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # label used for the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
142
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler
        :param lcm_tasks: task registry shared with the LCM main module
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop used by all async connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so local modifications do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju charms)
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm connector for execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connector for helm v2 charts
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # K8s connector for helm v3 charts
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # K8s connector for juju bundles
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: kdu type -> k8s connector
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: vca (charm) type -> connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # dispatch table: RO operation type -> coroutine that polls its status
        # (healing uses the dedicated recreate_status endpoint)
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
231
232 @staticmethod
233 def increment_ip_mac(ip_mac, vm_index=1):
234 if not isinstance(ip_mac, str):
235 return ip_mac
236 try:
237 # try with ipv4 look for last dot
238 i = ip_mac.rfind(".")
239 if i > 0:
240 i += 1
241 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i = ip_mac.rfind(":")
244 if i > 0:
245 i += 1
246 # format in hex, len can be 2 for mac or 4 for ipv6
247 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
248 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
249 )
250 except Exception:
251 pass
252 return None
253
254 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
255
256 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
257
258 try:
259 # TODO filter RO descriptor fields...
260
261 # write to database
262 db_dict = dict()
263 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
264 db_dict["deploymentStatus"] = ro_descriptor
265 self.update_db_2("nsrs", nsrs_id, db_dict)
266
267 except Exception as e:
268 self.logger.warn(
269 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
270 )
271
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by N2VC when a juju model changes.

        Re-reads the nsr selected by ``filter``, refreshes its 'vcaStatus',
        recomputes the configurationStatus entry addressed by the VCA index
        found at the end of ``path``, and toggles nsState between READY and
        DEGRADED according to juju machine/application health.

        :param table: changed database table (an nsrs record is assumed here)
        :param filter: database filter; its '_id' holds the nsr id
        :param path: dotted path of the changed element; ends with the VCA index
        :param updated_data: changed data (unused; status is re-queried from n2vc)
        :param vca_id: VCA identifier, if any
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last component of the dotted path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # reconcile config status with the juju unit status
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
374
375 async def _on_update_k8s_db(
376 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
377 ):
378 """
379 Updating vca status in NSR record
380 :param cluster_uuid: UUID of a k8s cluster
381 :param kdu_instance: The unique name of the KDU instance
382 :param filter: To get nsr_id
383 :cluster_type: The cluster type (juju, k8s)
384 :return: none
385 """
386
387 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
388 # .format(cluster_uuid, kdu_instance, filter))
389
390 nsr_id = filter.get("_id")
391 try:
392 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
393 cluster_uuid=cluster_uuid,
394 kdu_instance=kdu_instance,
395 yaml_format=False,
396 complete_status=True,
397 vca_id=vca_id,
398 )
399
400 # vcaStatus
401 db_dict = dict()
402 db_dict["vcaStatus"] = {nsr_id: vca_status}
403
404 if cluster_type in ("juju-bundle", "juju"):
405 # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
406 # status in a similar way between Juju Bundles and Helm Charts on this side
407 await self.k8sclusterjuju.update_vca_status(
408 db_dict["vcaStatus"],
409 kdu_instance,
410 vca_id=vca_id,
411 )
412
413 self.logger.debug(
414 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
415 )
416
417 # write to database
418 self.update_db_2("nsrs", nsr_id, db_dict)
419 except (asyncio.CancelledError, asyncio.TimeoutError):
420 raise
421 except Exception as e:
422 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
423
424 @staticmethod
425 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
426 try:
427 env = Environment(undefined=StrictUndefined, autoescape=True)
428 template = env.from_string(cloud_init_text)
429 return template.render(additional_params or {})
430 except UndefinedError as e:
431 raise LcmException(
432 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
433 "file, must be provided in the instantiation parameters inside the "
434 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
435 )
436 except (TemplateError, TemplateNotFound) as e:
437 raise LcmException(
438 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
439 vnfd_id, vdu_id, e
440 )
441 )
442
443 def _get_vdu_cloud_init_content(self, vdu, vnfd):
444 cloud_init_content = cloud_init_file = None
445 try:
446 if vdu.get("cloud-init-file"):
447 base_folder = vnfd["_admin"]["storage"]
448 if base_folder["pkg-dir"]:
449 cloud_init_file = "{}/{}/cloud_init/{}".format(
450 base_folder["folder"],
451 base_folder["pkg-dir"],
452 vdu["cloud-init-file"],
453 )
454 else:
455 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
456 base_folder["folder"],
457 vdu["cloud-init-file"],
458 )
459 with self.fs.file_open(cloud_init_file, "r") as ci_file:
460 cloud_init_content = ci_file.read()
461 elif vdu.get("cloud-init"):
462 cloud_init_content = vdu["cloud-init"]
463
464 return cloud_init_content
465 except FsException as e:
466 raise LcmException(
467 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
468 vnfd["id"], vdu["id"], cloud_init_file, e
469 )
470 )
471
472 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
473 vdur = next(
474 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
475 )
476 additional_params = vdur.get("additionalParams")
477 return parse_yaml_strings(additional_params)
478
479 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
480 """
481 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
482 :param vnfd: input vnfd
483 :param new_id: overrides vnf id if provided
484 :param additionalParams: Instantiation params for VNFs provided
485 :param nsrId: Id of the NSR
486 :return: copy of vnfd
487 """
488 vnfd_RO = deepcopy(vnfd)
489 # remove unused by RO configuration, monitoring, scaling and internal keys
490 vnfd_RO.pop("_id", None)
491 vnfd_RO.pop("_admin", None)
492 vnfd_RO.pop("monitoring-param", None)
493 vnfd_RO.pop("scaling-group-descriptor", None)
494 vnfd_RO.pop("kdu", None)
495 vnfd_RO.pop("k8s-cluster", None)
496 if new_id:
497 vnfd_RO["id"] = new_id
498
499 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
500 for vdu in get_iterable(vnfd_RO, "vdu"):
501 vdu.pop("cloud-init-file", None)
502 vdu.pop("cloud-init", None)
503 return vnfd_RO
504
505 @staticmethod
506 def ip_profile_2_RO(ip_profile):
507 RO_ip_profile = deepcopy(ip_profile)
508 if "dns-server" in RO_ip_profile:
509 if isinstance(RO_ip_profile["dns-server"], list):
510 RO_ip_profile["dns-address"] = []
511 for ds in RO_ip_profile.pop("dns-server"):
512 RO_ip_profile["dns-address"].append(ds["address"])
513 else:
514 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
515 if RO_ip_profile.get("ip-version") == "ipv4":
516 RO_ip_profile["ip-version"] = "IPv4"
517 if RO_ip_profile.get("ip-version") == "ipv6":
518 RO_ip_profile["ip-version"] = "IPv6"
519 if "dhcp-params" in RO_ip_profile:
520 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
521 return RO_ip_profile
522
523 def _get_ro_vim_id_for_vim_account(self, vim_account):
524 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
525 if db_vim["_admin"]["operationalState"] != "ENABLED":
526 raise LcmException(
527 "VIM={} is not available. operationalState={}".format(
528 vim_account, db_vim["_admin"]["operationalState"]
529 )
530 )
531 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
532 return RO_vim_id
533
534 def get_ro_wim_id_for_wim_account(self, wim_account):
535 if isinstance(wim_account, str):
536 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
537 if db_wim["_admin"]["operationalState"] != "ENABLED":
538 raise LcmException(
539 "WIM={} is not available. operationalState={}".format(
540 wim_account, db_wim["_admin"]["operationalState"]
541 )
542 )
543 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
544 return RO_wim_id
545 else:
546 return wim_account
547
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Scale the vdur list of a vnfr record in the database.

        :param db_vnfr: vnfr record content; its 'vdur' list is re-read from
            the database at the end so the caller sees the final state
        :param vdu_create: dict vdu-id -> number of replicas to add
        :param vdu_delete: dict vdu-id -> number of replicas to remove
        :param mark_delete: when True, vdurs are only marked DELETING in the
            database instead of being pulled from the record
        :raises LcmException: scaling out with neither a vdur nor a saved template
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as the template to clone
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the template vdur and reset its runtime state
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica;
                        # dynamic ones are left for the VIM to assign
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark the newest vdu_count replicas as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
659
660 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
661 """
662 Updates database nsr with the RO info for the created vld
663 :param ns_update_nsr: dictionary to be filled with the updated info
664 :param db_nsr: content of db_nsr. This is also modified
665 :param nsr_desc_RO: nsr descriptor from RO
666 :return: Nothing, LcmException is raised on errors
667 """
668
669 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
670 for net_RO in get_iterable(nsr_desc_RO, "nets"):
671 if vld["id"] != net_RO.get("ns_net_osm_id"):
672 continue
673 vld["vim-id"] = net_RO.get("vim_net_id")
674 vld["name"] = net_RO.get("vim_name")
675 vld["status"] = net_RO.get("status")
676 vld["status-detailed"] = net_RO.get("error_msg")
677 ns_update_nsr["vld.{}".format(vld_index)] = vld
678 break
679 else:
680 raise LcmException(
681 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
682 )
683
684 def set_vnfr_at_error(self, db_vnfrs, error_text):
685 try:
686 for db_vnfr in db_vnfrs.values():
687 vnfr_update = {"status": "ERROR"}
688 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
689 if "status" not in vdur:
690 vdur["status"] = "ERROR"
691 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
692 if error_text:
693 vdur["status-detailed"] = str(error_text)
694 vnfr_update[
695 "vdur.{}.status-detailed".format(vdu_index)
696 ] = "ERROR"
697 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
698 except DbException as e:
699 self.logger.error("Cannot update vnf. {}".format(e))
700
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ';': keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM, nothing to refresh
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # match the replica by its position among same-id vms
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses from the VIM view
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
797
798 def _get_ns_config_info(self, nsr_id):
799 """
800 Generates a mapping between vnf,vdu elements and the N2VC id
801 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
802 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
803 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
804 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
805 """
806 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
807 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
808 mapping = {}
809 ns_config_info = {"osm-config-mapping": mapping}
810 for vca in vca_deployed_list:
811 if not vca["member-vnf-index"]:
812 continue
813 if not vca["vdu_id"]:
814 mapping[vca["member-vnf-index"]] = vca["application"]
815 else:
816 mapping[
817 "{}.{}.{}".format(
818 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
819 )
820 ] = vca["application"]
821 return ns_config_info
822
823 async def _instantiate_ng_ro(
824 self,
825 logging_text,
826 nsr_id,
827 nsd,
828 db_nsr,
829 db_nslcmop,
830 db_vnfrs,
831 db_vnfds,
832 n2vc_key_list,
833 stage,
834 start_deploy,
835 timeout_ns_deploy,
836 ):
837
838 db_vims = {}
839
840 def get_vim_account(vim_account_id):
841 nonlocal db_vims
842 if vim_account_id in db_vims:
843 return db_vims[vim_account_id]
844 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
845 db_vims[vim_account_id] = db_vim
846 return db_vim
847
848 # modify target_vld info with instantiation parameters
849 def parse_vld_instantiation_params(
850 target_vim, target_vld, vld_params, target_sdn
851 ):
852 if vld_params.get("ip-profile"):
853 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
854 "ip-profile"
855 ]
856 if vld_params.get("provider-network"):
857 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
858 "provider-network"
859 ]
860 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
861 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
862 "provider-network"
863 ]["sdn-ports"]
864 if vld_params.get("wimAccountId"):
865 target_wim = "wim:{}".format(vld_params["wimAccountId"])
866 target_vld["vim_info"][target_wim] = {}
867 for param in ("vim-network-name", "vim-network-id"):
868 if vld_params.get(param):
869 if isinstance(vld_params[param], dict):
870 for vim, vim_net in vld_params[param].items():
871 other_target_vim = "vim:" + vim
872 populate_dict(
873 target_vld["vim_info"],
874 (other_target_vim, param.replace("-", "_")),
875 vim_net,
876 )
877 else: # isinstance str
878 target_vld["vim_info"][target_vim][
879 param.replace("-", "_")
880 ] = vld_params[param]
881 if vld_params.get("common_id"):
882 target_vld["common_id"] = vld_params.get("common_id")
883
884 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
885 def update_ns_vld_target(target, ns_params):
886 for vnf_params in ns_params.get("vnf", ()):
887 if vnf_params.get("vimAccountId"):
888 target_vnf = next(
889 (
890 vnfr
891 for vnfr in db_vnfrs.values()
892 if vnf_params["member-vnf-index"]
893 == vnfr["member-vnf-index-ref"]
894 ),
895 None,
896 )
897 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
898 for a_index, a_vld in enumerate(target["ns"]["vld"]):
899 target_vld = find_in_list(
900 get_iterable(vdur, "interfaces"),
901 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
902 )
903
904 vld_params = find_in_list(
905 get_iterable(ns_params, "vld"),
906 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
907 )
908 if target_vld:
909
910 if vnf_params.get("vimAccountId") not in a_vld.get(
911 "vim_info", {}
912 ):
913 target_vim_network_list = [
914 v for _, v in a_vld.get("vim_info").items()
915 ]
916 target_vim_network_name = next(
917 (
918 item.get("vim_network_name", "")
919 for item in target_vim_network_list
920 ),
921 "",
922 )
923
924 target["ns"]["vld"][a_index].get("vim_info").update(
925 {
926 "vim:{}".format(vnf_params["vimAccountId"]): {
927 "vim_network_name": target_vim_network_name,
928 }
929 }
930 )
931
932 if vld_params:
933 for param in ("vim-network-name", "vim-network-id"):
934 if vld_params.get(param) and isinstance(
935 vld_params[param], dict
936 ):
937 for vim, vim_net in vld_params[
938 param
939 ].items():
940 other_target_vim = "vim:" + vim
941 populate_dict(
942 target["ns"]["vld"][a_index].get(
943 "vim_info"
944 ),
945 (
946 other_target_vim,
947 param.replace("-", "_"),
948 ),
949 vim_net,
950 )
951
952 nslcmop_id = db_nslcmop["_id"]
953 target = {
954 "name": db_nsr["name"],
955 "ns": {"vld": []},
956 "vnf": [],
957 "image": deepcopy(db_nsr["image"]),
958 "flavor": deepcopy(db_nsr["flavor"]),
959 "action_id": nslcmop_id,
960 "cloud_init_content": {},
961 }
962 for image in target["image"]:
963 image["vim_info"] = {}
964 for flavor in target["flavor"]:
965 flavor["vim_info"] = {}
966 if db_nsr.get("affinity-or-anti-affinity-group"):
967 target["affinity-or-anti-affinity-group"] = deepcopy(
968 db_nsr["affinity-or-anti-affinity-group"]
969 )
970 for affinity_or_anti_affinity_group in target[
971 "affinity-or-anti-affinity-group"
972 ]:
973 affinity_or_anti_affinity_group["vim_info"] = {}
974
975 if db_nslcmop.get("lcmOperationType") != "instantiate":
976 # get parameters of instantiation:
977 db_nslcmop_instantiate = self.db.get_list(
978 "nslcmops",
979 {
980 "nsInstanceId": db_nslcmop["nsInstanceId"],
981 "lcmOperationType": "instantiate",
982 },
983 )[-1]
984 ns_params = db_nslcmop_instantiate.get("operationParams")
985 else:
986 ns_params = db_nslcmop.get("operationParams")
987 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
988 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
989
990 cp2target = {}
991 for vld_index, vld in enumerate(db_nsr.get("vld")):
992 target_vim = "vim:{}".format(ns_params["vimAccountId"])
993 target_vld = {
994 "id": vld["id"],
995 "name": vld["name"],
996 "mgmt-network": vld.get("mgmt-network", False),
997 "type": vld.get("type"),
998 "vim_info": {
999 target_vim: {
1000 "vim_network_name": vld.get("vim-network-name"),
1001 "vim_account_id": ns_params["vimAccountId"],
1002 }
1003 },
1004 }
1005 # check if this network needs SDN assist
1006 if vld.get("pci-interfaces"):
1007 db_vim = get_vim_account(ns_params["vimAccountId"])
1008 sdnc_id = db_vim["config"].get("sdn-controller")
1009 if sdnc_id:
1010 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1011 target_sdn = "sdn:{}".format(sdnc_id)
1012 target_vld["vim_info"][target_sdn] = {
1013 "sdn": True,
1014 "target_vim": target_vim,
1015 "vlds": [sdn_vld],
1016 "type": vld.get("type"),
1017 }
1018
1019 nsd_vnf_profiles = get_vnf_profiles(nsd)
1020 for nsd_vnf_profile in nsd_vnf_profiles:
1021 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1022 if cp["virtual-link-profile-id"] == vld["id"]:
1023 cp2target[
1024 "member_vnf:{}.{}".format(
1025 cp["constituent-cpd-id"][0][
1026 "constituent-base-element-id"
1027 ],
1028 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1029 )
1030 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1031
1032 # check at nsd descriptor, if there is an ip-profile
1033 vld_params = {}
1034 nsd_vlp = find_in_list(
1035 get_virtual_link_profiles(nsd),
1036 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1037 == vld["id"],
1038 )
1039 if (
1040 nsd_vlp
1041 and nsd_vlp.get("virtual-link-protocol-data")
1042 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1043 ):
1044 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1045 "l3-protocol-data"
1046 ]
1047 ip_profile_dest_data = {}
1048 if "ip-version" in ip_profile_source_data:
1049 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1050 "ip-version"
1051 ]
1052 if "cidr" in ip_profile_source_data:
1053 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1054 "cidr"
1055 ]
1056 if "gateway-ip" in ip_profile_source_data:
1057 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1058 "gateway-ip"
1059 ]
1060 if "dhcp-enabled" in ip_profile_source_data:
1061 ip_profile_dest_data["dhcp-params"] = {
1062 "enabled": ip_profile_source_data["dhcp-enabled"]
1063 }
1064 vld_params["ip-profile"] = ip_profile_dest_data
1065
1066 # update vld_params with instantiation params
1067 vld_instantiation_params = find_in_list(
1068 get_iterable(ns_params, "vld"),
1069 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1070 )
1071 if vld_instantiation_params:
1072 vld_params.update(vld_instantiation_params)
1073 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1074 target["ns"]["vld"].append(target_vld)
1075 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1076 update_ns_vld_target(target, ns_params)
1077
1078 for vnfr in db_vnfrs.values():
1079 vnfd = find_in_list(
1080 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1081 )
1082 vnf_params = find_in_list(
1083 get_iterable(ns_params, "vnf"),
1084 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1085 )
1086 target_vnf = deepcopy(vnfr)
1087 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1088 for vld in target_vnf.get("vld", ()):
1089 # check if connected to a ns.vld, to fill target'
1090 vnf_cp = find_in_list(
1091 vnfd.get("int-virtual-link-desc", ()),
1092 lambda cpd: cpd.get("id") == vld["id"],
1093 )
1094 if vnf_cp:
1095 ns_cp = "member_vnf:{}.{}".format(
1096 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1097 )
1098 if cp2target.get(ns_cp):
1099 vld["target"] = cp2target[ns_cp]
1100
1101 vld["vim_info"] = {
1102 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1103 }
1104 # check if this network needs SDN assist
1105 target_sdn = None
1106 if vld.get("pci-interfaces"):
1107 db_vim = get_vim_account(vnfr["vim-account-id"])
1108 sdnc_id = db_vim["config"].get("sdn-controller")
1109 if sdnc_id:
1110 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1111 target_sdn = "sdn:{}".format(sdnc_id)
1112 vld["vim_info"][target_sdn] = {
1113 "sdn": True,
1114 "target_vim": target_vim,
1115 "vlds": [sdn_vld],
1116 "type": vld.get("type"),
1117 }
1118
1119 # check at vnfd descriptor, if there is an ip-profile
1120 vld_params = {}
1121 vnfd_vlp = find_in_list(
1122 get_virtual_link_profiles(vnfd),
1123 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1124 )
1125 if (
1126 vnfd_vlp
1127 and vnfd_vlp.get("virtual-link-protocol-data")
1128 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1129 ):
1130 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1131 "l3-protocol-data"
1132 ]
1133 ip_profile_dest_data = {}
1134 if "ip-version" in ip_profile_source_data:
1135 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1136 "ip-version"
1137 ]
1138 if "cidr" in ip_profile_source_data:
1139 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1140 "cidr"
1141 ]
1142 if "gateway-ip" in ip_profile_source_data:
1143 ip_profile_dest_data[
1144 "gateway-address"
1145 ] = ip_profile_source_data["gateway-ip"]
1146 if "dhcp-enabled" in ip_profile_source_data:
1147 ip_profile_dest_data["dhcp-params"] = {
1148 "enabled": ip_profile_source_data["dhcp-enabled"]
1149 }
1150
1151 vld_params["ip-profile"] = ip_profile_dest_data
1152 # update vld_params with instantiation params
1153 if vnf_params:
1154 vld_instantiation_params = find_in_list(
1155 get_iterable(vnf_params, "internal-vld"),
1156 lambda i_vld: i_vld["name"] == vld["id"],
1157 )
1158 if vld_instantiation_params:
1159 vld_params.update(vld_instantiation_params)
1160 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1161
1162 vdur_list = []
1163 for vdur in target_vnf.get("vdur", ()):
1164 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1165 continue # This vdu must not be created
1166 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1167
1168 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1169
1170 if ssh_keys_all:
1171 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1172 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1173 if (
1174 vdu_configuration
1175 and vdu_configuration.get("config-access")
1176 and vdu_configuration.get("config-access").get("ssh-access")
1177 ):
1178 vdur["ssh-keys"] = ssh_keys_all
1179 vdur["ssh-access-required"] = vdu_configuration[
1180 "config-access"
1181 ]["ssh-access"]["required"]
1182 elif (
1183 vnf_configuration
1184 and vnf_configuration.get("config-access")
1185 and vnf_configuration.get("config-access").get("ssh-access")
1186 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1187 ):
1188 vdur["ssh-keys"] = ssh_keys_all
1189 vdur["ssh-access-required"] = vnf_configuration[
1190 "config-access"
1191 ]["ssh-access"]["required"]
1192 elif ssh_keys_instantiation and find_in_list(
1193 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1194 ):
1195 vdur["ssh-keys"] = ssh_keys_instantiation
1196
1197 self.logger.debug("NS > vdur > {}".format(vdur))
1198
1199 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1200 # cloud-init
1201 if vdud.get("cloud-init-file"):
1202 vdur["cloud-init"] = "{}:file:{}".format(
1203 vnfd["_id"], vdud.get("cloud-init-file")
1204 )
1205 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1206 if vdur["cloud-init"] not in target["cloud_init_content"]:
1207 base_folder = vnfd["_admin"]["storage"]
1208 if base_folder["pkg-dir"]:
1209 cloud_init_file = "{}/{}/cloud_init/{}".format(
1210 base_folder["folder"],
1211 base_folder["pkg-dir"],
1212 vdud.get("cloud-init-file"),
1213 )
1214 else:
1215 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1216 base_folder["folder"],
1217 vdud.get("cloud-init-file"),
1218 )
1219 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1220 target["cloud_init_content"][
1221 vdur["cloud-init"]
1222 ] = ci_file.read()
1223 elif vdud.get("cloud-init"):
1224 vdur["cloud-init"] = "{}:vdu:{}".format(
1225 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1226 )
1227 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1228 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1229 "cloud-init"
1230 ]
1231 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1232 deploy_params_vdu = self._format_additional_params(
1233 vdur.get("additionalParams") or {}
1234 )
1235 deploy_params_vdu["OSM"] = get_osm_params(
1236 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1237 )
1238 vdur["additionalParams"] = deploy_params_vdu
1239
1240 # flavor
1241 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1242 if target_vim not in ns_flavor["vim_info"]:
1243 ns_flavor["vim_info"][target_vim] = {}
1244
1245 # deal with images
1246 # in case alternative images are provided we must check if they should be applied
1247 # for the vim_type, modify the vim_type taking into account
1248 ns_image_id = int(vdur["ns-image-id"])
1249 if vdur.get("alt-image-ids"):
1250 db_vim = get_vim_account(vnfr["vim-account-id"])
1251 vim_type = db_vim["vim_type"]
1252 for alt_image_id in vdur.get("alt-image-ids"):
1253 ns_alt_image = target["image"][int(alt_image_id)]
1254 if vim_type == ns_alt_image.get("vim-type"):
1255 # must use alternative image
1256 self.logger.debug(
1257 "use alternative image id: {}".format(alt_image_id)
1258 )
1259 ns_image_id = alt_image_id
1260 vdur["ns-image-id"] = ns_image_id
1261 break
1262 ns_image = target["image"][int(ns_image_id)]
1263 if target_vim not in ns_image["vim_info"]:
1264 ns_image["vim_info"][target_vim] = {}
1265
1266 # Affinity groups
1267 if vdur.get("affinity-or-anti-affinity-group-id"):
1268 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1269 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1270 if target_vim not in ns_ags["vim_info"]:
1271 ns_ags["vim_info"][target_vim] = {}
1272
1273 vdur["vim_info"] = {target_vim: {}}
1274 # instantiation parameters
1275 if vnf_params:
1276 vdu_instantiation_params = find_in_list(
1277 get_iterable(vnf_params, "vdu"),
1278 lambda i_vdu: i_vdu["id"] == vdud["id"],
1279 )
1280 if vdu_instantiation_params:
1281 # Parse the vdu_volumes from the instantiation params
1282 vdu_volumes = get_volumes_from_instantiation_params(
1283 vdu_instantiation_params, vdud
1284 )
1285 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1286 vdur_list.append(vdur)
1287 target_vnf["vdur"] = vdur_list
1288 target["vnf"].append(target_vnf)
1289
1290 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1291 desc = await self.RO.deploy(nsr_id, target)
1292 self.logger.debug("RO return > {}".format(desc))
1293 action_id = desc["action_id"]
1294 await self._wait_ng_ro(
1295 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
1296 operation="instantiation"
1297 )
1298
1299 # Updating NSR
1300 db_nsr_update = {
1301 "_admin.deployed.RO.operational-status": "running",
1302 "detailed-status": " ".join(stage),
1303 }
1304 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1305 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1306 self._write_op_status(nslcmop_id, stage)
1307 self.logger.debug(
1308 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1309 )
1310 return
1311
1312 async def _wait_ng_ro(
1313 self,
1314 nsr_id,
1315 action_id,
1316 nslcmop_id=None,
1317 start_time=None,
1318 timeout=600,
1319 stage=None,
1320 operation=None,
1321 ):
1322 detailed_status_old = None
1323 db_nsr_update = {}
1324 start_time = start_time or time()
1325 while time() <= start_time + timeout:
1326 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1327 self.logger.debug("Wait NG RO > {}".format(desc_status))
1328 if desc_status["status"] == "FAILED":
1329 raise NgRoException(desc_status["details"])
1330 elif desc_status["status"] == "BUILD":
1331 if stage:
1332 stage[2] = "VIM: ({})".format(desc_status["details"])
1333 elif desc_status["status"] == "DONE":
1334 if stage:
1335 stage[2] = "Deployed at VIM"
1336 break
1337 else:
1338 assert False, "ROclient.check_ns_status returns unknown {}".format(
1339 desc_status["status"]
1340 )
1341 if stage and nslcmop_id and stage[2] != detailed_status_old:
1342 detailed_status_old = stage[2]
1343 db_nsr_update["detailed-status"] = " ".join(stage)
1344 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1345 self._write_op_status(nslcmop_id, stage)
1346 await asyncio.sleep(15, loop=self.loop)
1347 else: # timeout_ns_deploy
1348 raise NgRoException("Timeout waiting ns to deploy")
1349
1350 async def _terminate_ng_ro(
1351 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1352 ):
1353 db_nsr_update = {}
1354 failed_detail = []
1355 action_id = None
1356 start_deploy = time()
1357 try:
1358 target = {
1359 "ns": {"vld": []},
1360 "vnf": [],
1361 "image": [],
1362 "flavor": [],
1363 "action_id": nslcmop_id,
1364 }
1365 desc = await self.RO.deploy(nsr_id, target)
1366 action_id = desc["action_id"]
1367 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1368 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1369 self.logger.debug(
1370 logging_text
1371 + "ns terminate action at RO. action_id={}".format(action_id)
1372 )
1373
1374 # wait until done
1375 delete_timeout = 20 * 60 # 20 minutes
1376 await self._wait_ng_ro(
1377 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1378 operation="termination"
1379 )
1380
1381 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1382 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1383 # delete all nsr
1384 await self.RO.delete(nsr_id)
1385 except Exception as e:
1386 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1387 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1388 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1389 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1390 self.logger.debug(
1391 logging_text + "RO_action_id={} already deleted".format(action_id)
1392 )
1393 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1394 failed_detail.append("delete conflict: {}".format(e))
1395 self.logger.debug(
1396 logging_text
1397 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1398 )
1399 else:
1400 failed_detail.append("delete error: {}".format(e))
1401 self.logger.error(
1402 logging_text
1403 + "RO_action_id={} delete error: {}".format(action_id, e)
1404 )
1405
1406 if failed_detail:
1407 stage[2] = "Error deleting from VIM"
1408 else:
1409 stage[2] = "Deleted from VIM"
1410 db_nsr_update["detailed-status"] = " ".join(stage)
1411 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1412 self._write_op_status(nslcmop_id, stage)
1413
1414 if failed_detail:
1415 raise LcmException("; ".join(failed_detail))
1416 return
1417
1418 async def instantiate_RO(
1419 self,
1420 logging_text,
1421 nsr_id,
1422 nsd,
1423 db_nsr,
1424 db_nslcmop,
1425 db_vnfrs,
1426 db_vnfds,
1427 n2vc_key_list,
1428 stage,
1429 ):
1430 """
1431 Instantiate at RO
1432 :param logging_text: preffix text to use at logging
1433 :param nsr_id: nsr identity
1434 :param nsd: database content of ns descriptor
1435 :param db_nsr: database content of ns record
1436 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1437 :param db_vnfrs:
1438 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1439 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1440 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1441 :return: None or exception
1442 """
1443 try:
1444 start_deploy = time()
1445 ns_params = db_nslcmop.get("operationParams")
1446 if ns_params and ns_params.get("timeout_ns_deploy"):
1447 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1448 else:
1449 timeout_ns_deploy = self.timeout.get(
1450 "ns_deploy", self.timeout_ns_deploy
1451 )
1452
1453 # Check for and optionally request placement optimization. Database will be updated if placement activated
1454 stage[2] = "Waiting for Placement."
1455 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1456 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1457 for vnfr in db_vnfrs.values():
1458 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1459 break
1460 else:
1461 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1462
1463 return await self._instantiate_ng_ro(
1464 logging_text,
1465 nsr_id,
1466 nsd,
1467 db_nsr,
1468 db_nslcmop,
1469 db_vnfrs,
1470 db_vnfds,
1471 n2vc_key_list,
1472 stage,
1473 start_deploy,
1474 timeout_ns_deploy,
1475 )
1476 except Exception as e:
1477 stage[2] = "ERROR deploying at VIM"
1478 self.set_vnfr_at_error(db_vnfrs, str(e))
1479 self.logger.error(
1480 "Error deploying at VIM {}".format(e),
1481 exc_info=not isinstance(
1482 e,
1483 (
1484 ROclient.ROClientException,
1485 LcmException,
1486 DbException,
1487 NgRoException,
1488 ),
1489 ),
1490 )
1491 raise
1492
1493 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1494 """
1495 Wait for kdu to be up, get ip address
1496 :param logging_text: prefix use for logging
1497 :param nsr_id:
1498 :param vnfr_id:
1499 :param kdu_name:
1500 :return: IP address, K8s services
1501 """
1502
1503 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1504 nb_tries = 0
1505
1506 while nb_tries < 360:
1507 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1508 kdur = next(
1509 (
1510 x
1511 for x in get_iterable(db_vnfr, "kdur")
1512 if x.get("kdu-name") == kdu_name
1513 ),
1514 None,
1515 )
1516 if not kdur:
1517 raise LcmException(
1518 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1519 )
1520 if kdur.get("status"):
1521 if kdur["status"] in ("READY", "ENABLED"):
1522 return kdur.get("ip-address"), kdur.get("services")
1523 else:
1524 raise LcmException(
1525 "target KDU={} is in error state".format(kdu_name)
1526 )
1527
1528 await asyncio.sleep(10, loop=self.loop)
1529 nb_tries += 1
1530 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1531
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for IP address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix used for logging
        :param nsr_id: NS record identifier
        :param vnfr_id: VNF record identifier to read vdur/ip information from
        :param vdu_id: target VDU id; None means the VNF's management VDU
        :param vdu_index: count-index of the target VDU (used with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: target VNF/VM in error state, vdur not found,
            retries exhausted, or key injection failed repeatedly
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # counts key-injection retries (classic RO path only)
        target_vdu_id = None  # set once the target VDU is ACTIVE with an IP
        ro_retries = 0  # counts overall poll iterations (10 s each)

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            # NOTE(review): "loop=" is removed in Python 3.10+ — confirm target runtime
            await asyncio.sleep(10, loop=self.loop)

            # get ip address: re-read the vnfr until the target vdur is ACTIVE
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue  # mgmt IP not assigned yet; keep polling
                    # find the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # match by vdu-id-ref AND count-index (scaling instances)
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs have no VIM VM, so they are treated as already up
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue  # VM active but IP not reported yet
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue  # not ACTIVE yet; poll again

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is impossible on a PDU; return the IP as-is
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: deploy an "inject_ssh_key" action and wait for it
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
                        break
                    else:
                        # classic RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue  # RO deployment not registered yet
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                            else:
                                raise ROclient.ROClientException(
                                    "error injecting key: {}".format(
                                        result.get("description")
                                    )
                                )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO may answer transient errors; retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the IP address is all that was requested
                break

        return ip_address
1708
1709 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1710 """
1711 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1712 """
1713 my_vca = vca_deployed_list[vca_index]
1714 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1715 # vdu or kdu: no dependencies
1716 return
1717 timeout = 300
1718 while timeout >= 0:
1719 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1720 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1721 configuration_status_list = db_nsr["configurationStatus"]
1722 for index, vca_deployed in enumerate(configuration_status_list):
1723 if index == vca_index:
1724 # myself
1725 continue
1726 if not my_vca.get("member-vnf-index") or (
1727 vca_deployed.get("member-vnf-index")
1728 == my_vca.get("member-vnf-index")
1729 ):
1730 internal_status = configuration_status_list[index].get("status")
1731 if internal_status == "READY":
1732 continue
1733 elif internal_status == "BROKEN":
1734 raise LcmException(
1735 "Configuration aborted because dependent charm/s has failed"
1736 )
1737 else:
1738 break
1739 else:
1740 # no dependencies, return
1741 return
1742 await asyncio.sleep(10)
1743 timeout -= 1
1744
1745 raise LcmException("Configuration aborted because dependent charm/s timeout")
1746
1747 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1748 vca_id = None
1749 if db_vnfr:
1750 vca_id = deep_get(db_vnfr, ("vca-id",))
1751 elif db_nsr:
1752 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1753 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1754 return vca_id
1755
1756 async def instantiate_N2VC(
1757 self,
1758 logging_text,
1759 vca_index,
1760 nsi_id,
1761 db_nsr,
1762 db_vnfr,
1763 vdu_id,
1764 kdu_name,
1765 vdu_index,
1766 config_descriptor,
1767 deploy_params,
1768 base_folder,
1769 nslcmop_id,
1770 stage,
1771 vca_type,
1772 vca_name,
1773 ee_config_descriptor,
1774 ):
1775 nsr_id = db_nsr["_id"]
1776 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1777 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1778 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1779 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1780 db_dict = {
1781 "collection": "nsrs",
1782 "filter": {"_id": nsr_id},
1783 "path": db_update_entry,
1784 }
1785 step = ""
1786 try:
1787
1788 element_type = "NS"
1789 element_under_configuration = nsr_id
1790
1791 vnfr_id = None
1792 if db_vnfr:
1793 vnfr_id = db_vnfr["_id"]
1794 osm_config["osm"]["vnf_id"] = vnfr_id
1795
1796 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1797
1798 if vca_type == "native_charm":
1799 index_number = 0
1800 else:
1801 index_number = vdu_index or 0
1802
1803 if vnfr_id:
1804 element_type = "VNF"
1805 element_under_configuration = vnfr_id
1806 namespace += ".{}-{}".format(vnfr_id, index_number)
1807 if vdu_id:
1808 namespace += ".{}-{}".format(vdu_id, index_number)
1809 element_type = "VDU"
1810 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1811 osm_config["osm"]["vdu_id"] = vdu_id
1812 elif kdu_name:
1813 namespace += ".{}".format(kdu_name)
1814 element_type = "KDU"
1815 element_under_configuration = kdu_name
1816 osm_config["osm"]["kdu_name"] = kdu_name
1817
1818 # Get artifact path
1819 if base_folder["pkg-dir"]:
1820 artifact_path = "{}/{}/{}/{}".format(
1821 base_folder["folder"],
1822 base_folder["pkg-dir"],
1823 "charms"
1824 if vca_type
1825 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1826 else "helm-charts",
1827 vca_name,
1828 )
1829 else:
1830 artifact_path = "{}/Scripts/{}/{}/".format(
1831 base_folder["folder"],
1832 "charms"
1833 if vca_type
1834 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1835 else "helm-charts",
1836 vca_name,
1837 )
1838
1839 self.logger.debug("Artifact path > {}".format(artifact_path))
1840
1841 # get initial_config_primitive_list that applies to this element
1842 initial_config_primitive_list = config_descriptor.get(
1843 "initial-config-primitive"
1844 )
1845
1846 self.logger.debug(
1847 "Initial config primitive list > {}".format(
1848 initial_config_primitive_list
1849 )
1850 )
1851
1852 # add config if not present for NS charm
1853 ee_descriptor_id = ee_config_descriptor.get("id")
1854 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1855 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1856 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1857 )
1858
1859 self.logger.debug(
1860 "Initial config primitive list #2 > {}".format(
1861 initial_config_primitive_list
1862 )
1863 )
1864 # n2vc_redesign STEP 3.1
1865 # find old ee_id if exists
1866 ee_id = vca_deployed.get("ee_id")
1867
1868 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1869 # create or register execution environment in VCA
1870 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1871
1872 self._write_configuration_status(
1873 nsr_id=nsr_id,
1874 vca_index=vca_index,
1875 status="CREATING",
1876 element_under_configuration=element_under_configuration,
1877 element_type=element_type,
1878 )
1879
1880 step = "create execution environment"
1881 self.logger.debug(logging_text + step)
1882
1883 ee_id = None
1884 credentials = None
1885 if vca_type == "k8s_proxy_charm":
1886 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1887 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1888 namespace=namespace,
1889 artifact_path=artifact_path,
1890 db_dict=db_dict,
1891 vca_id=vca_id,
1892 )
1893 elif vca_type == "helm" or vca_type == "helm-v3":
1894 ee_id, credentials = await self.vca_map[
1895 vca_type
1896 ].create_execution_environment(
1897 namespace=namespace,
1898 reuse_ee_id=ee_id,
1899 db_dict=db_dict,
1900 config=osm_config,
1901 artifact_path=artifact_path,
1902 vca_type=vca_type,
1903 )
1904 else:
1905 ee_id, credentials = await self.vca_map[
1906 vca_type
1907 ].create_execution_environment(
1908 namespace=namespace,
1909 reuse_ee_id=ee_id,
1910 db_dict=db_dict,
1911 vca_id=vca_id,
1912 )
1913
1914 elif vca_type == "native_charm":
1915 step = "Waiting to VM being up and getting IP address"
1916 self.logger.debug(logging_text + step)
1917 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1918 logging_text,
1919 nsr_id,
1920 vnfr_id,
1921 vdu_id,
1922 vdu_index,
1923 user=None,
1924 pub_key=None,
1925 )
1926 credentials = {"hostname": rw_mgmt_ip}
1927 # get username
1928 username = deep_get(
1929 config_descriptor, ("config-access", "ssh-access", "default-user")
1930 )
1931 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1932 # merged. Meanwhile let's get username from initial-config-primitive
1933 if not username and initial_config_primitive_list:
1934 for config_primitive in initial_config_primitive_list:
1935 for param in config_primitive.get("parameter", ()):
1936 if param["name"] == "ssh-username":
1937 username = param["value"]
1938 break
1939 if not username:
1940 raise LcmException(
1941 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1942 "'config-access.ssh-access.default-user'"
1943 )
1944 credentials["username"] = username
1945 # n2vc_redesign STEP 3.2
1946
1947 self._write_configuration_status(
1948 nsr_id=nsr_id,
1949 vca_index=vca_index,
1950 status="REGISTERING",
1951 element_under_configuration=element_under_configuration,
1952 element_type=element_type,
1953 )
1954
1955 step = "register execution environment {}".format(credentials)
1956 self.logger.debug(logging_text + step)
1957 ee_id = await self.vca_map[vca_type].register_execution_environment(
1958 credentials=credentials,
1959 namespace=namespace,
1960 db_dict=db_dict,
1961 vca_id=vca_id,
1962 )
1963
1964 # for compatibility with MON/POL modules, the need model and application name at database
1965 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1966 ee_id_parts = ee_id.split(".")
1967 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1968 if len(ee_id_parts) >= 2:
1969 model_name = ee_id_parts[0]
1970 application_name = ee_id_parts[1]
1971 db_nsr_update[db_update_entry + "model"] = model_name
1972 db_nsr_update[db_update_entry + "application"] = application_name
1973
1974 # n2vc_redesign STEP 3.3
1975 step = "Install configuration Software"
1976
1977 self._write_configuration_status(
1978 nsr_id=nsr_id,
1979 vca_index=vca_index,
1980 status="INSTALLING SW",
1981 element_under_configuration=element_under_configuration,
1982 element_type=element_type,
1983 other_update=db_nsr_update,
1984 )
1985
1986 # TODO check if already done
1987 self.logger.debug(logging_text + step)
1988 config = None
1989 if vca_type == "native_charm":
1990 config_primitive = next(
1991 (p for p in initial_config_primitive_list if p["name"] == "config"),
1992 None,
1993 )
1994 if config_primitive:
1995 config = self._map_primitive_params(
1996 config_primitive, {}, deploy_params
1997 )
1998 num_units = 1
1999 if vca_type == "lxc_proxy_charm":
2000 if element_type == "NS":
2001 num_units = db_nsr.get("config-units") or 1
2002 elif element_type == "VNF":
2003 num_units = db_vnfr.get("config-units") or 1
2004 elif element_type == "VDU":
2005 for v in db_vnfr["vdur"]:
2006 if vdu_id == v["vdu-id-ref"]:
2007 num_units = v.get("config-units") or 1
2008 break
2009 if vca_type != "k8s_proxy_charm":
2010 await self.vca_map[vca_type].install_configuration_sw(
2011 ee_id=ee_id,
2012 artifact_path=artifact_path,
2013 db_dict=db_dict,
2014 config=config,
2015 num_units=num_units,
2016 vca_id=vca_id,
2017 vca_type=vca_type,
2018 )
2019
2020 # write in db flag of configuration_sw already installed
2021 self.update_db_2(
2022 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2023 )
2024
2025 # add relations for this VCA (wait for other peers related with this VCA)
2026 await self._add_vca_relations(
2027 logging_text=logging_text,
2028 nsr_id=nsr_id,
2029 vca_type=vca_type,
2030 vca_index=vca_index,
2031 )
2032
2033 # if SSH access is required, then get execution environment SSH public
2034 # if native charm we have waited already to VM be UP
2035 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2036 pub_key = None
2037 user = None
2038 # self.logger.debug("get ssh key block")
2039 if deep_get(
2040 config_descriptor, ("config-access", "ssh-access", "required")
2041 ):
2042 # self.logger.debug("ssh key needed")
2043 # Needed to inject a ssh key
2044 user = deep_get(
2045 config_descriptor,
2046 ("config-access", "ssh-access", "default-user"),
2047 )
2048 step = "Install configuration Software, getting public ssh key"
2049 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2050 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2051 )
2052
2053 step = "Insert public key into VM user={} ssh_key={}".format(
2054 user, pub_key
2055 )
2056 else:
2057 # self.logger.debug("no need to get ssh key")
2058 step = "Waiting to VM being up and getting IP address"
2059 self.logger.debug(logging_text + step)
2060
2061 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2062 rw_mgmt_ip = None
2063
2064 # n2vc_redesign STEP 5.1
2065 # wait for RO (ip-address) Insert pub_key into VM
2066 if vnfr_id:
2067 if kdu_name:
2068 rw_mgmt_ip, services = await self.wait_kdu_up(
2069 logging_text, nsr_id, vnfr_id, kdu_name
2070 )
2071 vnfd = self.db.get_one(
2072 "vnfds_revisions",
2073 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2074 )
2075 kdu = get_kdu(vnfd, kdu_name)
2076 kdu_services = [
2077 service["name"] for service in get_kdu_services(kdu)
2078 ]
2079 exposed_services = []
2080 for service in services:
2081 if any(s in service["name"] for s in kdu_services):
2082 exposed_services.append(service)
2083 await self.vca_map[vca_type].exec_primitive(
2084 ee_id=ee_id,
2085 primitive_name="config",
2086 params_dict={
2087 "osm-config": json.dumps(
2088 OsmConfigBuilder(
2089 k8s={"services": exposed_services}
2090 ).build()
2091 )
2092 },
2093 vca_id=vca_id,
2094 )
2095
2096 # This verification is needed in order to avoid trying to add a public key
2097 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2098 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2099 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2100 # or it is a KNF)
2101 elif db_vnfr.get('vdur'):
2102 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2103 logging_text,
2104 nsr_id,
2105 vnfr_id,
2106 vdu_id,
2107 vdu_index,
2108 user=user,
2109 pub_key=pub_key,
2110 )
2111
2112 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2113
2114 # store rw_mgmt_ip in deploy params for later replacement
2115 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2116
2117 # n2vc_redesign STEP 6 Execute initial config primitive
2118 step = "execute initial config primitive"
2119
2120 # wait for dependent primitives execution (NS -> VNF -> VDU)
2121 if initial_config_primitive_list:
2122 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2123
2124 # stage, in function of element type: vdu, kdu, vnf or ns
2125 my_vca = vca_deployed_list[vca_index]
2126 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2127 # VDU or KDU
2128 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2129 elif my_vca.get("member-vnf-index"):
2130 # VNF
2131 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2132 else:
2133 # NS
2134 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2135
2136 self._write_configuration_status(
2137 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2138 )
2139
2140 self._write_op_status(op_id=nslcmop_id, stage=stage)
2141
2142 check_if_terminated_needed = True
2143 for initial_config_primitive in initial_config_primitive_list:
2144 # adding information on the vca_deployed if it is a NS execution environment
2145 if not vca_deployed["member-vnf-index"]:
2146 deploy_params["ns_config_info"] = json.dumps(
2147 self._get_ns_config_info(nsr_id)
2148 )
2149 # TODO check if already done
2150 primitive_params_ = self._map_primitive_params(
2151 initial_config_primitive, {}, deploy_params
2152 )
2153
2154 step = "execute primitive '{}' params '{}'".format(
2155 initial_config_primitive["name"], primitive_params_
2156 )
2157 self.logger.debug(logging_text + step)
2158 await self.vca_map[vca_type].exec_primitive(
2159 ee_id=ee_id,
2160 primitive_name=initial_config_primitive["name"],
2161 params_dict=primitive_params_,
2162 db_dict=db_dict,
2163 vca_id=vca_id,
2164 vca_type=vca_type,
2165 )
2166 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2167 if check_if_terminated_needed:
2168 if config_descriptor.get("terminate-config-primitive"):
2169 self.update_db_2(
2170 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2171 )
2172 check_if_terminated_needed = False
2173
2174 # TODO register in database that primitive is done
2175
2176 # STEP 7 Configure metrics
2177 if vca_type == "helm" or vca_type == "helm-v3":
2178 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2179 ee_id=ee_id,
2180 artifact_path=artifact_path,
2181 ee_config_descriptor=ee_config_descriptor,
2182 vnfr_id=vnfr_id,
2183 nsr_id=nsr_id,
2184 target_ip=rw_mgmt_ip,
2185 )
2186 if prometheus_jobs:
2187 self.update_db_2(
2188 "nsrs",
2189 nsr_id,
2190 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2191 )
2192
2193 for job in prometheus_jobs:
2194 self.db.set_one(
2195 "prometheus_jobs",
2196 {"job_name": job["job_name"]},
2197 job,
2198 upsert=True,
2199 fail_on_empty=False,
2200 )
2201
2202 step = "instantiated at VCA"
2203 self.logger.debug(logging_text + step)
2204
2205 self._write_configuration_status(
2206 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2207 )
2208
2209 except Exception as e: # TODO not use Exception but N2VC exception
2210 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2211 if not isinstance(
2212 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2213 ):
2214 self.logger.error(
2215 "Exception while {} : {}".format(step, e), exc_info=True
2216 )
2217 self._write_configuration_status(
2218 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2219 )
2220 raise LcmException("{} {}".format(step, e)) from e
2221
2222 def _write_ns_status(
2223 self,
2224 nsr_id: str,
2225 ns_state: str,
2226 current_operation: str,
2227 current_operation_id: str,
2228 error_description: str = None,
2229 error_detail: str = None,
2230 other_update: dict = None,
2231 ):
2232 """
2233 Update db_nsr fields.
2234 :param nsr_id:
2235 :param ns_state:
2236 :param current_operation:
2237 :param current_operation_id:
2238 :param error_description:
2239 :param error_detail:
2240 :param other_update: Other required changes at database if provided, will be cleared
2241 :return:
2242 """
2243 try:
2244 db_dict = other_update or {}
2245 db_dict[
2246 "_admin.nslcmop"
2247 ] = current_operation_id # for backward compatibility
2248 db_dict["_admin.current-operation"] = current_operation_id
2249 db_dict["_admin.operation-type"] = (
2250 current_operation if current_operation != "IDLE" else None
2251 )
2252 db_dict["currentOperation"] = current_operation
2253 db_dict["currentOperationID"] = current_operation_id
2254 db_dict["errorDescription"] = error_description
2255 db_dict["errorDetail"] = error_detail
2256
2257 if ns_state:
2258 db_dict["nsState"] = ns_state
2259 self.update_db_2("nsrs", nsr_id, db_dict)
2260 except DbException as e:
2261 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2262
2263 def _write_op_status(
2264 self,
2265 op_id: str,
2266 stage: list = None,
2267 error_message: str = None,
2268 queuePosition: int = 0,
2269 operation_state: str = None,
2270 other_update: dict = None,
2271 ):
2272 try:
2273 db_dict = other_update or {}
2274 db_dict["queuePosition"] = queuePosition
2275 if isinstance(stage, list):
2276 db_dict["stage"] = stage[0]
2277 db_dict["detailed-status"] = " ".join(stage)
2278 elif stage is not None:
2279 db_dict["stage"] = str(stage)
2280
2281 if error_message is not None:
2282 db_dict["errorMessage"] = error_message
2283 if operation_state is not None:
2284 db_dict["operationState"] = operation_state
2285 db_dict["statusEnteredTime"] = time()
2286 self.update_db_2("nslcmops", op_id, db_dict)
2287 except DbException as e:
2288 self.logger.warn(
2289 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2290 )
2291
2292 def _write_all_config_status(self, db_nsr: dict, status: str):
2293 try:
2294 nsr_id = db_nsr["_id"]
2295 # configurationStatus
2296 config_status = db_nsr.get("configurationStatus")
2297 if config_status:
2298 db_nsr_update = {
2299 "configurationStatus.{}.status".format(index): status
2300 for index, v in enumerate(config_status)
2301 if v
2302 }
2303 # update status
2304 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2305
2306 except DbException as e:
2307 self.logger.warn(
2308 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2309 )
2310
2311 def _write_configuration_status(
2312 self,
2313 nsr_id: str,
2314 vca_index: int,
2315 status: str = None,
2316 element_under_configuration: str = None,
2317 element_type: str = None,
2318 other_update: dict = None,
2319 ):
2320
2321 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2322 # .format(vca_index, status))
2323
2324 try:
2325 db_path = "configurationStatus.{}.".format(vca_index)
2326 db_dict = other_update or {}
2327 if status:
2328 db_dict[db_path + "status"] = status
2329 if element_under_configuration:
2330 db_dict[
2331 db_path + "elementUnderConfiguration"
2332 ] = element_under_configuration
2333 if element_type:
2334 db_dict[db_path + "elementType"] = element_type
2335 self.update_db_2("nsrs", nsr_id, db_dict)
2336 except DbException as e:
2337 self.logger.warn(
2338 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2339 status, nsr_id, vca_index, e
2340 )
2341 )
2342
2343 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2344 """
2345 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2346 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2347 Database is used because the result can be obtained from a different LCM worker in case of HA.
2348 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2349 :param db_nslcmop: database content of nslcmop
2350 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2351 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2352 computed 'vim-account-id'
2353 """
2354 modified = False
2355 nslcmop_id = db_nslcmop["_id"]
2356 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2357 if placement_engine == "PLA":
2358 self.logger.debug(
2359 logging_text + "Invoke and wait for placement optimization"
2360 )
2361 await self.msg.aiowrite(
2362 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2363 )
2364 db_poll_interval = 5
2365 wait = db_poll_interval * 10
2366 pla_result = None
2367 while not pla_result and wait >= 0:
2368 await asyncio.sleep(db_poll_interval)
2369 wait -= db_poll_interval
2370 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2371 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2372
2373 if not pla_result:
2374 raise LcmException(
2375 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2376 )
2377
2378 for pla_vnf in pla_result["vnf"]:
2379 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2380 if not pla_vnf.get("vimAccountId") or not vnfr:
2381 continue
2382 modified = True
2383 self.db.set_one(
2384 "vnfrs",
2385 {"_id": vnfr["_id"]},
2386 {"vim-account-id": pla_vnf["vimAccountId"]},
2387 )
2388 # Modifies db_vnfrs
2389 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2390 return modified
2391
2392 def update_nsrs_with_pla_result(self, params):
2393 try:
2394 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2395 self.update_db_2(
2396 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2397 )
2398 except Exception as e:
2399 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2400
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a NS: deploy KDUs and VMs at RO/VIM and execution
        environments (charms) at N2VC, then reconcile the final status.

        The heavy work is launched as asyncio tasks collected in
        tasks_dict_info; the finally block waits for them and writes the
        aggregated result to the "nsrs" and "nslcmops" records.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None; progress and result are persisted in the database
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                # kdur additionalParams are stored JSON-encoded; decode in place
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so "vnfd_id not in
                # db_vnfds" compares an id string against dicts and is always
                # true — duplicated vnfds may be appended; confirm intended.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            # RO deployment runs as a background task; N2VC deployment below
            # proceeds in parallel and both are awaited in the finally block
            stage[1] = "Deploying NS at VIM."
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm (if the vnfd declares one)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            # Always runs: wait for pending tasks, aggregate errors, persist
            # the final operation/NS status and notify via kafka.
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2886
2887 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2888 if vnfd_id not in cached_vnfds:
2889 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2890 return cached_vnfds[vnfd_id]
2891
2892 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2893 if vnf_profile_id not in cached_vnfrs:
2894 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2895 "vnfrs",
2896 {
2897 "member-vnf-index-ref": vnf_profile_id,
2898 "nsr-id-ref": nsr_id,
2899 },
2900 )
2901 return cached_vnfrs[vnf_profile_id]
2902
2903 def _is_deployed_vca_in_relation(
2904 self, vca: DeployedVCA, relation: Relation
2905 ) -> bool:
2906 found = False
2907 for endpoint in (relation.provider, relation.requirer):
2908 if endpoint["kdu-resource-profile-id"]:
2909 continue
2910 found = (
2911 vca.vnf_profile_id == endpoint.vnf_profile_id
2912 and vca.vdu_profile_id == endpoint.vdu_profile_id
2913 and vca.execution_environment_ref == endpoint.execution_environment_ref
2914 )
2915 if found:
2916 break
2917 return found
2918
2919 def _update_ee_relation_data_with_implicit_data(
2920 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2921 ):
2922 ee_relation_data = safe_get_ee_relation(
2923 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2924 )
2925 ee_relation_level = EELevel.get_level(ee_relation_data)
2926 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2927 "execution-environment-ref"
2928 ]:
2929 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2930 vnfd_id = vnf_profile["vnfd-id"]
2931 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2932 entity_id = (
2933 vnfd_id
2934 if ee_relation_level == EELevel.VNF
2935 else ee_relation_data["vdu-profile-id"]
2936 )
2937 ee = get_juju_ee_ref(db_vnfd, entity_id)
2938 if not ee:
2939 raise Exception(
2940 f"not execution environments found for ee_relation {ee_relation_data}"
2941 )
2942 ee_relation_data["execution-environment-ref"] = ee["id"]
2943 return ee_relation_data
2944
2945 def _get_ns_relations(
2946 self,
2947 nsr_id: str,
2948 nsd: Dict[str, Any],
2949 vca: DeployedVCA,
2950 cached_vnfds: Dict[str, Any],
2951 ) -> List[Relation]:
2952 relations = []
2953 db_ns_relations = get_ns_configuration_relation_list(nsd)
2954 for r in db_ns_relations:
2955 provider_dict = None
2956 requirer_dict = None
2957 if all(key in r for key in ("provider", "requirer")):
2958 provider_dict = r["provider"]
2959 requirer_dict = r["requirer"]
2960 elif "entities" in r:
2961 provider_id = r["entities"][0]["id"]
2962 provider_dict = {
2963 "nsr-id": nsr_id,
2964 "endpoint": r["entities"][0]["endpoint"],
2965 }
2966 if provider_id != nsd["id"]:
2967 provider_dict["vnf-profile-id"] = provider_id
2968 requirer_id = r["entities"][1]["id"]
2969 requirer_dict = {
2970 "nsr-id": nsr_id,
2971 "endpoint": r["entities"][1]["endpoint"],
2972 }
2973 if requirer_id != nsd["id"]:
2974 requirer_dict["vnf-profile-id"] = requirer_id
2975 else:
2976 raise Exception(
2977 "provider/requirer or entities must be included in the relation."
2978 )
2979 relation_provider = self._update_ee_relation_data_with_implicit_data(
2980 nsr_id, nsd, provider_dict, cached_vnfds
2981 )
2982 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2983 nsr_id, nsd, requirer_dict, cached_vnfds
2984 )
2985 provider = EERelation(relation_provider)
2986 requirer = EERelation(relation_requirer)
2987 relation = Relation(r["name"], provider, requirer)
2988 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2989 if vca_in_relation:
2990 relations.append(relation)
2991 return relations
2992
2993 def _get_vnf_relations(
2994 self,
2995 nsr_id: str,
2996 nsd: Dict[str, Any],
2997 vca: DeployedVCA,
2998 cached_vnfds: Dict[str, Any],
2999 ) -> List[Relation]:
3000 relations = []
3001 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3002 vnf_profile_id = vnf_profile["id"]
3003 vnfd_id = vnf_profile["vnfd-id"]
3004 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3005 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3006 for r in db_vnf_relations:
3007 provider_dict = None
3008 requirer_dict = None
3009 if all(key in r for key in ("provider", "requirer")):
3010 provider_dict = r["provider"]
3011 requirer_dict = r["requirer"]
3012 elif "entities" in r:
3013 provider_id = r["entities"][0]["id"]
3014 provider_dict = {
3015 "nsr-id": nsr_id,
3016 "vnf-profile-id": vnf_profile_id,
3017 "endpoint": r["entities"][0]["endpoint"],
3018 }
3019 if provider_id != vnfd_id:
3020 provider_dict["vdu-profile-id"] = provider_id
3021 requirer_id = r["entities"][1]["id"]
3022 requirer_dict = {
3023 "nsr-id": nsr_id,
3024 "vnf-profile-id": vnf_profile_id,
3025 "endpoint": r["entities"][1]["endpoint"],
3026 }
3027 if requirer_id != vnfd_id:
3028 requirer_dict["vdu-profile-id"] = requirer_id
3029 else:
3030 raise Exception(
3031 "provider/requirer or entities must be included in the relation."
3032 )
3033 relation_provider = self._update_ee_relation_data_with_implicit_data(
3034 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3035 )
3036 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3037 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3038 )
3039 provider = EERelation(relation_provider)
3040 requirer = EERelation(relation_requirer)
3041 relation = Relation(r["name"], provider, requirer)
3042 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3043 if vca_in_relation:
3044 relations.append(relation)
3045 return relations
3046
3047 def _get_kdu_resource_data(
3048 self,
3049 ee_relation: EERelation,
3050 db_nsr: Dict[str, Any],
3051 cached_vnfds: Dict[str, Any],
3052 ) -> DeployedK8sResource:
3053 nsd = get_nsd(db_nsr)
3054 vnf_profiles = get_vnf_profiles(nsd)
3055 vnfd_id = find_in_list(
3056 vnf_profiles,
3057 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3058 )["vnfd-id"]
3059 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3060 kdu_resource_profile = get_kdu_resource_profile(
3061 db_vnfd, ee_relation.kdu_resource_profile_id
3062 )
3063 kdu_name = kdu_resource_profile["kdu-name"]
3064 deployed_kdu, _ = get_deployed_kdu(
3065 db_nsr.get("_admin", ()).get("deployed", ()),
3066 kdu_name,
3067 ee_relation.vnf_profile_id,
3068 )
3069 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3070 return deployed_kdu
3071
3072 def _get_deployed_component(
3073 self,
3074 ee_relation: EERelation,
3075 db_nsr: Dict[str, Any],
3076 cached_vnfds: Dict[str, Any],
3077 ) -> DeployedComponent:
3078 nsr_id = db_nsr["_id"]
3079 deployed_component = None
3080 ee_level = EELevel.get_level(ee_relation)
3081 if ee_level == EELevel.NS:
3082 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3083 if vca:
3084 deployed_component = DeployedVCA(nsr_id, vca)
3085 elif ee_level == EELevel.VNF:
3086 vca = get_deployed_vca(
3087 db_nsr,
3088 {
3089 "vdu_id": None,
3090 "member-vnf-index": ee_relation.vnf_profile_id,
3091 "ee_descriptor_id": ee_relation.execution_environment_ref,
3092 },
3093 )
3094 if vca:
3095 deployed_component = DeployedVCA(nsr_id, vca)
3096 elif ee_level == EELevel.VDU:
3097 vca = get_deployed_vca(
3098 db_nsr,
3099 {
3100 "vdu_id": ee_relation.vdu_profile_id,
3101 "member-vnf-index": ee_relation.vnf_profile_id,
3102 "ee_descriptor_id": ee_relation.execution_environment_ref,
3103 },
3104 )
3105 if vca:
3106 deployed_component = DeployedVCA(nsr_id, vca)
3107 elif ee_level == EELevel.KDU:
3108 kdu_resource_data = self._get_kdu_resource_data(
3109 ee_relation, db_nsr, cached_vnfds
3110 )
3111 if kdu_resource_data:
3112 deployed_component = DeployedK8sResource(kdu_resource_data)
3113 return deployed_component
3114
3115 async def _add_relation(
3116 self,
3117 relation: Relation,
3118 vca_type: str,
3119 db_nsr: Dict[str, Any],
3120 cached_vnfds: Dict[str, Any],
3121 cached_vnfrs: Dict[str, Any],
3122 ) -> bool:
3123 deployed_provider = self._get_deployed_component(
3124 relation.provider, db_nsr, cached_vnfds
3125 )
3126 deployed_requirer = self._get_deployed_component(
3127 relation.requirer, db_nsr, cached_vnfds
3128 )
3129 if (
3130 deployed_provider
3131 and deployed_requirer
3132 and deployed_provider.config_sw_installed
3133 and deployed_requirer.config_sw_installed
3134 ):
3135 provider_db_vnfr = (
3136 self._get_vnfr(
3137 relation.provider.nsr_id,
3138 relation.provider.vnf_profile_id,
3139 cached_vnfrs,
3140 )
3141 if relation.provider.vnf_profile_id
3142 else None
3143 )
3144 requirer_db_vnfr = (
3145 self._get_vnfr(
3146 relation.requirer.nsr_id,
3147 relation.requirer.vnf_profile_id,
3148 cached_vnfrs,
3149 )
3150 if relation.requirer.vnf_profile_id
3151 else None
3152 )
3153 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3154 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3155 provider_relation_endpoint = RelationEndpoint(
3156 deployed_provider.ee_id,
3157 provider_vca_id,
3158 relation.provider.endpoint,
3159 )
3160 requirer_relation_endpoint = RelationEndpoint(
3161 deployed_requirer.ee_id,
3162 requirer_vca_id,
3163 relation.requirer.endpoint,
3164 )
3165 await self.vca_map[vca_type].add_relation(
3166 provider=provider_relation_endpoint,
3167 requirer=requirer_relation_endpoint,
3168 )
3169 # remove entry from relations list
3170 return True
3171 return False
3172
3173 async def _add_vca_relations(
3174 self,
3175 logging_text,
3176 nsr_id,
3177 vca_type: str,
3178 vca_index: int,
3179 timeout: int = 3600,
3180 ) -> bool:
3181
3182 # steps:
3183 # 1. find all relations for this VCA
3184 # 2. wait for other peers related
3185 # 3. add relations
3186
3187 try:
3188 # STEP 1: find all relations for this VCA
3189
3190 # read nsr record
3191 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3192 nsd = get_nsd(db_nsr)
3193
3194 # this VCA data
3195 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3196 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3197
3198 cached_vnfds = {}
3199 cached_vnfrs = {}
3200 relations = []
3201 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3202 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3203
3204 # if no relations, terminate
3205 if not relations:
3206 self.logger.debug(logging_text + " No relations")
3207 return True
3208
3209 self.logger.debug(logging_text + " adding relations {}".format(relations))
3210
3211 # add all relations
3212 start = time()
3213 while True:
3214 # check timeout
3215 now = time()
3216 if now - start >= timeout:
3217 self.logger.error(logging_text + " : timeout adding relations")
3218 return False
3219
3220 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3221 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3222
3223 # for each relation, find the VCA's related
3224 for relation in relations.copy():
3225 added = await self._add_relation(
3226 relation,
3227 vca_type,
3228 db_nsr,
3229 cached_vnfds,
3230 cached_vnfrs,
3231 )
3232 if added:
3233 relations.remove(relation)
3234
3235 if not relations:
3236 self.logger.debug("Relations added")
3237 break
3238 await asyncio.sleep(5.0)
3239
3240 return True
3241
3242 except Exception as e:
3243 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3244 return False
3245
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its k8s cluster and update nsr/vnfr records.

        Generates (or reuses) the kdu instance name, triggers the install via
        the matching k8s connector, records the obtained services and the
        management IP in the vnfr, and finally runs the KDU's
        initial-config-primitives (only when no juju EE handles them).

        :param nsr_id: NS record id
        :param nsr_db_path: path inside the nsr where this KDU's info lives
            (e.g. "_admin.deployed.K8s.<index>")
        :param vnfr_data: vnfr record of the VNF owning the KDU
        :param kdu_index: index of the KDU inside the vnfr "kdur" list
        :param kdud: KDU descriptor from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster type/uuid, kdu model/name, namespace
        :param k8params: KDU instantiation parameters — assumed dict of
            descriptor params, TODO confirm schema against callers
        :param timeout: seconds allowed for install and for each primitive
        :param vca_id: optional VCA id passed through to the k8s connector
        :return: the kdu instance name
        :raises Exception: re-raises any failure after recording it in the DB
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Reuse a user-provided deployment name if present, otherwise let
            # the connector generate a unique instance name
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this
                        # mgmt-service name
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial-config-primitives directly on the cluster only when
            # there is no juju execution environment in charge of them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3443
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the installation of every KDU found in the vnfrs.

        For each kdur: resolves the k8s cluster id, synchronizes helm repos
        once per cluster, records the KDU entry in the nsr, and spawns one
        _install_kdu task per KDU (registered in self.lcm_tasks and in
        ``task_instantiation_info`` for the caller to await).

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: current operation id (used to register tasks)
        :param db_vnfrs: vnfr records keyed by member-vnf-index
        :param db_vnfds: list of VNF descriptors
        :param task_instantiation_info: dict filled with task -> description
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # cache: cluster type -> {cluster id -> connector-internal uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the connector-internal id of a k8s cluster,
            # initializing helm-v3 on legacy clusters when needed.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3715
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment.

        For each EE listed in ``descriptor_config``, determines the VCA type
        (lxc/native/k8s proxy charm or helm v2/v3), finds or creates the
        matching entry in <nsrs>._admin.deployed.VCA, then starts an
        instantiate_N2VC asyncio task registered in self.lcm_tasks and in
        ``task_instantiation_info``.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Determine the VCA type from the EE descriptor
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing deployed-VCA entry matching this EE;
            # the for/else creates one when no match is found
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3868
3869 @staticmethod
3870 def _create_nslcmop(nsr_id, operation, params):
3871 """
3872 Creates a ns-lcm-opp content to be stored at database.
3873 :param nsr_id: internal id of the instance
3874 :param operation: instantiate, terminate, scale, action, ...
3875 :param params: user parameters for the operation
3876 :return: dictionary following SOL005 format
3877 """
3878 # Raise exception if invalid arguments
3879 if not (nsr_id and operation and params):
3880 raise LcmException(
3881 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3882 )
3883 now = time()
3884 _id = str(uuid4())
3885 nslcmop = {
3886 "id": _id,
3887 "_id": _id,
3888 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3889 "operationState": "PROCESSING",
3890 "statusEnteredTime": now,
3891 "nsInstanceId": nsr_id,
3892 "lcmOperationType": operation,
3893 "startTime": now,
3894 "isAutomaticInvocation": False,
3895 "operationParams": params,
3896 "isCancelPending": False,
3897 "links": {
3898 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3899 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3900 },
3901 }
3902 return nslcmop
3903
3904 def _format_additional_params(self, params):
3905 params = params or {}
3906 for key, value in params.items():
3907 if str(value).startswith("!!yaml "):
3908 params[key] = yaml.safe_load(value[7:])
3909 return params
3910
3911 def _get_terminate_primitive_params(self, seq, vnf_index):
3912 primitive = seq.get("name")
3913 primitive_params = {}
3914 params = {
3915 "member_vnf_index": vnf_index,
3916 "primitive": primitive,
3917 "primitive_params": primitive_params,
3918 }
3919 desc_params = {}
3920 return self._map_primitive_params(seq, params, desc_params)
3921
3922 # sub-operations
3923
3924 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3925 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3926 if op.get("operationState") == "COMPLETED":
3927 # b. Skip sub-operation
3928 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3929 return self.SUBOPERATION_STATUS_SKIP
3930 else:
3931 # c. retry executing sub-operation
3932 # The sub-operation exists, and operationState != 'COMPLETED'
3933 # Update operationState = 'PROCESSING' to indicate a retry.
3934 operationState = "PROCESSING"
3935 detailed_status = "In progress"
3936 self._update_suboperation_status(
3937 db_nslcmop, op_index, operationState, detailed_status
3938 )
3939 # Return the sub-operation index
3940 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3941 # with arguments extracted from the sub-operation
3942 return op_index
3943
3944 # Find a sub-operation where all keys in a matching dictionary must match
3945 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3946 def _find_suboperation(self, db_nslcmop, match):
3947 if db_nslcmop and match:
3948 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3949 for i, op in enumerate(op_list):
3950 if all(op.get(k) == match[k] for k in match):
3951 return i
3952 return self.SUBOPERATION_STATUS_NOT_FOUND
3953
3954 # Update status for a sub-operation given its index
3955 def _update_suboperation_status(
3956 self, db_nslcmop, op_index, operationState, detailed_status
3957 ):
3958 # Update DB for HA tasks
3959 q_filter = {"_id": db_nslcmop["_id"]}
3960 update_dict = {
3961 "_admin.operations.{}.operationState".format(op_index): operationState,
3962 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3963 }
3964 self.db.set_one(
3965 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3966 )
3967
3968 # Add sub-operation, return the index of the added sub-operation
3969 # Optionally, set operationState, detailed-status, and operationType
3970 # Status and type are currently set for 'scale' sub-operations:
3971 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3972 # 'detailed-status' : status message
3973 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3974 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3975 def _add_suboperation(
3976 self,
3977 db_nslcmop,
3978 vnf_index,
3979 vdu_id,
3980 vdu_count_index,
3981 vdu_name,
3982 primitive,
3983 mapped_primitive_params,
3984 operationState=None,
3985 detailed_status=None,
3986 operationType=None,
3987 RO_nsr_id=None,
3988 RO_scaling_info=None,
3989 ):
3990 if not db_nslcmop:
3991 return self.SUBOPERATION_STATUS_NOT_FOUND
3992 # Get the "_admin.operations" list, if it exists
3993 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3994 op_list = db_nslcmop_admin.get("operations")
3995 # Create or append to the "_admin.operations" list
3996 new_op = {
3997 "member_vnf_index": vnf_index,
3998 "vdu_id": vdu_id,
3999 "vdu_count_index": vdu_count_index,
4000 "primitive": primitive,
4001 "primitive_params": mapped_primitive_params,
4002 }
4003 if operationState:
4004 new_op["operationState"] = operationState
4005 if detailed_status:
4006 new_op["detailed-status"] = detailed_status
4007 if operationType:
4008 new_op["lcmOperationType"] = operationType
4009 if RO_nsr_id:
4010 new_op["RO_nsr_id"] = RO_nsr_id
4011 if RO_scaling_info:
4012 new_op["RO_scaling_info"] = RO_scaling_info
4013 if not op_list:
4014 # No existing operations, create key 'operations' with current operation as first list element
4015 db_nslcmop_admin.update({"operations": [new_op]})
4016 op_list = db_nslcmop_admin.get("operations")
4017 else:
4018 # Existing operations, append operation to list
4019 op_list.append(new_op)
4020
4021 db_nslcmop_update = {"_admin.operations": op_list}
4022 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4023 op_index = len(op_list) - 1
4024 return op_index
4025
4026 # Helper methods for scale() sub-operations
4027
4028 # pre-scale/post-scale:
4029 # Check for 3 different cases:
4030 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4031 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4032 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4033 def _check_or_add_scale_suboperation(
4034 self,
4035 db_nslcmop,
4036 vnf_index,
4037 vnf_config_primitive,
4038 primitive_params,
4039 operationType,
4040 RO_nsr_id=None,
4041 RO_scaling_info=None,
4042 ):
4043 # Find this sub-operation
4044 if RO_nsr_id and RO_scaling_info:
4045 operationType = "SCALE-RO"
4046 match = {
4047 "member_vnf_index": vnf_index,
4048 "RO_nsr_id": RO_nsr_id,
4049 "RO_scaling_info": RO_scaling_info,
4050 }
4051 else:
4052 match = {
4053 "member_vnf_index": vnf_index,
4054 "primitive": vnf_config_primitive,
4055 "primitive_params": primitive_params,
4056 "lcmOperationType": operationType,
4057 }
4058 op_index = self._find_suboperation(db_nslcmop, match)
4059 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4060 # a. New sub-operation
4061 # The sub-operation does not exist, add it.
4062 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4063 # The following parameters are set to None for all kind of scaling:
4064 vdu_id = None
4065 vdu_count_index = None
4066 vdu_name = None
4067 if RO_nsr_id and RO_scaling_info:
4068 vnf_config_primitive = None
4069 primitive_params = None
4070 else:
4071 RO_nsr_id = None
4072 RO_scaling_info = None
4073 # Initial status for sub-operation
4074 operationState = "PROCESSING"
4075 detailed_status = "In progress"
4076 # Add sub-operation for pre/post-scaling (zero or more operations)
4077 self._add_suboperation(
4078 db_nslcmop,
4079 vnf_index,
4080 vdu_id,
4081 vdu_count_index,
4082 vdu_name,
4083 vnf_config_primitive,
4084 primitive_params,
4085 operationState,
4086 detailed_status,
4087 operationType,
4088 RO_nsr_id,
4089 RO_scaling_info,
4090 )
4091 return self.SUBOPERATION_STATUS_NEW
4092 else:
4093 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4094 # or op_index (operationState != 'COMPLETED')
4095 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4096
4097 # Function to return execution_environment id
4098
4099 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4100 # TODO vdu_index_count
4101 for vca in vca_deployed_list:
4102 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4103 return vca["ee_id"]
4104
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives of a VCA and optionally destroy its execution environment.

        :param logging_text: prefix to prepend to every log message
        :param db_nslcmop: nslcmop database record; each terminate action is registered here as a sub-operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment here, because all of them
            will be destroyed at once afterwards
        :param exec_primitives: False to not execute terminate primitives, because the config is not
            completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) id, when different from the default one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value for backward compatibility - proxy charm
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # Mark in the nsr record that this VCA no longer needs terminate primitives
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4210
4211 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4212 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4213 namespace = "." + db_nsr["_id"]
4214 try:
4215 await self.n2vc.delete_namespace(
4216 namespace=namespace,
4217 total_timeout=self.timeout_charm_delete,
4218 vca_id=vca_id,
4219 )
4220 except N2VCNotFound: # already deleted. Skip
4221 pass
4222 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4223
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (legacy, non NG-RO flavour).

        Deletes, in order: the ns instance from the VIM, the nsd and then the
        vnfds previously on-boarded into RO. Errors are accumulated in
        `failed_detail` so that later steps are still attempted when possible,
        and a single LcmException is raised at the end if anything failed.

        :param logging_text: prefix to prepend to every log message
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: NS instance id (nsrs collection)
        :param nslcmop_id: operation id (nslcmops collection)
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None. Raises LcmException when any deletion failed.
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            # Step 1: ask RO to delete the ns instance from the VIM (asynchronous action)
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM, polling the RO action status
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at the VIM
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only write to DB when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    # NOTE(review): the `loop` argument is deprecated in modern asyncio;
                    # kept for the Python runtime this code targets
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Step 2: delete the nsd from RO (only when the previous step succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Step 3: delete the vnfds on-boarded into RO (only when previous steps succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        # Write the final VIM status and raise when anything failed
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4423
    async def terminate(self, nsr_id, nslcmop_id):
        """
        NS termination task: executes terminate primitives, deletes the VCAs and
        KDUs and removes the deployment from RO/VIM, updating nsr/nslcmop status
        in the database as it progresses. The work is organized in 3 stages
        (prepare, execute terminating primitives, delete all) tracked in `stage`.

        :param nsr_id: NS instance id (nsrs collection)
        :param nslcmop_id: operation id (nslcmops collection)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            # Nothing deployed yet: jump straight to the finally block
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each vnfd so it is read from db only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # Select the configuration descriptor: NS-level, vdu-level,
                # kdu-level or vnf-level, depending on the VCA record
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                # any failure aborts here; status is written at the finally block
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (new-generation RO client when available)
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            # Notify NBI through kafka that the operation finished
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4752
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of tasks to finish, updating the operation status as each
        one completes and collecting the errors of the failed ones.

        :param logging_text: prefix to prepend to every log message
        :param created_tasks_info: dict mapping asyncio task -> human readable description
        :param timeout: global timeout in seconds for all the tasks together
        :param stage: 3-element status list written to db; index 1 is updated here
        :param nslcmop_id: operation id where progress/errors are written
        :param nsr_id: when provided, errors are also written to the nsrs record
        :return: list of error detail strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled exceptions are logged as a one-liner;
                    # anything else gets a full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4829
4830 @staticmethod
4831 def _map_primitive_params(primitive_desc, params, instantiation_params):
4832 """
4833 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4834 The default-value is used. If it is between < > it look for a value at instantiation_params
4835 :param primitive_desc: portion of VNFD/NSD that describes primitive
4836 :param params: Params provided by user
4837 :param instantiation_params: Instantiation params provided by user
4838 :return: a dictionary with the calculated params
4839 """
4840 calculated_params = {}
4841 for parameter in primitive_desc.get("parameter", ()):
4842 param_name = parameter["name"]
4843 if param_name in params:
4844 calculated_params[param_name] = params[param_name]
4845 elif "default-value" in parameter or "value" in parameter:
4846 if "value" in parameter:
4847 calculated_params[param_name] = parameter["value"]
4848 else:
4849 calculated_params[param_name] = parameter["default-value"]
4850 if (
4851 isinstance(calculated_params[param_name], str)
4852 and calculated_params[param_name].startswith("<")
4853 and calculated_params[param_name].endswith(">")
4854 ):
4855 if calculated_params[param_name][1:-1] in instantiation_params:
4856 calculated_params[param_name] = instantiation_params[
4857 calculated_params[param_name][1:-1]
4858 ]
4859 else:
4860 raise LcmException(
4861 "Parameter {} needed to execute primitive {} not provided".format(
4862 calculated_params[param_name], primitive_desc["name"]
4863 )
4864 )
4865 else:
4866 raise LcmException(
4867 "Parameter {} needed to execute primitive {} not provided".format(
4868 param_name, primitive_desc["name"]
4869 )
4870 )
4871
4872 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4873 calculated_params[param_name] = yaml.safe_dump(
4874 calculated_params[param_name], default_flow_style=True, width=256
4875 )
4876 elif isinstance(calculated_params[param_name], str) and calculated_params[
4877 param_name
4878 ].startswith("!!yaml "):
4879 calculated_params[param_name] = calculated_params[param_name][7:]
4880 if parameter.get("data-type") == "INTEGER":
4881 try:
4882 calculated_params[param_name] = int(calculated_params[param_name])
4883 except ValueError: # error converting string to int
4884 raise LcmException(
4885 "Parameter {} of primitive {} must be integer".format(
4886 param_name, primitive_desc["name"]
4887 )
4888 )
4889 elif parameter.get("data-type") == "BOOLEAN":
4890 calculated_params[param_name] = not (
4891 (str(calculated_params[param_name])).lower() == "false"
4892 )
4893
4894 # add always ns_config_info if primitive name is config
4895 if primitive_desc["name"] == "config":
4896 if "ns_config_info" in instantiation_params:
4897 calculated_params["ns_config_info"] = instantiation_params[
4898 "ns_config_info"
4899 ]
4900 return calculated_params
4901
4902 def _look_for_deployed_vca(
4903 self,
4904 deployed_vca,
4905 member_vnf_index,
4906 vdu_id,
4907 vdu_count_index,
4908 kdu_name=None,
4909 ee_descriptor_id=None,
4910 ):
4911 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4912 for vca in deployed_vca:
4913 if not vca:
4914 continue
4915 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4916 continue
4917 if (
4918 vdu_count_index is not None
4919 and vdu_count_index != vca["vdu_count_index"]
4920 ):
4921 continue
4922 if kdu_name and kdu_name != vca["kdu_name"]:
4923 continue
4924 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4925 continue
4926 break
4927 else:
4928 # vca_deployed not found
4929 raise LcmException(
4930 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4931 " is not deployed".format(
4932 member_vnf_index,
4933 vdu_id,
4934 vdu_count_index,
4935 kdu_name,
4936 ee_descriptor_id,
4937 )
4938 )
4939 # get ee_id
4940 ee_id = vca.get("ee_id")
4941 vca_type = vca.get(
4942 "type", "lxc_proxy_charm"
4943 ) # default value for backward compatibility - proxy charm
4944 if not ee_id:
4945 raise LcmException(
4946 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4947 "execution environment".format(
4948 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4949 )
4950 )
4951 return ee_id, vca_type
4952
async def _ns_execute_primitive(
    self,
    ee_id,
    primitive,
    primitive_params,
    retries=0,
    retries_interval=30,
    timeout=None,
    vca_type=None,
    db_dict=None,
    vca_id: str = None,
) -> (str, str):
    """Execute a primitive on an execution environment, retrying on failure.

    :param ee_id: execution environment id where the primitive is run
    :param primitive: primitive (action) name; for "config" the params are
        wrapped in a {"params": ...} envelope before being sent
    :param primitive_params: dict of parameters for the primitive
    :param retries: number of extra attempts after a failed attempt
    :param retries_interval: seconds to sleep between attempts
    :param timeout: per-attempt timeout; defaults to self.timeout_primitive
    :param vca_type: VCA connector key in self.vca_map; defaults to
        "lxc_proxy_charm" for backward compatibility
    :param db_dict: database document reference used to report progress
    :param vca_id: VCA record id
    :return: ("COMPLETED", output) on success, ("FAILED", error) when all
        retries are exhausted, ("FAIL", error) on unexpected errors
    """
    try:
        if primitive == "config":
            # the "config" primitive expects its parameters under "params"
            primitive_params = {"params": primitive_params}

        # default value for backward compatibility - proxy charm
        vca_type = vca_type or "lxc_proxy_charm"

        while retries >= 0:
            try:
                output = await asyncio.wait_for(
                    self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=primitive,
                        params_dict=primitive_params,
                        progress_timeout=self.timeout_progress_primitive,
                        total_timeout=self.timeout_primitive,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    ),
                    timeout=timeout or self.timeout_primitive,
                )
                # execution was OK
                break
            except asyncio.CancelledError:
                raise
            except Exception as e:  # asyncio.TimeoutError
                if isinstance(e, asyncio.TimeoutError):
                    e = "Timeout"
                retries -= 1
                if retries >= 0:
                    self.logger.debug(
                        "Error executing action {} on {} -> {}".format(
                            primitive, ee_id, e
                        )
                    )
                    # wait and retry
                    # NOTE(review): the "loop" argument of asyncio.sleep was
                    # removed in Python 3.10 — confirm target interpreter
                    await asyncio.sleep(retries_interval, loop=self.loop)
                else:
                    # retries exhausted: report the last error to the caller
                    return "FAILED", str(e)

        return "COMPLETED", output

    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        return "FAIL", "Error executing action {}: {}".format(primitive, e)
5011
async def vca_status_refresh(self, nsr_id, nslcmop_id):
    """
    Updating the vca_status with latest juju information in nsrs record
    :param: nsr_id: Id of the nsr
    :param: nslcmop_id: Id of the nslcmop
    :return: None
    """

    self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    vca_id = self.get_vca_id({}, db_nsr)
    deployed_k8s = db_nsr["_admin"]["deployed"]["K8s"]
    if deployed_k8s:
        # KDU-based deployment: refresh status per deployed K8s record
        for k8s_record in deployed_k8s:
            await self._on_update_k8s_db(
                cluster_uuid=k8s_record["k8scluster-uuid"],
                kdu_instance=k8s_record["kdu-instance"],
                filter={"_id": nsr_id},
                vca_id=vca_id,
                cluster_type=k8s_record["k8scluster-type"],
            )
    else:
        # Charm-based deployment: refresh status per deployed VCA record
        table = "nsrs"
        filter = {"_id": nsr_id}
        for vca_index in range(len(db_nsr["_admin"]["deployed"]["VCA"])):
            path = "_admin.deployed.VCA.{}.".format(vca_index)
            await self._on_update_n2vc_db(table, filter, path, {})

    self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
    self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5045
async def action(self, nsr_id, nslcmop_id):
    """NSLCMOP handler for "action": run a primitive on a NS / VNF / VDU / KDU.

    Reads the nslcmop document to find the target (member_vnf_index, vdu_id,
    kdu_name, vdu_count_index) and the primitive name/params, then executes
    the primitive either through the K8s connector (KDU operations such as
    upgrade/rollback/status) or through the deployed VCA execution
    environment. The result is written back to the nslcmop/nsr records and a
    kafka "actioned" notification is emitted.

    :param nsr_id: id of the NS record
    :param nslcmop_id: id of the NS LCM operation record
    :return: (nslcmop_operation_state, detailed_status)
    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        return

    logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")
    # get all needed from database
    db_nsr = None
    db_nslcmop = None
    db_nsr_update = {}
    db_nslcmop_update = {}
    nslcmop_operation_state = None
    error_description_nslcmop = None
    exc = None
    try:
        # wait for any previous tasks in process
        step = "Waiting for previous operations to terminate"
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="RUNNING ACTION",
            current_operation_id=nslcmop_id,
        )

        step = "Getting information from database"
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        if db_nslcmop["operationParams"].get("primitive_params"):
            # primitive_params arrives JSON-encoded in the operation params
            db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                db_nslcmop["operationParams"]["primitive_params"]
            )

        nsr_deployed = db_nsr["_admin"].get("deployed")
        vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
        vdu_id = db_nslcmop["operationParams"].get("vdu_id")
        kdu_name = db_nslcmop["operationParams"].get("kdu_name")
        vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
        primitive = db_nslcmop["operationParams"]["primitive"]
        primitive_params = db_nslcmop["operationParams"]["primitive_params"]
        timeout_ns_action = db_nslcmop["operationParams"].get(
            "timeout_ns_action", self.timeout_primitive
        )

        if vnf_index:
            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
            )
            if db_vnfr.get("kdur"):
                # kdur additionalParams are stored JSON-encoded; decode them
                kdur_list = []
                for kdur in db_vnfr["kdur"]:
                    if kdur.get("additionalParams"):
                        kdur["additionalParams"] = json.loads(
                            kdur["additionalParams"]
                        )
                    kdur_list.append(kdur)
                db_vnfr["kdur"] = kdur_list
            step = "Getting vnfd from database"
            db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

            # Sync filesystem before running a primitive
            self.fs.sync(db_vnfr["vnfd-id"])
        else:
            step = "Getting nsd from database"
            db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

        # NOTE(review): for NS-level actions (no member_vnf_index) db_vnfr is
        # never assigned, so this line would raise NameError, which ends up
        # reported as a generic FAILED below — confirm intended
        vca_id = self.get_vca_id(db_vnfr, db_nsr)
        # for backward compatibility
        if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
            nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
            db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

        # look for primitive
        config_primitive_desc = descriptor_configuration = None
        if vdu_id:
            descriptor_configuration = get_configuration(db_vnfd, vdu_id)
        elif kdu_name:
            descriptor_configuration = get_configuration(db_vnfd, kdu_name)
        elif vnf_index:
            descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
        else:
            descriptor_configuration = db_nsd.get("ns-configuration")

        if descriptor_configuration and descriptor_configuration.get(
            "config-primitive"
        ):
            for config_primitive in descriptor_configuration["config-primitive"]:
                if config_primitive["name"] == primitive:
                    config_primitive_desc = config_primitive
                    break

        if not config_primitive_desc:
            # kdu-level upgrade/rollback/status are valid even without a
            # config-primitive entry in the descriptor
            if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                raise LcmException(
                    "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                        primitive
                    )
                )
            primitive_name = primitive
            ee_descriptor_id = None
        else:
            primitive_name = config_primitive_desc.get(
                "execution-environment-primitive", primitive
            )
            ee_descriptor_id = config_primitive_desc.get(
                "execution-environment-ref"
            )

        # collect the additionalParams of the chosen target level
        if vnf_index:
            if vdu_id:
                vdur = next(
                    (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                )
                desc_params = parse_yaml_strings(vdur.get("additionalParams"))
            elif kdu_name:
                kdur = next(
                    (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                )
                desc_params = parse_yaml_strings(kdur.get("additionalParams"))
            else:
                desc_params = parse_yaml_strings(
                    db_vnfr.get("additionalParamsForVnf")
                )
        else:
            desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
        if kdu_name and get_configuration(db_vnfd, kdu_name):
            kdu_configuration = get_configuration(db_vnfd, kdu_name)
            actions = set()
            # NOTE(review): these loops rebind the outer "primitive" variable;
            # later code uses primitive_name, so this looks benign — confirm
            for primitive in kdu_configuration.get("initial-config-primitive", []):
                actions.add(primitive["name"])
            for primitive in kdu_configuration.get("config-primitive", []):
                actions.add(primitive["name"])
            kdu = find_in_list(
                nsr_deployed["K8s"],
                lambda kdu: kdu_name == kdu["kdu-name"]
                and kdu["member-vnf-index"] == vnf_index,
            )
            kdu_action = (
                True
                if primitive_name in actions
                and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                else False
            )

        # TODO check if ns is in a proper status
        # NOTE(review): kdu_action is bound only when the kdu has a
        # configuration; otherwise this relies on "or" short-circuit when the
        # primitive is upgrade/rollback/status — confirm
        if kdu_name and (
            primitive_name in ("upgrade", "rollback", "status") or kdu_action
        ):
            # kdur and desc_params already set from before
            if primitive_params:
                desc_params.update(primitive_params)
            # TODO Check if we will need something at vnf level
            for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                if (
                    kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index
                ):
                    break
            else:
                raise LcmException(
                    "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                )

            if kdu.get("k8scluster-type") not in self.k8scluster_map:
                msg = "unknown k8scluster-type '{}'".format(
                    kdu.get("k8scluster-type")
                )
                raise LcmException(msg)

            db_dict = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": "_admin.deployed.K8s.{}".format(index),
            }
            self.logger.debug(
                logging_text
                + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
            )
            step = "Executing kdu {}".format(primitive_name)
            if primitive_name == "upgrade":
                # kdu_model can be overridden through the action params
                if desc_params.get("kdu_model"):
                    kdu_model = desc_params.get("kdu_model")
                    del desc_params["kdu_model"]
                else:
                    kdu_model = kdu.get("kdu-model")
                    # strip a ":version" suffix, if present
                    parts = kdu_model.split(sep=":")
                    if len(parts) == 2:
                        kdu_model = parts[0]
                if desc_params.get("kdu_atomic_upgrade"):
                    atomic_upgrade = desc_params.get("kdu_atomic_upgrade").lower() in ("yes", "true", "1")
                    del desc_params["kdu_atomic_upgrade"]
                else:
                    atomic_upgrade = True

                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu.get("kdu-instance"),
                        atomic=atomic_upgrade,
                        kdu_model=kdu_model,
                        params=desc_params,
                        db_dict=db_dict,
                        timeout=timeout_ns_action,
                    ),
                    timeout=timeout_ns_action + 10,
                )
                self.logger.debug(
                    logging_text + " Upgrade of kdu {} done".format(detailed_status)
                )
            elif primitive_name == "rollback":
                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu.get("kdu-instance"),
                        db_dict=db_dict,
                    ),
                    timeout=timeout_ns_action,
                )
            elif primitive_name == "status":
                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu.get("kdu-instance"),
                        vca_id=vca_id,
                    ),
                    timeout=timeout_ns_action,
                )
            else:
                # generic KDU action executed through the K8s connector
                kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                    kdu["kdu-name"], nsr_id
                )
                params = self._map_primitive_params(
                    config_primitive_desc, primitive_params, desc_params
                )

                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu_instance,
                        primitive_name=primitive_name,
                        params=params,
                        db_dict=db_dict,
                        timeout=timeout_ns_action,
                        vca_id=vca_id,
                    ),
                    timeout=timeout_ns_action,
                )

            if detailed_status:
                nslcmop_operation_state = "COMPLETED"
            else:
                detailed_status = ""
                nslcmop_operation_state = "FAILED"
        else:
            # charm-based primitive: run it on the deployed VCA
            ee_id, vca_type = self._look_for_deployed_vca(
                nsr_deployed["VCA"],
                member_vnf_index=vnf_index,
                vdu_id=vdu_id,
                vdu_count_index=vdu_count_index,
                ee_descriptor_id=ee_descriptor_id,
            )
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if vca_deployed.get("member-vnf-index") == vnf_index:
                    db_dict = {
                        "collection": "nsrs",
                        "filter": {"_id": nsr_id},
                        "path": "_admin.deployed.VCA.{}.".format(vca_index),
                    }
                    break
            (
                nslcmop_operation_state,
                detailed_status,
            ) = await self._ns_execute_primitive(
                ee_id,
                primitive=primitive_name,
                primitive_params=self._map_primitive_params(
                    config_primitive_desc, primitive_params, desc_params
                ),
                timeout=timeout_ns_action,
                vca_type=vca_type,
                db_dict=db_dict,
                vca_id=vca_id,
            )

        db_nslcmop_update["detailed-status"] = detailed_status
        error_description_nslcmop = (
            detailed_status if nslcmop_operation_state == "FAILED" else ""
        )
        self.logger.debug(
            logging_text
            + " task Done with result {} {}".format(
                nslcmop_operation_state, detailed_status
            )
        )
        return  # database update is called inside finally

    except (DbException, LcmException, N2VCException, K8sException) as e:
        self.logger.error(logging_text + "Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(
            logging_text + "Cancelled Exception while '{}'".format(step)
        )
        exc = "Operation was cancelled"
    except asyncio.TimeoutError:
        self.logger.error(logging_text + "Timeout while '{}'".format(step))
        exc = "Timeout"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
            exc_info=True,
        )
    finally:
        # persist the operation result and notify, whatever happened above
        if exc:
            db_nslcmop_update[
                "detailed-status"
            ] = (
                detailed_status
            ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
            nslcmop_operation_state = "FAILED"
        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=db_nsr[
                    "nsState"
                ],  # TODO check if degraded. For the moment use previous status
                current_operation="IDLE",
                current_operation_id=None,
                # error_description=error_description_nsr,
                # error_detail=error_detail,
                other_update=db_nsr_update,
            )

        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message=error_description_nslcmop,
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )

        if nslcmop_operation_state:
            try:
                await self.msg.aiowrite(
                    "ns",
                    "actioned",
                    {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    },
                    loop=self.loop,
                )
            except Exception as e:
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )
        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
        return nslcmop_operation_state, detailed_status
5414
async def terminate_vdus(
    self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
):
    """This method terminates VDUs

    Args:
        db_vnfr: VNF instance record
        member_vnf_index: VNF index to identify the VDUs to be removed
        db_nsr: NS instance record
        update_db_nslcmops: Nslcmop update record
        stage: three-element list used to report progress
        logging_text: prefix for log messages
    """
    vca_scaling_info = []
    scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
    scaling_info["scaling_direction"] = "IN"
    scaling_info["vdu-delete"] = {}
    scaling_info["kdu-delete"] = {}
    # NOTE(review): count_index is never incremented, so every VDU gets
    # vdu_index 0 — confirm intended
    count_index = 0
    for vdur in copy(db_vnfr.get("vdur")):
        vca_scaling_info.append(
            {
                "osm_vdu_id": vdur["vdu-id-ref"],
                "member-vnf-index": member_vnf_index,
                "type": "delete",
                "vdu_index": count_index,
            }
        )
        scaling_info["vdu-delete"][vdur["vdu-id-ref"]] = count_index
        interface_info = [
            {
                "name": iface["name"],
                "ip_address": iface["ip-address"],
                "mac_address": iface.get("mac-address"),
            }
            for iface in vdur["interfaces"]
        ]
        scaling_info["vdu"].append(
            {
                "name": vdur.get("name") or vdur.get("vdu-name"),
                "vdu_id": vdur["vdu-id-ref"],
                "interface": interface_info,
            }
        )
    self.logger.info("NS update scaling info{}".format(scaling_info))
    stage[2] = "Terminating VDUs"
    if scaling_info.get("vdu-delete"):
        # scale_process = "RO"
        if self.ro_config.get("ng"):
            await self._scale_ng_ro(
                logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
            )
5464
async def remove_vnf(
    self, nsr_id, nslcmop_id, vnf_instance_id
):
    """This method is to Remove VNF instances from NS.

    Terminates the VNF's VDUs through RO, detaches the VNF from the NS
    record and deletes its vnfr. The last remaining VNF of an NS cannot
    be removed.

    Args:
        nsr_id: NS instance id
        nslcmop_id: nslcmop id of update
        vnf_instance_id: id of the VNF instance to be removed

    Returns:
        result: (str, str) COMPLETED/FAILED, details
    """
    try:
        db_nsr_update = {}
        logging_text = "Task ns={} update ".format(nsr_id)
        check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
        self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
        if check_vnfr_count > 1:
            stage = ["", "", ""]
            step = "Getting nslcmop from database"
            self.logger.debug(step + " after having waited for previous tasks to be completed")
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

            # Detach the VNF from the NS record and delete its vnfr.
            constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
            constituent_vnfr.remove(db_vnfr.get("_id"))
            db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
            # Bug fix: the nsrs record was previously updated twice with
            # identical content; a single update is enough.
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
            return "COMPLETED", "Done"
        else:
            step = "Terminate VNF Failed with"
            raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
                vnf_instance_id))
    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        self.logger.debug("Error removing VNF {}".format(e))
        return "FAILED", "Error removing VNF {}".format(e)
5513
async def _ns_redeploy_vnf(
    self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
):
    """This method updates and redeploys VNF instances

    Terminates the VNF's current VDU resources through RO, rewrites the
    vnfr (revision, connection points, vdur) from the latest descriptor and
    the caller-provided newVdur, then instantiates the new VDU resources
    through NG-RO.

    Args:
        nsr_id: NS instance id
        nslcmop_id: nslcmop id
        db_vnfd: VNF descriptor
        db_vnfr: VNF instance record
        db_nsr: NS instance record

    Returns:
        result: (str, str) COMPLETED/FAILED, details
    """
    try:
        # NOTE(review): count_index is never incremented, so every VDU is
        # created with vdu_index 0 — confirm intended
        count_index = 0
        stage = ["", "", ""]
        logging_text = "Task ns={} update ".format(nsr_id)
        latest_vnfd_revision = db_vnfd["_admin"].get("revision")
        member_vnf_index = db_vnfr["member-vnf-index-ref"]

        # Terminate old VNF resources
        update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

        # old_vnfd_id = db_vnfr["vnfd-id"]
        # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
        new_db_vnfd = db_vnfd
        # new_vnfd_ref = new_db_vnfd["id"]
        # new_vnfd_id = vnfd_id

        # Create VDUR
        # Rebuild the vnfr connection points from the descriptor ext-cpd
        new_vnfr_cp = []
        for cp in new_db_vnfd.get("ext-cpd", ()):
            vnf_cp = {
                "name": cp.get("id"),
                "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                "id": cp.get("id"),
            }
            new_vnfr_cp.append(vnf_cp)
        # the new vdur list is supplied by the caller in the operation params
        new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
        # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
        # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
        new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
        self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
        updated_db_vnfr = self.db.get_one(
            "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
        )

        # Instantiate new VNF resources
        # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        vca_scaling_info = []
        scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
        scaling_info["scaling_direction"] = "OUT"
        scaling_info["vdu-create"] = {}
        scaling_info["kdu-create"] = {}
        vdud_instantiate_list = db_vnfd["vdu"]
        for index, vdud in enumerate(vdud_instantiate_list):
            cloud_init_text = self._get_vdu_cloud_init_content(
                vdud, db_vnfd
            )
            if cloud_init_text:
                additional_params = (
                    self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                    or {}
                )
            cloud_init_list = []
            if cloud_init_text:
                # TODO Information of its own ip is not available because db_vnfr is not updated.
                additional_params["OSM"] = get_osm_params(
                    updated_db_vnfr, vdud["id"], 1
                )
                cloud_init_list.append(
                    self._parse_cloud_init(
                        cloud_init_text,
                        additional_params,
                        db_vnfd["id"],
                        vdud["id"],
                    )
                )
            vca_scaling_info.append(
                {
                    "osm_vdu_id": vdud["id"],
                    "member-vnf-index": member_vnf_index,
                    "type": "create",
                    "vdu_index": count_index,
                }
            )
            scaling_info["vdu-create"][vdud["id"]] = count_index
        if self.ro_config.get("ng"):
            self.logger.debug(
                "New Resources to be deployed: {}".format(scaling_info))
            await self._scale_ng_ro(
                logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
            )
        return "COMPLETED", "Done"
    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        self.logger.debug("Error updating VNF {}".format(e))
        return "FAILED", "Error updating VNF {}".format(e)
5617
5618 async def _ns_charm_upgrade(
5619 self,
5620 ee_id,
5621 charm_id,
5622 charm_type,
5623 path,
5624 timeout: float = None,
5625 ) -> (str, str):
5626 """This method upgrade charms in VNF instances
5627
5628 Args:
5629 ee_id: Execution environment id
5630 path: Local path to the charm
5631 charm_id: charm-id
5632 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5633 timeout: (Float) Timeout for the ns update operation
5634
5635 Returns:
5636 result: (str, str) COMPLETED/FAILED, details
5637 """
5638 try:
5639 charm_type = charm_type or "lxc_proxy_charm"
5640 output = await self.vca_map[charm_type].upgrade_charm(
5641 ee_id=ee_id,
5642 path=path,
5643 charm_id=charm_id,
5644 charm_type=charm_type,
5645 timeout=timeout or self.timeout_ns_update,
5646 )
5647
5648 if output:
5649 return "COMPLETED", output
5650
5651 except (LcmException, asyncio.CancelledError):
5652 raise
5653
5654 except Exception as e:
5655
5656 self.logger.debug("Error upgrading charm {}".format(path))
5657
5658 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5659
5660 async def update(self, nsr_id, nslcmop_id):
5661 """Update NS according to different update types
5662
5663 This method performs upgrade of VNF instances then updates the revision
5664 number in VNF record
5665
5666 Args:
5667 nsr_id: Network service will be updated
5668 nslcmop_id: ns lcm operation id
5669
5670 Returns:
5671 It may raise DbException, LcmException, N2VCException, K8sException
5672
5673 """
5674 # Try to lock HA task here
5675 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5676 if not task_is_locked_by_me:
5677 return
5678
5679 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5680 self.logger.debug(logging_text + "Enter")
5681
5682 # Set the required variables to be filled up later
5683 db_nsr = None
5684 db_nslcmop_update = {}
5685 vnfr_update = {}
5686 nslcmop_operation_state = None
5687 db_nsr_update = {}
5688 error_description_nslcmop = ""
5689 exc = None
5690 change_type = "updated"
5691 detailed_status = ""
5692
5693 try:
5694 # wait for any previous tasks in process
5695 step = "Waiting for previous operations to terminate"
5696 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5697 self._write_ns_status(
5698 nsr_id=nsr_id,
5699 ns_state=None,
5700 current_operation="UPDATING",
5701 current_operation_id=nslcmop_id,
5702 )
5703
5704 step = "Getting nslcmop from database"
5705 db_nslcmop = self.db.get_one(
5706 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5707 )
5708 update_type = db_nslcmop["operationParams"]["updateType"]
5709
5710 step = "Getting nsr from database"
5711 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5712 old_operational_status = db_nsr["operational-status"]
5713 db_nsr_update["operational-status"] = "updating"
5714 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5715 nsr_deployed = db_nsr["_admin"].get("deployed")
5716
5717 if update_type == "CHANGE_VNFPKG":
5718
5719 # Get the input parameters given through update request
5720 vnf_instance_id = db_nslcmop["operationParams"][
5721 "changeVnfPackageData"
5722 ].get("vnfInstanceId")
5723
5724 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5725 "vnfdId"
5726 )
5727 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5728
5729 step = "Getting vnfr from database"
5730 db_vnfr = self.db.get_one(
5731 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5732 )
5733
5734 step = "Getting vnfds from database"
5735 # Latest VNFD
5736 latest_vnfd = self.db.get_one(
5737 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5738 )
5739 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5740
5741 # Current VNFD
5742 current_vnf_revision = db_vnfr.get("revision", 1)
5743 current_vnfd = self.db.get_one(
5744 "vnfds_revisions",
5745 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5746 fail_on_empty=False,
5747 )
5748 # Charm artifact paths will be filled up later
5749 (
5750 current_charm_artifact_path,
5751 target_charm_artifact_path,
5752 charm_artifact_paths,
5753 ) = ([], [], [])
5754
5755 step = "Checking if revision has changed in VNFD"
5756 if current_vnf_revision != latest_vnfd_revision:
5757
5758 change_type = "policy_updated"
5759
5760 # There is new revision of VNFD, update operation is required
5761 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5762 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5763
5764 step = "Removing the VNFD packages if they exist in the local path"
5765 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5766 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5767
5768 step = "Get the VNFD packages from FSMongo"
5769 self.fs.sync(from_path=latest_vnfd_path)
5770 self.fs.sync(from_path=current_vnfd_path)
5771
5772 step = (
5773 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5774 )
5775 base_folder = latest_vnfd["_admin"]["storage"]
5776
5777 for charm_index, charm_deployed in enumerate(
5778 get_iterable(nsr_deployed, "VCA")
5779 ):
5780 vnf_index = db_vnfr.get("member-vnf-index-ref")
5781
5782 # Getting charm-id and charm-type
5783 if charm_deployed.get("member-vnf-index") == vnf_index:
5784 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5785 charm_type = charm_deployed.get("type")
5786
5787 # Getting ee-id
5788 ee_id = charm_deployed.get("ee_id")
5789
5790 step = "Getting descriptor config"
5791 descriptor_config = get_configuration(
5792 current_vnfd, current_vnfd["id"]
5793 )
5794
5795 if "execution-environment-list" in descriptor_config:
5796 ee_list = descriptor_config.get(
5797 "execution-environment-list", []
5798 )
5799 else:
5800 ee_list = []
5801
5802 # There could be several charm used in the same VNF
5803 for ee_item in ee_list:
5804 if ee_item.get("juju"):
5805
5806 step = "Getting charm name"
5807 charm_name = ee_item["juju"].get("charm")
5808
5809 step = "Setting Charm artifact paths"
5810 current_charm_artifact_path.append(
5811 get_charm_artifact_path(
5812 base_folder,
5813 charm_name,
5814 charm_type,
5815 current_vnf_revision,
5816 )
5817 )
5818 target_charm_artifact_path.append(
5819 get_charm_artifact_path(
5820 base_folder,
5821 charm_name,
5822 charm_type,
5823 latest_vnfd_revision,
5824 )
5825 )
5826
5827 charm_artifact_paths = zip(
5828 current_charm_artifact_path, target_charm_artifact_path
5829 )
5830
5831 step = "Checking if software version has changed in VNFD"
5832 if find_software_version(current_vnfd) != find_software_version(
5833 latest_vnfd
5834 ):
5835
5836 step = "Checking if existing VNF has charm"
5837 for current_charm_path, target_charm_path in list(
5838 charm_artifact_paths
5839 ):
5840 if current_charm_path:
5841 raise LcmException(
5842 "Software version change is not supported as VNF instance {} has charm.".format(
5843 vnf_instance_id
5844 )
5845 )
5846
5847 # There is no change in the charm package, then redeploy the VNF
5848 # based on new descriptor
5849 step = "Redeploying VNF"
5850 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5851 (
5852 result,
5853 detailed_status
5854 ) = await self._ns_redeploy_vnf(
5855 nsr_id,
5856 nslcmop_id,
5857 latest_vnfd,
5858 db_vnfr,
5859 db_nsr
5860 )
5861 if result == "FAILED":
5862 nslcmop_operation_state = result
5863 error_description_nslcmop = detailed_status
5864 db_nslcmop_update["detailed-status"] = detailed_status
5865 self.logger.debug(
5866 logging_text
5867 + " step {} Done with result {} {}".format(
5868 step, nslcmop_operation_state, detailed_status
5869 )
5870 )
5871
5872 else:
5873 step = "Checking if any charm package has changed or not"
5874 for current_charm_path, target_charm_path in list(
5875 charm_artifact_paths
5876 ):
5877 if (
5878 current_charm_path
5879 and target_charm_path
5880 and self.check_charm_hash_changed(
5881 current_charm_path, target_charm_path
5882 )
5883 ):
5884
5885 step = "Checking whether VNF uses juju bundle"
5886 if check_juju_bundle_existence(current_vnfd):
5887
5888 raise LcmException(
5889 "Charm upgrade is not supported for the instance which"
5890 " uses juju-bundle: {}".format(
5891 check_juju_bundle_existence(current_vnfd)
5892 )
5893 )
5894
5895 step = "Upgrading Charm"
5896 (
5897 result,
5898 detailed_status,
5899 ) = await self._ns_charm_upgrade(
5900 ee_id=ee_id,
5901 charm_id=charm_id,
5902 charm_type=charm_type,
5903 path=self.fs.path + target_charm_path,
5904 timeout=timeout_seconds,
5905 )
5906
5907 if result == "FAILED":
5908 nslcmop_operation_state = result
5909 error_description_nslcmop = detailed_status
5910
5911 db_nslcmop_update["detailed-status"] = detailed_status
5912 self.logger.debug(
5913 logging_text
5914 + " step {} Done with result {} {}".format(
5915 step, nslcmop_operation_state, detailed_status
5916 )
5917 )
5918
5919 step = "Updating policies"
5920 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5921 result = "COMPLETED"
5922 detailed_status = "Done"
5923 db_nslcmop_update["detailed-status"] = "Done"
5924
5925 # If nslcmop_operation_state is None, so any operation is not failed.
5926 if not nslcmop_operation_state:
5927 nslcmop_operation_state = "COMPLETED"
5928
5929 # If update CHANGE_VNFPKG nslcmop_operation is successful
5930 # vnf revision need to be updated
5931 vnfr_update["revision"] = latest_vnfd_revision
5932 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5933
5934 self.logger.debug(
5935 logging_text
5936 + " task Done with result {} {}".format(
5937 nslcmop_operation_state, detailed_status
5938 )
5939 )
5940 elif update_type == "REMOVE_VNF":
5941 # This part is included in https://osm.etsi.org/gerrit/11876
5942 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5943 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5944 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5945 step = "Removing VNF"
5946 (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
5947 if result == "FAILED":
5948 nslcmop_operation_state = result
5949 error_description_nslcmop = detailed_status
5950 db_nslcmop_update["detailed-status"] = detailed_status
5951 change_type = "vnf_terminated"
5952 if not nslcmop_operation_state:
5953 nslcmop_operation_state = "COMPLETED"
5954 self.logger.debug(
5955 logging_text
5956 + " task Done with result {} {}".format(
5957 nslcmop_operation_state, detailed_status
5958 )
5959 )
5960
5961 elif update_type == "OPERATE_VNF":
5962 vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
5963 operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
5964 additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
5965 (result, detailed_status) = await self.rebuild_start_stop(
5966 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5967 )
5968 if result == "FAILED":
5969 nslcmop_operation_state = result
5970 error_description_nslcmop = detailed_status
5971 db_nslcmop_update["detailed-status"] = detailed_status
5972 if not nslcmop_operation_state:
5973 nslcmop_operation_state = "COMPLETED"
5974 self.logger.debug(
5975 logging_text
5976 + " task Done with result {} {}".format(
5977 nslcmop_operation_state, detailed_status
5978 )
5979 )
5980
5981 # If nslcmop_operation_state is None, so any operation is not failed.
5982 # All operations are executed in overall.
5983 if not nslcmop_operation_state:
5984 nslcmop_operation_state = "COMPLETED"
5985 db_nsr_update["operational-status"] = old_operational_status
5986
5987 except (DbException, LcmException, N2VCException, K8sException) as e:
5988 self.logger.error(logging_text + "Exit Exception {}".format(e))
5989 exc = e
5990 except asyncio.CancelledError:
5991 self.logger.error(
5992 logging_text + "Cancelled Exception while '{}'".format(step)
5993 )
5994 exc = "Operation was cancelled"
5995 except asyncio.TimeoutError:
5996 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5997 exc = "Timeout"
5998 except Exception as e:
5999 exc = traceback.format_exc()
6000 self.logger.critical(
6001 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6002 exc_info=True,
6003 )
6004 finally:
6005 if exc:
6006 db_nslcmop_update[
6007 "detailed-status"
6008 ] = (
6009 detailed_status
6010 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6011 nslcmop_operation_state = "FAILED"
6012 db_nsr_update["operational-status"] = old_operational_status
6013 if db_nsr:
6014 self._write_ns_status(
6015 nsr_id=nsr_id,
6016 ns_state=db_nsr["nsState"],
6017 current_operation="IDLE",
6018 current_operation_id=None,
6019 other_update=db_nsr_update,
6020 )
6021
6022 self._write_op_status(
6023 op_id=nslcmop_id,
6024 stage="",
6025 error_message=error_description_nslcmop,
6026 operation_state=nslcmop_operation_state,
6027 other_update=db_nslcmop_update,
6028 )
6029
6030 if nslcmop_operation_state:
6031 try:
6032 msg = {
6033 "nsr_id": nsr_id,
6034 "nslcmop_id": nslcmop_id,
6035 "operationState": nslcmop_operation_state,
6036 }
6037 if change_type in ("vnf_terminated", "policy_updated"):
6038 msg.update({"vnf_member_index": member_vnf_index})
6039 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6040 except Exception as e:
6041 self.logger.error(
6042 logging_text + "kafka_write notification Exception {}".format(e)
6043 )
6044 self.logger.debug(logging_text + "Exit")
6045 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6046 return nslcmop_operation_state, detailed_status
6047
6048 async def scale(self, nsr_id, nslcmop_id):
6049 # Try to lock HA task here
6050 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6051 if not task_is_locked_by_me:
6052 return
6053
6054 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6055 stage = ["", "", ""]
6056 tasks_dict_info = {}
6057 # ^ stage, step, VIM progress
6058 self.logger.debug(logging_text + "Enter")
6059 # get all needed from database
6060 db_nsr = None
6061 db_nslcmop_update = {}
6062 db_nsr_update = {}
6063 exc = None
6064 # in case of error, indicates what part of scale was failed to put nsr at error status
6065 scale_process = None
6066 old_operational_status = ""
6067 old_config_status = ""
6068 nsi_id = None
6069 try:
6070 # wait for any previous tasks in process
6071 step = "Waiting for previous operations to terminate"
6072 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6073 self._write_ns_status(
6074 nsr_id=nsr_id,
6075 ns_state=None,
6076 current_operation="SCALING",
6077 current_operation_id=nslcmop_id,
6078 )
6079
6080 step = "Getting nslcmop from database"
6081 self.logger.debug(
6082 step + " after having waited for previous tasks to be completed"
6083 )
6084 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6085
6086 step = "Getting nsr from database"
6087 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6088 old_operational_status = db_nsr["operational-status"]
6089 old_config_status = db_nsr["config-status"]
6090
6091 step = "Parsing scaling parameters"
6092 db_nsr_update["operational-status"] = "scaling"
6093 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6094 nsr_deployed = db_nsr["_admin"].get("deployed")
6095
6096 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6097 "scaleByStepData"
6098 ]["member-vnf-index"]
6099 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6100 "scaleByStepData"
6101 ]["scaling-group-descriptor"]
6102 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6103 # for backward compatibility
6104 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6105 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6106 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6107 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6108
6109 step = "Getting vnfr from database"
6110 db_vnfr = self.db.get_one(
6111 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6112 )
6113
6114 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6115
6116 step = "Getting vnfd from database"
6117 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6118
6119 base_folder = db_vnfd["_admin"]["storage"]
6120
6121 step = "Getting scaling-group-descriptor"
6122 scaling_descriptor = find_in_list(
6123 get_scaling_aspect(db_vnfd),
6124 lambda scale_desc: scale_desc["name"] == scaling_group,
6125 )
6126 if not scaling_descriptor:
6127 raise LcmException(
6128 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6129 "at vnfd:scaling-group-descriptor".format(scaling_group)
6130 )
6131
6132 step = "Sending scale order to VIM"
6133 # TODO check if ns is in a proper status
6134 nb_scale_op = 0
6135 if not db_nsr["_admin"].get("scaling-group"):
6136 self.update_db_2(
6137 "nsrs",
6138 nsr_id,
6139 {
6140 "_admin.scaling-group": [
6141 {"name": scaling_group, "nb-scale-op": 0}
6142 ]
6143 },
6144 )
6145 admin_scale_index = 0
6146 else:
6147 for admin_scale_index, admin_scale_info in enumerate(
6148 db_nsr["_admin"]["scaling-group"]
6149 ):
6150 if admin_scale_info["name"] == scaling_group:
6151 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6152 break
6153 else: # not found, set index one plus last element and add new entry with the name
6154 admin_scale_index += 1
6155 db_nsr_update[
6156 "_admin.scaling-group.{}.name".format(admin_scale_index)
6157 ] = scaling_group
6158
6159 vca_scaling_info = []
6160 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6161 if scaling_type == "SCALE_OUT":
6162 if "aspect-delta-details" not in scaling_descriptor:
6163 raise LcmException(
6164 "Aspect delta details not fount in scaling descriptor {}".format(
6165 scaling_descriptor["name"]
6166 )
6167 )
6168 # count if max-instance-count is reached
6169 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6170
6171 scaling_info["scaling_direction"] = "OUT"
6172 scaling_info["vdu-create"] = {}
6173 scaling_info["kdu-create"] = {}
6174 for delta in deltas:
6175 for vdu_delta in delta.get("vdu-delta", {}):
6176 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6177 # vdu_index also provides the number of instance of the targeted vdu
6178 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6179 cloud_init_text = self._get_vdu_cloud_init_content(
6180 vdud, db_vnfd
6181 )
6182 if cloud_init_text:
6183 additional_params = (
6184 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6185 or {}
6186 )
6187 cloud_init_list = []
6188
6189 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6190 max_instance_count = 10
6191 if vdu_profile and "max-number-of-instances" in vdu_profile:
6192 max_instance_count = vdu_profile.get(
6193 "max-number-of-instances", 10
6194 )
6195
6196 default_instance_num = get_number_of_instances(
6197 db_vnfd, vdud["id"]
6198 )
6199 instances_number = vdu_delta.get("number-of-instances", 1)
6200 nb_scale_op += instances_number
6201
6202 new_instance_count = nb_scale_op + default_instance_num
6203 # Control if new count is over max and vdu count is less than max.
6204 # Then assign new instance count
6205 if new_instance_count > max_instance_count > vdu_count:
6206 instances_number = new_instance_count - max_instance_count
6207 else:
6208 instances_number = instances_number
6209
6210 if new_instance_count > max_instance_count:
6211 raise LcmException(
6212 "reached the limit of {} (max-instance-count) "
6213 "scaling-out operations for the "
6214 "scaling-group-descriptor '{}'".format(
6215 nb_scale_op, scaling_group
6216 )
6217 )
6218 for x in range(vdu_delta.get("number-of-instances", 1)):
6219 if cloud_init_text:
6220 # TODO Information of its own ip is not available because db_vnfr is not updated.
6221 additional_params["OSM"] = get_osm_params(
6222 db_vnfr, vdu_delta["id"], vdu_index + x
6223 )
6224 cloud_init_list.append(
6225 self._parse_cloud_init(
6226 cloud_init_text,
6227 additional_params,
6228 db_vnfd["id"],
6229 vdud["id"],
6230 )
6231 )
6232 vca_scaling_info.append(
6233 {
6234 "osm_vdu_id": vdu_delta["id"],
6235 "member-vnf-index": vnf_index,
6236 "type": "create",
6237 "vdu_index": vdu_index + x,
6238 }
6239 )
6240 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6241 for kdu_delta in delta.get("kdu-resource-delta", {}):
6242 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6243 kdu_name = kdu_profile["kdu-name"]
6244 resource_name = kdu_profile.get("resource-name", "")
6245
6246 # Might have different kdus in the same delta
6247 # Should have list for each kdu
6248 if not scaling_info["kdu-create"].get(kdu_name, None):
6249 scaling_info["kdu-create"][kdu_name] = []
6250
6251 kdur = get_kdur(db_vnfr, kdu_name)
6252 if kdur.get("helm-chart"):
6253 k8s_cluster_type = "helm-chart-v3"
6254 self.logger.debug("kdur: {}".format(kdur))
6255 if (
6256 kdur.get("helm-version")
6257 and kdur.get("helm-version") == "v2"
6258 ):
6259 k8s_cluster_type = "helm-chart"
6260 elif kdur.get("juju-bundle"):
6261 k8s_cluster_type = "juju-bundle"
6262 else:
6263 raise LcmException(
6264 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6265 "juju-bundle. Maybe an old NBI version is running".format(
6266 db_vnfr["member-vnf-index-ref"], kdu_name
6267 )
6268 )
6269
6270 max_instance_count = 10
6271 if kdu_profile and "max-number-of-instances" in kdu_profile:
6272 max_instance_count = kdu_profile.get(
6273 "max-number-of-instances", 10
6274 )
6275
6276 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6277 deployed_kdu, _ = get_deployed_kdu(
6278 nsr_deployed, kdu_name, vnf_index
6279 )
6280 if deployed_kdu is None:
6281 raise LcmException(
6282 "KDU '{}' for vnf '{}' not deployed".format(
6283 kdu_name, vnf_index
6284 )
6285 )
6286 kdu_instance = deployed_kdu.get("kdu-instance")
6287 instance_num = await self.k8scluster_map[
6288 k8s_cluster_type
6289 ].get_scale_count(
6290 resource_name,
6291 kdu_instance,
6292 vca_id=vca_id,
6293 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6294 kdu_model=deployed_kdu.get("kdu-model"),
6295 )
6296 kdu_replica_count = instance_num + kdu_delta.get(
6297 "number-of-instances", 1
6298 )
6299
6300 # Control if new count is over max and instance_num is less than max.
6301 # Then assign max instance number to kdu replica count
6302 if kdu_replica_count > max_instance_count > instance_num:
6303 kdu_replica_count = max_instance_count
6304 if kdu_replica_count > max_instance_count:
6305 raise LcmException(
6306 "reached the limit of {} (max-instance-count) "
6307 "scaling-out operations for the "
6308 "scaling-group-descriptor '{}'".format(
6309 instance_num, scaling_group
6310 )
6311 )
6312
6313 for x in range(kdu_delta.get("number-of-instances", 1)):
6314 vca_scaling_info.append(
6315 {
6316 "osm_kdu_id": kdu_name,
6317 "member-vnf-index": vnf_index,
6318 "type": "create",
6319 "kdu_index": instance_num + x - 1,
6320 }
6321 )
6322 scaling_info["kdu-create"][kdu_name].append(
6323 {
6324 "member-vnf-index": vnf_index,
6325 "type": "create",
6326 "k8s-cluster-type": k8s_cluster_type,
6327 "resource-name": resource_name,
6328 "scale": kdu_replica_count,
6329 }
6330 )
6331 elif scaling_type == "SCALE_IN":
6332 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6333
6334 scaling_info["scaling_direction"] = "IN"
6335 scaling_info["vdu-delete"] = {}
6336 scaling_info["kdu-delete"] = {}
6337
6338 for delta in deltas:
6339 for vdu_delta in delta.get("vdu-delta", {}):
6340 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6341 min_instance_count = 0
6342 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6343 if vdu_profile and "min-number-of-instances" in vdu_profile:
6344 min_instance_count = vdu_profile["min-number-of-instances"]
6345
6346 default_instance_num = get_number_of_instances(
6347 db_vnfd, vdu_delta["id"]
6348 )
6349 instance_num = vdu_delta.get("number-of-instances", 1)
6350 nb_scale_op -= instance_num
6351
6352 new_instance_count = nb_scale_op + default_instance_num
6353
6354 if new_instance_count < min_instance_count < vdu_count:
6355 instances_number = min_instance_count - new_instance_count
6356 else:
6357 instances_number = instance_num
6358
6359 if new_instance_count < min_instance_count:
6360 raise LcmException(
6361 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6362 "scaling-group-descriptor '{}'".format(
6363 nb_scale_op, scaling_group
6364 )
6365 )
6366 for x in range(vdu_delta.get("number-of-instances", 1)):
6367 vca_scaling_info.append(
6368 {
6369 "osm_vdu_id": vdu_delta["id"],
6370 "member-vnf-index": vnf_index,
6371 "type": "delete",
6372 "vdu_index": vdu_index - 1 - x,
6373 }
6374 )
6375 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6376 for kdu_delta in delta.get("kdu-resource-delta", {}):
6377 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6378 kdu_name = kdu_profile["kdu-name"]
6379 resource_name = kdu_profile.get("resource-name", "")
6380
6381 if not scaling_info["kdu-delete"].get(kdu_name, None):
6382 scaling_info["kdu-delete"][kdu_name] = []
6383
6384 kdur = get_kdur(db_vnfr, kdu_name)
6385 if kdur.get("helm-chart"):
6386 k8s_cluster_type = "helm-chart-v3"
6387 self.logger.debug("kdur: {}".format(kdur))
6388 if (
6389 kdur.get("helm-version")
6390 and kdur.get("helm-version") == "v2"
6391 ):
6392 k8s_cluster_type = "helm-chart"
6393 elif kdur.get("juju-bundle"):
6394 k8s_cluster_type = "juju-bundle"
6395 else:
6396 raise LcmException(
6397 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6398 "juju-bundle. Maybe an old NBI version is running".format(
6399 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6400 )
6401 )
6402
6403 min_instance_count = 0
6404 if kdu_profile and "min-number-of-instances" in kdu_profile:
6405 min_instance_count = kdu_profile["min-number-of-instances"]
6406
6407 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6408 deployed_kdu, _ = get_deployed_kdu(
6409 nsr_deployed, kdu_name, vnf_index
6410 )
6411 if deployed_kdu is None:
6412 raise LcmException(
6413 "KDU '{}' for vnf '{}' not deployed".format(
6414 kdu_name, vnf_index
6415 )
6416 )
6417 kdu_instance = deployed_kdu.get("kdu-instance")
6418 instance_num = await self.k8scluster_map[
6419 k8s_cluster_type
6420 ].get_scale_count(
6421 resource_name,
6422 kdu_instance,
6423 vca_id=vca_id,
6424 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6425 kdu_model=deployed_kdu.get("kdu-model"),
6426 )
6427 kdu_replica_count = instance_num - kdu_delta.get(
6428 "number-of-instances", 1
6429 )
6430
6431 if kdu_replica_count < min_instance_count < instance_num:
6432 kdu_replica_count = min_instance_count
6433 if kdu_replica_count < min_instance_count:
6434 raise LcmException(
6435 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6436 "scaling-group-descriptor '{}'".format(
6437 instance_num, scaling_group
6438 )
6439 )
6440
6441 for x in range(kdu_delta.get("number-of-instances", 1)):
6442 vca_scaling_info.append(
6443 {
6444 "osm_kdu_id": kdu_name,
6445 "member-vnf-index": vnf_index,
6446 "type": "delete",
6447 "kdu_index": instance_num - x - 1,
6448 }
6449 )
6450 scaling_info["kdu-delete"][kdu_name].append(
6451 {
6452 "member-vnf-index": vnf_index,
6453 "type": "delete",
6454 "k8s-cluster-type": k8s_cluster_type,
6455 "resource-name": resource_name,
6456 "scale": kdu_replica_count,
6457 }
6458 )
6459
6460 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6461 vdu_delete = copy(scaling_info.get("vdu-delete"))
6462 if scaling_info["scaling_direction"] == "IN":
6463 for vdur in reversed(db_vnfr["vdur"]):
6464 if vdu_delete.get(vdur["vdu-id-ref"]):
6465 vdu_delete[vdur["vdu-id-ref"]] -= 1
6466 scaling_info["vdu"].append(
6467 {
6468 "name": vdur.get("name") or vdur.get("vdu-name"),
6469 "vdu_id": vdur["vdu-id-ref"],
6470 "interface": [],
6471 }
6472 )
6473 for interface in vdur["interfaces"]:
6474 scaling_info["vdu"][-1]["interface"].append(
6475 {
6476 "name": interface["name"],
6477 "ip_address": interface["ip-address"],
6478 "mac_address": interface.get("mac-address"),
6479 }
6480 )
6481 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6482
6483 # PRE-SCALE BEGIN
6484 step = "Executing pre-scale vnf-config-primitive"
6485 if scaling_descriptor.get("scaling-config-action"):
6486 for scaling_config_action in scaling_descriptor[
6487 "scaling-config-action"
6488 ]:
6489 if (
6490 scaling_config_action.get("trigger") == "pre-scale-in"
6491 and scaling_type == "SCALE_IN"
6492 ) or (
6493 scaling_config_action.get("trigger") == "pre-scale-out"
6494 and scaling_type == "SCALE_OUT"
6495 ):
6496 vnf_config_primitive = scaling_config_action[
6497 "vnf-config-primitive-name-ref"
6498 ]
6499 step = db_nslcmop_update[
6500 "detailed-status"
6501 ] = "executing pre-scale scaling-config-action '{}'".format(
6502 vnf_config_primitive
6503 )
6504
6505 # look for primitive
6506 for config_primitive in (
6507 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6508 ).get("config-primitive", ()):
6509 if config_primitive["name"] == vnf_config_primitive:
6510 break
6511 else:
6512 raise LcmException(
6513 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6514 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6515 "primitive".format(scaling_group, vnf_config_primitive)
6516 )
6517
6518 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6519 if db_vnfr.get("additionalParamsForVnf"):
6520 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6521
6522 scale_process = "VCA"
6523 db_nsr_update["config-status"] = "configuring pre-scaling"
6524 primitive_params = self._map_primitive_params(
6525 config_primitive, {}, vnfr_params
6526 )
6527
6528 # Pre-scale retry check: Check if this sub-operation has been executed before
6529 op_index = self._check_or_add_scale_suboperation(
6530 db_nslcmop,
6531 vnf_index,
6532 vnf_config_primitive,
6533 primitive_params,
6534 "PRE-SCALE",
6535 )
6536 if op_index == self.SUBOPERATION_STATUS_SKIP:
6537 # Skip sub-operation
6538 result = "COMPLETED"
6539 result_detail = "Done"
6540 self.logger.debug(
6541 logging_text
6542 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6543 vnf_config_primitive, result, result_detail
6544 )
6545 )
6546 else:
6547 if op_index == self.SUBOPERATION_STATUS_NEW:
6548 # New sub-operation: Get index of this sub-operation
6549 op_index = (
6550 len(db_nslcmop.get("_admin", {}).get("operations"))
6551 - 1
6552 )
6553 self.logger.debug(
6554 logging_text
6555 + "vnf_config_primitive={} New sub-operation".format(
6556 vnf_config_primitive
6557 )
6558 )
6559 else:
6560 # retry: Get registered params for this existing sub-operation
6561 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6562 op_index
6563 ]
6564 vnf_index = op.get("member_vnf_index")
6565 vnf_config_primitive = op.get("primitive")
6566 primitive_params = op.get("primitive_params")
6567 self.logger.debug(
6568 logging_text
6569 + "vnf_config_primitive={} Sub-operation retry".format(
6570 vnf_config_primitive
6571 )
6572 )
6573 # Execute the primitive, either with new (first-time) or registered (reintent) args
6574 ee_descriptor_id = config_primitive.get(
6575 "execution-environment-ref"
6576 )
6577 primitive_name = config_primitive.get(
6578 "execution-environment-primitive", vnf_config_primitive
6579 )
6580 ee_id, vca_type = self._look_for_deployed_vca(
6581 nsr_deployed["VCA"],
6582 member_vnf_index=vnf_index,
6583 vdu_id=None,
6584 vdu_count_index=None,
6585 ee_descriptor_id=ee_descriptor_id,
6586 )
6587 result, result_detail = await self._ns_execute_primitive(
6588 ee_id,
6589 primitive_name,
6590 primitive_params,
6591 vca_type=vca_type,
6592 vca_id=vca_id,
6593 )
6594 self.logger.debug(
6595 logging_text
6596 + "vnf_config_primitive={} Done with result {} {}".format(
6597 vnf_config_primitive, result, result_detail
6598 )
6599 )
6600 # Update operationState = COMPLETED | FAILED
6601 self._update_suboperation_status(
6602 db_nslcmop, op_index, result, result_detail
6603 )
6604
6605 if result == "FAILED":
6606 raise LcmException(result_detail)
6607 db_nsr_update["config-status"] = old_config_status
6608 scale_process = None
6609 # PRE-SCALE END
6610
6611 db_nsr_update[
6612 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6613 ] = nb_scale_op
6614 db_nsr_update[
6615 "_admin.scaling-group.{}.time".format(admin_scale_index)
6616 ] = time()
6617
6618 # SCALE-IN VCA - BEGIN
6619 if vca_scaling_info:
6620 step = db_nslcmop_update[
6621 "detailed-status"
6622 ] = "Deleting the execution environments"
6623 scale_process = "VCA"
6624 for vca_info in vca_scaling_info:
6625 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6626 member_vnf_index = str(vca_info["member-vnf-index"])
6627 self.logger.debug(
6628 logging_text + "vdu info: {}".format(vca_info)
6629 )
6630 if vca_info.get("osm_vdu_id"):
6631 vdu_id = vca_info["osm_vdu_id"]
6632 vdu_index = int(vca_info["vdu_index"])
6633 stage[
6634 1
6635 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6636 member_vnf_index, vdu_id, vdu_index
6637 )
6638 stage[2] = step = "Scaling in VCA"
6639 self._write_op_status(op_id=nslcmop_id, stage=stage)
6640 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6641 config_update = db_nsr["configurationStatus"]
6642 for vca_index, vca in enumerate(vca_update):
6643 if (
6644 (vca or vca.get("ee_id"))
6645 and vca["member-vnf-index"] == member_vnf_index
6646 and vca["vdu_count_index"] == vdu_index
6647 ):
6648 if vca.get("vdu_id"):
6649 config_descriptor = get_configuration(
6650 db_vnfd, vca.get("vdu_id")
6651 )
6652 elif vca.get("kdu_name"):
6653 config_descriptor = get_configuration(
6654 db_vnfd, vca.get("kdu_name")
6655 )
6656 else:
6657 config_descriptor = get_configuration(
6658 db_vnfd, db_vnfd["id"]
6659 )
6660 operation_params = (
6661 db_nslcmop.get("operationParams") or {}
6662 )
6663 exec_terminate_primitives = not operation_params.get(
6664 "skip_terminate_primitives"
6665 ) and vca.get("needed_terminate")
6666 task = asyncio.ensure_future(
6667 asyncio.wait_for(
6668 self.destroy_N2VC(
6669 logging_text,
6670 db_nslcmop,
6671 vca,
6672 config_descriptor,
6673 vca_index,
6674 destroy_ee=True,
6675 exec_primitives=exec_terminate_primitives,
6676 scaling_in=True,
6677 vca_id=vca_id,
6678 ),
6679 timeout=self.timeout_charm_delete,
6680 )
6681 )
6682 tasks_dict_info[task] = "Terminating VCA {}".format(
6683 vca.get("ee_id")
6684 )
6685 del vca_update[vca_index]
6686 del config_update[vca_index]
6687 # wait for pending tasks of terminate primitives
6688 if tasks_dict_info:
6689 self.logger.debug(
6690 logging_text
6691 + "Waiting for tasks {}".format(
6692 list(tasks_dict_info.keys())
6693 )
6694 )
6695 error_list = await self._wait_for_tasks(
6696 logging_text,
6697 tasks_dict_info,
6698 min(
6699 self.timeout_charm_delete, self.timeout_ns_terminate
6700 ),
6701 stage,
6702 nslcmop_id,
6703 )
6704 tasks_dict_info.clear()
6705 if error_list:
6706 raise LcmException("; ".join(error_list))
6707
6708 db_vca_and_config_update = {
6709 "_admin.deployed.VCA": vca_update,
6710 "configurationStatus": config_update,
6711 }
6712 self.update_db_2(
6713 "nsrs", db_nsr["_id"], db_vca_and_config_update
6714 )
6715 scale_process = None
6716 # SCALE-IN VCA - END
6717
6718 # SCALE RO - BEGIN
6719 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6720 scale_process = "RO"
6721 if self.ro_config.get("ng"):
6722 await self._scale_ng_ro(
6723 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6724 )
6725 scaling_info.pop("vdu-create", None)
6726 scaling_info.pop("vdu-delete", None)
6727
6728 scale_process = None
6729 # SCALE RO - END
6730
6731 # SCALE KDU - BEGIN
6732 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6733 scale_process = "KDU"
6734 await self._scale_kdu(
6735 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6736 )
6737 scaling_info.pop("kdu-create", None)
6738 scaling_info.pop("kdu-delete", None)
6739
6740 scale_process = None
6741 # SCALE KDU - END
6742
6743 if db_nsr_update:
6744 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6745
6746 # SCALE-UP VCA - BEGIN
6747 if vca_scaling_info:
6748 step = db_nslcmop_update[
6749 "detailed-status"
6750 ] = "Creating new execution environments"
6751 scale_process = "VCA"
6752 for vca_info in vca_scaling_info:
6753 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6754 member_vnf_index = str(vca_info["member-vnf-index"])
6755 self.logger.debug(
6756 logging_text + "vdu info: {}".format(vca_info)
6757 )
6758 vnfd_id = db_vnfr["vnfd-ref"]
6759 if vca_info.get("osm_vdu_id"):
6760 vdu_index = int(vca_info["vdu_index"])
6761 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6762 if db_vnfr.get("additionalParamsForVnf"):
6763 deploy_params.update(
6764 parse_yaml_strings(
6765 db_vnfr["additionalParamsForVnf"].copy()
6766 )
6767 )
6768 descriptor_config = get_configuration(
6769 db_vnfd, db_vnfd["id"]
6770 )
6771 if descriptor_config:
6772 vdu_id = None
6773 vdu_name = None
6774 kdu_name = None
6775 self._deploy_n2vc(
6776 logging_text=logging_text
6777 + "member_vnf_index={} ".format(member_vnf_index),
6778 db_nsr=db_nsr,
6779 db_vnfr=db_vnfr,
6780 nslcmop_id=nslcmop_id,
6781 nsr_id=nsr_id,
6782 nsi_id=nsi_id,
6783 vnfd_id=vnfd_id,
6784 vdu_id=vdu_id,
6785 kdu_name=kdu_name,
6786 member_vnf_index=member_vnf_index,
6787 vdu_index=vdu_index,
6788 vdu_name=vdu_name,
6789 deploy_params=deploy_params,
6790 descriptor_config=descriptor_config,
6791 base_folder=base_folder,
6792 task_instantiation_info=tasks_dict_info,
6793 stage=stage,
6794 )
6795 vdu_id = vca_info["osm_vdu_id"]
6796 vdur = find_in_list(
6797 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6798 )
6799 descriptor_config = get_configuration(db_vnfd, vdu_id)
6800 if vdur.get("additionalParams"):
6801 deploy_params_vdu = parse_yaml_strings(
6802 vdur["additionalParams"]
6803 )
6804 else:
6805 deploy_params_vdu = deploy_params
6806 deploy_params_vdu["OSM"] = get_osm_params(
6807 db_vnfr, vdu_id, vdu_count_index=vdu_index
6808 )
6809 if descriptor_config:
6810 vdu_name = None
6811 kdu_name = None
6812 stage[
6813 1
6814 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6815 member_vnf_index, vdu_id, vdu_index
6816 )
6817 stage[2] = step = "Scaling out VCA"
6818 self._write_op_status(op_id=nslcmop_id, stage=stage)
6819 self._deploy_n2vc(
6820 logging_text=logging_text
6821 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6822 member_vnf_index, vdu_id, vdu_index
6823 ),
6824 db_nsr=db_nsr,
6825 db_vnfr=db_vnfr,
6826 nslcmop_id=nslcmop_id,
6827 nsr_id=nsr_id,
6828 nsi_id=nsi_id,
6829 vnfd_id=vnfd_id,
6830 vdu_id=vdu_id,
6831 kdu_name=kdu_name,
6832 member_vnf_index=member_vnf_index,
6833 vdu_index=vdu_index,
6834 vdu_name=vdu_name,
6835 deploy_params=deploy_params_vdu,
6836 descriptor_config=descriptor_config,
6837 base_folder=base_folder,
6838 task_instantiation_info=tasks_dict_info,
6839 stage=stage,
6840 )
6841 # SCALE-UP VCA - END
6842 scale_process = None
6843
6844 # POST-SCALE BEGIN
6845 # execute primitive service POST-SCALING
6846 step = "Executing post-scale vnf-config-primitive"
6847 if scaling_descriptor.get("scaling-config-action"):
6848 for scaling_config_action in scaling_descriptor[
6849 "scaling-config-action"
6850 ]:
6851 if (
6852 scaling_config_action.get("trigger") == "post-scale-in"
6853 and scaling_type == "SCALE_IN"
6854 ) or (
6855 scaling_config_action.get("trigger") == "post-scale-out"
6856 and scaling_type == "SCALE_OUT"
6857 ):
6858 vnf_config_primitive = scaling_config_action[
6859 "vnf-config-primitive-name-ref"
6860 ]
6861 step = db_nslcmop_update[
6862 "detailed-status"
6863 ] = "executing post-scale scaling-config-action '{}'".format(
6864 vnf_config_primitive
6865 )
6866
6867 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6868 if db_vnfr.get("additionalParamsForVnf"):
6869 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6870
6871 # look for primitive
6872 for config_primitive in (
6873 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6874 ).get("config-primitive", ()):
6875 if config_primitive["name"] == vnf_config_primitive:
6876 break
6877 else:
6878 raise LcmException(
6879 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6880 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6881 "config-primitive".format(
6882 scaling_group, vnf_config_primitive
6883 )
6884 )
6885 scale_process = "VCA"
6886 db_nsr_update["config-status"] = "configuring post-scaling"
6887 primitive_params = self._map_primitive_params(
6888 config_primitive, {}, vnfr_params
6889 )
6890
6891 # Post-scale retry check: Check if this sub-operation has been executed before
6892 op_index = self._check_or_add_scale_suboperation(
6893 db_nslcmop,
6894 vnf_index,
6895 vnf_config_primitive,
6896 primitive_params,
6897 "POST-SCALE",
6898 )
6899 if op_index == self.SUBOPERATION_STATUS_SKIP:
6900 # Skip sub-operation
6901 result = "COMPLETED"
6902 result_detail = "Done"
6903 self.logger.debug(
6904 logging_text
6905 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6906 vnf_config_primitive, result, result_detail
6907 )
6908 )
6909 else:
6910 if op_index == self.SUBOPERATION_STATUS_NEW:
6911 # New sub-operation: Get index of this sub-operation
6912 op_index = (
6913 len(db_nslcmop.get("_admin", {}).get("operations"))
6914 - 1
6915 )
6916 self.logger.debug(
6917 logging_text
6918 + "vnf_config_primitive={} New sub-operation".format(
6919 vnf_config_primitive
6920 )
6921 )
6922 else:
6923 # retry: Get registered params for this existing sub-operation
6924 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6925 op_index
6926 ]
6927 vnf_index = op.get("member_vnf_index")
6928 vnf_config_primitive = op.get("primitive")
6929 primitive_params = op.get("primitive_params")
6930 self.logger.debug(
6931 logging_text
6932 + "vnf_config_primitive={} Sub-operation retry".format(
6933 vnf_config_primitive
6934 )
6935 )
6936 # Execute the primitive, either with new (first-time) or registered (reintent) args
6937 ee_descriptor_id = config_primitive.get(
6938 "execution-environment-ref"
6939 )
6940 primitive_name = config_primitive.get(
6941 "execution-environment-primitive", vnf_config_primitive
6942 )
6943 ee_id, vca_type = self._look_for_deployed_vca(
6944 nsr_deployed["VCA"],
6945 member_vnf_index=vnf_index,
6946 vdu_id=None,
6947 vdu_count_index=None,
6948 ee_descriptor_id=ee_descriptor_id,
6949 )
6950 result, result_detail = await self._ns_execute_primitive(
6951 ee_id,
6952 primitive_name,
6953 primitive_params,
6954 vca_type=vca_type,
6955 vca_id=vca_id,
6956 )
6957 self.logger.debug(
6958 logging_text
6959 + "vnf_config_primitive={} Done with result {} {}".format(
6960 vnf_config_primitive, result, result_detail
6961 )
6962 )
6963 # Update operationState = COMPLETED | FAILED
6964 self._update_suboperation_status(
6965 db_nslcmop, op_index, result, result_detail
6966 )
6967
6968 if result == "FAILED":
6969 raise LcmException(result_detail)
6970 db_nsr_update["config-status"] = old_config_status
6971 scale_process = None
6972 # POST-SCALE END
6973
6974 db_nsr_update[
6975 "detailed-status"
6976 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6977 db_nsr_update["operational-status"] = (
6978 "running"
6979 if old_operational_status == "failed"
6980 else old_operational_status
6981 )
6982 db_nsr_update["config-status"] = old_config_status
6983 return
6984 except (
6985 ROclient.ROClientException,
6986 DbException,
6987 LcmException,
6988 NgRoException,
6989 ) as e:
6990 self.logger.error(logging_text + "Exit Exception {}".format(e))
6991 exc = e
6992 except asyncio.CancelledError:
6993 self.logger.error(
6994 logging_text + "Cancelled Exception while '{}'".format(step)
6995 )
6996 exc = "Operation was cancelled"
6997 except Exception as e:
6998 exc = traceback.format_exc()
6999 self.logger.critical(
7000 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7001 exc_info=True,
7002 )
7003 finally:
7004 self._write_ns_status(
7005 nsr_id=nsr_id,
7006 ns_state=None,
7007 current_operation="IDLE",
7008 current_operation_id=None,
7009 )
7010 if tasks_dict_info:
7011 stage[1] = "Waiting for instantiate pending tasks."
7012 self.logger.debug(logging_text + stage[1])
7013 exc = await self._wait_for_tasks(
7014 logging_text,
7015 tasks_dict_info,
7016 self.timeout_ns_deploy,
7017 stage,
7018 nslcmop_id,
7019 nsr_id=nsr_id,
7020 )
7021 if exc:
7022 db_nslcmop_update[
7023 "detailed-status"
7024 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7025 nslcmop_operation_state = "FAILED"
7026 if db_nsr:
7027 db_nsr_update["operational-status"] = old_operational_status
7028 db_nsr_update["config-status"] = old_config_status
7029 db_nsr_update["detailed-status"] = ""
7030 if scale_process:
7031 if "VCA" in scale_process:
7032 db_nsr_update["config-status"] = "failed"
7033 if "RO" in scale_process:
7034 db_nsr_update["operational-status"] = "failed"
7035 db_nsr_update[
7036 "detailed-status"
7037 ] = "FAILED scaling nslcmop={} {}: {}".format(
7038 nslcmop_id, step, exc
7039 )
7040 else:
7041 error_description_nslcmop = None
7042 nslcmop_operation_state = "COMPLETED"
7043 db_nslcmop_update["detailed-status"] = "Done"
7044
7045 self._write_op_status(
7046 op_id=nslcmop_id,
7047 stage="",
7048 error_message=error_description_nslcmop,
7049 operation_state=nslcmop_operation_state,
7050 other_update=db_nslcmop_update,
7051 )
7052 if db_nsr:
7053 self._write_ns_status(
7054 nsr_id=nsr_id,
7055 ns_state=None,
7056 current_operation="IDLE",
7057 current_operation_id=None,
7058 other_update=db_nsr_update,
7059 )
7060
7061 if nslcmop_operation_state:
7062 try:
7063 msg = {
7064 "nsr_id": nsr_id,
7065 "nslcmop_id": nslcmop_id,
7066 "operationState": nslcmop_operation_state,
7067 }
7068 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7069 except Exception as e:
7070 self.logger.error(
7071 logging_text + "kafka_write notification Exception {}".format(e)
7072 )
7073 self.logger.debug(logging_text + "Exit")
7074 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7075
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs of a VNF as described in scaling_info.

        For each KDU instance to scale, in order:
        1. run the terminate config primitives (scale-in only, and only when
           the KDU has no juju execution environment),
        2. perform the K8s scale operation itself (atomic, status written to
           the nsr record under _admin.deployed.K8s.<index>),
        3. run the initial config primitives (scale-out only, same EE condition).

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id, used to build the db path for status updates
        :param nsr_deployed: content of nsr _admin.deployed
        :param db_vnfd: VNF descriptor owning the KDUs
        :param vca_id: VCA id forwarded to the K8s connector calls
        :param scaling_info: dict with "kdu-create" or "kdu-delete" entries,
            each mapping kdu_name -> list of per-instance scaling dicts
        """
        # NOTE(review): `or` picks only the first truthy entry, so if both
        # "kdu-create" and "kdu-delete" were present only the create part
        # would be processed — presumably callers never set both; confirm.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU entry and its index in _admin.deployed.K8s
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # where the K8s connector writes progress/status in the nsr record
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # terminate primitives apply only when no juju EE handles config
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # each primitive gets its own 10-minute deadline
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # actual K8s scale, common to both create and delete types
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # initial primitives apply only when no juju EE handles config
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7181
7182 async def _scale_ng_ro(
7183 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7184 ):
7185 nsr_id = db_nslcmop["nsInstanceId"]
7186 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7187 db_vnfrs = {}
7188
7189 # read from db: vnfd's for every vnf
7190 db_vnfds = []
7191
7192 # for each vnf in ns, read vnfd
7193 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7194 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7195 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7196 # if we haven't this vnfd, read it from db
7197 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7198 # read from db
7199 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7200 db_vnfds.append(vnfd)
7201 n2vc_key = self.n2vc.get_public_key()
7202 n2vc_key_list = [n2vc_key]
7203 self.scale_vnfr(
7204 db_vnfr,
7205 vdu_scaling_info.get("vdu-create"),
7206 vdu_scaling_info.get("vdu-delete"),
7207 mark_delete=True,
7208 )
7209 # db_vnfr has been updated, update db_vnfrs to use it
7210 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7211 await self._instantiate_ng_ro(
7212 logging_text,
7213 nsr_id,
7214 db_nsd,
7215 db_nsr,
7216 db_nslcmop,
7217 db_vnfrs,
7218 db_vnfds,
7219 n2vc_key_list,
7220 stage=stage,
7221 start_deploy=time(),
7222 timeout_ns_deploy=self.timeout_ns_deploy,
7223 )
7224 if vdu_scaling_info.get("vdu-delete"):
7225 self.scale_vnfr(
7226 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7227 )
7228
7229 async def extract_prometheus_scrape_jobs(
7230 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7231 ):
7232 # look if exist a file called 'prometheus*.j2' and
7233 artifact_content = self.fs.dir_ls(artifact_path)
7234 job_file = next(
7235 (
7236 f
7237 for f in artifact_content
7238 if f.startswith("prometheus") and f.endswith(".j2")
7239 ),
7240 None,
7241 )
7242 if not job_file:
7243 return
7244 with self.fs.file_open((artifact_path, job_file), "r") as f:
7245 job_data = f.read()
7246
7247 # TODO get_service
7248 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7249 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7250 host_port = "80"
7251 vnfr_id = vnfr_id.replace("-", "")
7252 variables = {
7253 "JOB_NAME": vnfr_id,
7254 "TARGET_IP": target_ip,
7255 "EXPORTER_POD_IP": host_name,
7256 "EXPORTER_POD_PORT": host_port,
7257 }
7258 job_list = parse_job(job_data, variables)
7259 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7260 for job in job_list:
7261 if (
7262 not isinstance(job.get("job_name"), str)
7263 or vnfr_id not in job["job_name"]
7264 ):
7265 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7266 job["nsr_id"] = nsr_id
7267 job["vnfr_id"] = vnfr_id
7268 return job_list
7269
    async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
        """Run an operate action (start/stop/rebuild) on one VDU through RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id
        :param vnf_id: _id of the target vnfr
        :param additional_param: dict with "vdu_id" and "count-index" selecting
            the exact VDU instance
        :param operation_type: RO operation name — presumably one of
            start/stop/rebuild as the method name suggests; confirm with callers
        :return: ("COMPLETED", "Done") on success, ("FAILED", <detail>) on error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # select the vdur records of the requested vdu, then the exact instance
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info is taken as the target VIM identifier
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO finishes (or times out) the requested action
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy,
                self.timeout_operate, None, "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        # only reachable via the except branches above, so exc is always bound here.
        # NOTE(review): on failure the nsr status written above is not reset to
        # IDLE here — presumably the caller handles that; confirm.
        return "FAILED", "Error in operate VNF {}".format(exc)
7347
7348 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7349 """
7350 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7351
7352 :param: vim_account_id: VIM Account ID
7353
7354 :return: (cloud_name, cloud_credential)
7355 """
7356 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7357 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7358
7359 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7360 """
7361 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7362
7363 :param: vim_account_id: VIM Account ID
7364
7365 :return: (cloud_name, cloud_credential)
7366 """
7367 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7368 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7369
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # RO target is just the operation params plus nothing else here
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the migration finished (or times out)
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
                operation="migrate"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always leave the ns record in IDLE and record the operation result
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            # notify result on kafka; failures here are logged but not raised
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7468
7469
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # save the current statuses so they can be restored on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            # NOTE: the RO heal is awaited inline (instead of being run as a
            # background task, see the commented-out code below) so that VMs
            # are fully recreated before the N2VC healing part starts.
            #task_ro = asyncio.ensure_future(
            #    self.heal_RO(
            #        logging_text=logging_text,
            #        nsr_id=nsr_id,
            #        db_nslcmop=db_nslcmop,
            #        stage=stage,
            #    )
            #)
            #self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            #tasks_dict_info[task_ro] = "Healing at VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get("vdu", [])
                    if not target_vdu_list:
                        # no VDUs specified: build the list from every existing vdur
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get("run-day1", False)
                            vdu_to_be_healed = {"vdu-id": vdu_name, "count-index": vdu_index, "run-day1": vdu_run_day1}
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index",0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
                                    target_instance = instance
                                    break
                            # NOTE(review): if no vdur matches, target_instance
                            # stays None and .get() below would raise —
                            # presumably the healed VDU always exists in vdur;
                            # confirm.
                            if vnf_ip_address == target_instance.get("ip-address"):
                                # _heal_n2vc only registers asyncio tasks in
                                # tasks_dict_info; they are awaited in finally
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the N2VC tasks registered by _heal_n2vc, if any
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the statuses saved at the beginning of the method
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, exc
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify result on kafka; failures here are logged but not raised
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7752
7753 async def heal_RO(
7754 self,
7755 logging_text,
7756 nsr_id,
7757 db_nslcmop,
7758 stage,
7759 ):
7760 """
7761 Heal at RO
7762 :param logging_text: preffix text to use at logging
7763 :param nsr_id: nsr identity
7764 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7765 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7766 :return: None or exception
7767 """
7768 def get_vim_account(vim_account_id):
7769 nonlocal db_vims
7770 if vim_account_id in db_vims:
7771 return db_vims[vim_account_id]
7772 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7773 db_vims[vim_account_id] = db_vim
7774 return db_vim
7775
7776 try:
7777 start_heal = time()
7778 ns_params = db_nslcmop.get("operationParams")
7779 if ns_params and ns_params.get("timeout_ns_heal"):
7780 timeout_ns_heal = ns_params["timeout_ns_heal"]
7781 else:
7782 timeout_ns_heal = self.timeout.get(
7783 "ns_heal", self.timeout_ns_heal
7784 )
7785
7786 db_vims = {}
7787
7788 nslcmop_id = db_nslcmop["_id"]
7789 target = {
7790 "action_id": nslcmop_id,
7791 }
7792 self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
7793 target.update(db_nslcmop.get("operationParams", {}))
7794
7795 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7796 desc = await self.RO.recreate(nsr_id, target)
7797 self.logger.debug("RO return > {}".format(desc))
7798 action_id = desc["action_id"]
7799 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7800 await self._wait_ng_ro(
7801 nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
7802 operation="healing"
7803 )
7804
7805 # Updating NSR
7806 db_nsr_update = {
7807 "_admin.deployed.RO.operational-status": "running",
7808 "detailed-status": " ".join(stage),
7809 }
7810 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7811 self._write_op_status(nslcmop_id, stage)
7812 self.logger.debug(
7813 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7814 )
7815
7816 except Exception as e:
7817 stage[2] = "ERROR healing at VIM"
7818 #self.set_vnfr_at_error(db_vnfrs, str(e))
7819 self.logger.error(
7820 "Error healing at VIM {}".format(e),
7821 exc_info=not isinstance(
7822 e,
7823 (
7824 ROclient.ROClientException,
7825 LcmException,
7826 DbException,
7827 NgRoException,
7828 ),
7829 ),
7830 )
7831 raise
7832
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment found in
        descriptor_config and register each task in self.lcm_tasks.

        The charm information is looked up at <nsrs>._admin.deployed.VCA; when
        no matching entry exists a new one is created and written to the DB.

        :param descriptor_config: configuration section of the NS/VNF/VDU
            descriptor; may carry an "execution-environment-list" or be an
            ns-charm descriptor itself (with a "juju" key)
        :param task_instantiation_info: dict updated in place, mapping each
            launched task to a human-readable task name
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        # Collect execution environments to process: explicit list, a single
        # ns charm, or nothing (other config types, e.g. scripts, unsupported)
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the execution environment item:
            # juju charms may be proxy (lxc/k8s) or native; helm charts map to
            # "helm" (v2) or "helm-v3"
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # Look for an already-deployed VCA entry matching this element;
            # the for/else creates a fresh entry when none matches
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                # new entry goes right after the last enumerated index
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
7985
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal one VCA execution environment of the NS.

        For native charms: waits for the healed VM to be up, re-registers the
        execution environment in the VCA and re-installs the configuration
        software. For proxy/helm charms: obtains the EE ssh public key (when
        ssh access is required), waits for RO to complete the heal operation
        and injects the key into the VM. Finally, when deploy_params carries
        "run-day1", re-executes the initial config primitives.

        :param vca_index: index of this VCA in <nsrs>._admin.deployed.VCA
        :param vca_type: one of native_charm/lxc_proxy_charm/k8s_proxy_charm/
            helm/helm-v3 (key into self.vca_map)
        :raises LcmException: wrapping any failure, after marking this
            configurationStatus entry as BROKEN
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # step tracks the last attempted action for error reporting
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # juju model namespace: "<nsi>.<ns>" (optionally extended below)
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace: NS -> VNF -> VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injection yet (user/pub_key None); just wait for the VM IP
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

                # for compatibility with MON/POL modules, the need model and application name at database
                # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
                # Not sure if this need to be done when healing
                """
                ee_id_parts = ee_id.split(".")
                db_nsr_update = {db_update_entry + "ee_id": ee_id}
                if len(ee_id_parts) >= 2:
                    model_name = ee_id_parts[0]
                    application_name = ee_id_parts[1]
                    db_nsr_update[db_update_entry + "model"] = model_name
                    db_nsr_update[db_update_entry + "application"] = application_name
                """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                #other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" initial primitive, if present, provides the
                # charm config applied at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                            check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # known exception types were already logged where raised; only log
            # the traceback for unexpected ones
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8389
8390 async def _wait_heal_ro(
8391 self,
8392 nsr_id,
8393 timeout=600,
8394 ):
8395 start_time = time()
8396 while time() <= start_time + timeout:
8397 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8398 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8399 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8400 if operational_status_ro != "healing":
8401 break
8402 await asyncio.sleep(15, loop=self.loop)
8403 else: # timeout_ns_deploy
8404 raise NgRoException("Timeout waiting ns to deploy")
8405
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Delegates the whole operation to RO (self.RO.vertical_scale) using the
        nslcmop operationParams as target, waits for RO completion, and
        reports the final state through the op status and a kafka
        "verticalscaled" notification.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id
            )
            step = "Getting nslcmop from database"
            self.logger.debug(step + " after having waited for previous tasks to be completed")
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO target is simply the operation parameters as provided
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # wait for RO to finish the vertical scale action
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale,
                operation="verticalscale"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # always leave the NS in IDLE and record the operation result
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                # NOTE(review): db_nsr_update is populated here but never
                # written to the "nsrs" collection in this method — confirm
                # whether an update_db_2 call is missing or this is intended
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # notify other modules (kafka) that the operation finished
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")