a26039a860e27917c5781011feee628ba3c3884e
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 )
34
35 from osm_lcm import ROclient
36 from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41 )
42 from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50 )
51 from osm_lcm.ng_ro import NgRoClient, NgRoException
52 from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
59 check_juju_bundle_existence,
60 get_charm_artifact_path,
61 )
62 from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66 )
67 from osm_lcm.data_utils.vnfd import (
68 get_kdu,
69 get_kdu_services,
70 get_relation_list,
71 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
83 get_kdu_resource_profile,
84 find_software_version,
85 )
86 from osm_lcm.data_utils.list_utils import find_in_list
87 from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92 )
93 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94 from osm_lcm.data_utils.database.vim_account import VimAccountDB
95 from n2vc.definitions import RelationEndpoint
96 from n2vc.k8s_helm_conn import K8sHelmConnector
97 from n2vc.k8s_helm3_conn import K8sHelm3Connector
98 from n2vc.k8s_juju_conn import K8sJujuConnector
99
100 from osm_common.dbbase import DbException
101 from osm_common.fsbase import FsException
102
103 from osm_lcm.data_utils.database.database import Database
104 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105
106 from n2vc.n2vc_juju_conn import N2VCJujuConnector
107 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
108
109 from osm_lcm.lcm_helm_conn import LCMHelmConn
110 from osm_lcm.osm_config import OsmConfigBuilder
111 from osm_lcm.prometheus import parse_job
112
113 from copy import copy, deepcopy
114 from time import time
115 from uuid import uuid4
116
117 from random import randint
118
119 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
120
121
class NsLcm(LcmBase):
    """NS lifecycle manager.

    Handles instantiation, termination, scaling, healing, migration and
    update of network services, delegating charm/EE operations to the
    N2VC/helm connectors and infrastructure operations to RO.
    """

    # Time for a charm, from the first time it is observed at blocked/error
    # status, before the deployment is marked as failed
    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment of a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment of a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for the operate action on vnfs
    timeout_verticalscale = 1800  # default global timeout for Vertical Scaling
    # sentinel return values used when searching for sub-operations
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
142
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: kafka message bus client, forwarded to the LcmBase parent
        :param lcm_tasks: registry used to track the asyncio tasks of this LCM
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared with all the connectors created here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # shared singletons for database and package-filesystem access
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so local mutations do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # maps the k8s deployment type found in descriptors to its connector
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # maps the charm / execution-environment type to its VCA connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # maps the lcm operation type to the RO coroutine used to poll its status
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
231
232 @staticmethod
233 def increment_ip_mac(ip_mac, vm_index=1):
234 if not isinstance(ip_mac, str):
235 return ip_mac
236 try:
237 # try with ipv4 look for last dot
238 i = ip_mac.rfind(".")
239 if i > 0:
240 i += 1
241 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i = ip_mac.rfind(":")
244 if i > 0:
245 i += 1
246 # format in hex, len can be 2 for mac or 4 for ipv6
247 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
248 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
249 )
250 except Exception:
251 pass
252 return None
253
254 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
255
256 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
257
258 try:
259 # TODO filter RO descriptor fields...
260
261 # write to database
262 db_dict = dict()
263 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
264 db_dict["deploymentStatus"] = ro_descriptor
265 self.update_db_2("nsrs", nsrs_id, db_dict)
266
267 except Exception as e:
268 self.logger.warn(
269 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
270 )
271
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by N2VC on juju model changes; refreshes the
        vcaStatus, configurationStatus and nsState fields of the nsrs record.

        :param table: database table of the watched record (receives "nsrs")
        :param filter: db filter; its "_id" entry is used as the nsr id
        :param path: dotted path of the changed element; its last component is
            expected to be the VCA index inside _admin.deployed.VCA
        :param updated_data: changed data (not used directly here)
        :param vca_id: optional VCA (juju controller) identifier
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        # .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last component of the dotted path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key, so both
                # assignments below raise KeyError, which is swallowed by the
                # except clause — the status transition is never persisted. It
                # looks like dotted keys ("configurationStatus.{}.status") were
                # intended for update_db_2; confirm before fixing.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # let task cancellation/timeouts propagate to the caller
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
374
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param vca_id: optional VCA (juju controller) id, passed to the connector
        :cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        # .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            # ask the connector for this cluster type for the full KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict["vcaStatus"],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # let task cancellation/timeouts propagate to the caller
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
423
    @staticmethod
    def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
        """Render a cloud-init Jinja2 template with the instantiation params.

        :param cloud_init_text: cloud-init content (Jinja2 template source)
        :param additional_params: values for the template variables (may be None)
        :param vnfd_id: vnfd id, used only for error reporting
        :param vdu_id: vdu id, used only for error reporting
        :return: the rendered cloud-init text
        :raises LcmException: on undefined variables or template errors
        """
        try:
            # StrictUndefined makes any missing variable raise UndefinedError
            # instead of silently rendering an empty string
            # NOTE(review): autoescape=True applies HTML escaping, which would
            # alter cloud-init payloads containing &, < or > — confirm intended
            env = Environment(undefined=StrictUndefined, autoescape=True)
            template = env.from_string(cloud_init_text)
            return template.render(additional_params or {})
        except UndefinedError as e:
            raise LcmException(
                "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
                "file, must be provided in the instantiation parameters inside the "
                "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
            )
        except (TemplateError, TemplateNotFound) as e:
            raise LcmException(
                "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                    vnfd_id, vdu_id, e
                )
            )
442
443 def _get_vdu_cloud_init_content(self, vdu, vnfd):
444 cloud_init_content = cloud_init_file = None
445 try:
446 if vdu.get("cloud-init-file"):
447 base_folder = vnfd["_admin"]["storage"]
448 if base_folder["pkg-dir"]:
449 cloud_init_file = "{}/{}/cloud_init/{}".format(
450 base_folder["folder"],
451 base_folder["pkg-dir"],
452 vdu["cloud-init-file"],
453 )
454 else:
455 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
456 base_folder["folder"],
457 vdu["cloud-init-file"],
458 )
459 with self.fs.file_open(cloud_init_file, "r") as ci_file:
460 cloud_init_content = ci_file.read()
461 elif vdu.get("cloud-init"):
462 cloud_init_content = vdu["cloud-init"]
463
464 return cloud_init_content
465 except FsException as e:
466 raise LcmException(
467 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
468 vnfd["id"], vdu["id"], cloud_init_file, e
469 )
470 )
471
472 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
473 vdur = next(
474 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
475 )
476 additional_params = vdur.get("additionalParams")
477 return parse_yaml_strings(additional_params)
478
479 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
480 """
481 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
482 :param vnfd: input vnfd
483 :param new_id: overrides vnf id if provided
484 :param additionalParams: Instantiation params for VNFs provided
485 :param nsrId: Id of the NSR
486 :return: copy of vnfd
487 """
488 vnfd_RO = deepcopy(vnfd)
489 # remove unused by RO configuration, monitoring, scaling and internal keys
490 vnfd_RO.pop("_id", None)
491 vnfd_RO.pop("_admin", None)
492 vnfd_RO.pop("monitoring-param", None)
493 vnfd_RO.pop("scaling-group-descriptor", None)
494 vnfd_RO.pop("kdu", None)
495 vnfd_RO.pop("k8s-cluster", None)
496 if new_id:
497 vnfd_RO["id"] = new_id
498
499 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
500 for vdu in get_iterable(vnfd_RO, "vdu"):
501 vdu.pop("cloud-init-file", None)
502 vdu.pop("cloud-init", None)
503 return vnfd_RO
504
505 @staticmethod
506 def ip_profile_2_RO(ip_profile):
507 RO_ip_profile = deepcopy(ip_profile)
508 if "dns-server" in RO_ip_profile:
509 if isinstance(RO_ip_profile["dns-server"], list):
510 RO_ip_profile["dns-address"] = []
511 for ds in RO_ip_profile.pop("dns-server"):
512 RO_ip_profile["dns-address"].append(ds["address"])
513 else:
514 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
515 if RO_ip_profile.get("ip-version") == "ipv4":
516 RO_ip_profile["ip-version"] = "IPv4"
517 if RO_ip_profile.get("ip-version") == "ipv6":
518 RO_ip_profile["ip-version"] = "IPv6"
519 if "dhcp-params" in RO_ip_profile:
520 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
521 return RO_ip_profile
522
523 def _get_ro_vim_id_for_vim_account(self, vim_account):
524 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
525 if db_vim["_admin"]["operationalState"] != "ENABLED":
526 raise LcmException(
527 "VIM={} is not available. operationalState={}".format(
528 vim_account, db_vim["_admin"]["operationalState"]
529 )
530 )
531 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
532 return RO_vim_id
533
534 def get_ro_wim_id_for_wim_account(self, wim_account):
535 if isinstance(wim_account, str):
536 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
537 if db_wim["_admin"]["operationalState"] != "ENABLED":
538 raise LcmException(
539 "WIM={} is not available. operationalState={}".format(
540 wim_account, db_wim["_admin"]["operationalState"]
541 )
542 )
543 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
544 return RO_wim_id
545 else:
546 return wim_account
547
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Scale the vdur list of a vnfr record in the database.

        :param db_vnfr: vnfr content; its "vdur" list is refreshed in place
            from the database at the end
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: when True, vdurs are only marked with status
            DELETING instead of being pulled from the record
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone the last existing vdur of this vdu as the template
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    # count-index continues from the cloned vdur's index
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            # derive a deterministic address from the template's
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count vdurs of this vdu as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
659
660 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
661 """
662 Updates database nsr with the RO info for the created vld
663 :param ns_update_nsr: dictionary to be filled with the updated info
664 :param db_nsr: content of db_nsr. This is also modified
665 :param nsr_desc_RO: nsr descriptor from RO
666 :return: Nothing, LcmException is raised on errors
667 """
668
669 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
670 for net_RO in get_iterable(nsr_desc_RO, "nets"):
671 if vld["id"] != net_RO.get("ns_net_osm_id"):
672 continue
673 vld["vim-id"] = net_RO.get("vim_net_id")
674 vld["name"] = net_RO.get("vim_name")
675 vld["status"] = net_RO.get("status")
676 vld["status-detailed"] = net_RO.get("error_msg")
677 ns_update_nsr["vld.{}".format(vld_index)] = vld
678 break
679 else:
680 raise LcmException(
681 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
682 )
683
684 def set_vnfr_at_error(self, db_vnfrs, error_text):
685 try:
686 for db_vnfr in db_vnfrs.values():
687 vnfr_update = {"status": "ERROR"}
688 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
689 if "status" not in vdur:
690 vdur["status"] = "ERROR"
691 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
692 if error_text:
693 vdur["status-detailed"] = str(error_text)
694 vnfr_update[
695 "vdur.{}.status-detailed".format(vdu_index)
696 ] = "ERROR"
697 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
698 except DbException as e:
699 self.logger.error("Cannot update vnf. {}".format(e))
700
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ";": keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO: skip them
                        continue
                    # match the RO vm entry by vdu id and replica count-index
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses reported by the VIM
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
797
798 def _get_ns_config_info(self, nsr_id):
799 """
800 Generates a mapping between vnf,vdu elements and the N2VC id
801 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
802 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
803 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
804 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
805 """
806 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
807 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
808 mapping = {}
809 ns_config_info = {"osm-config-mapping": mapping}
810 for vca in vca_deployed_list:
811 if not vca["member-vnf-index"]:
812 continue
813 if not vca["vdu_id"]:
814 mapping[vca["member-vnf-index"]] = vca["application"]
815 else:
816 mapping[
817 "{}.{}.{}".format(
818 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
819 )
820 ] = vca["application"]
821 return ns_config_info
822
823 async def _instantiate_ng_ro(
824 self,
825 logging_text,
826 nsr_id,
827 nsd,
828 db_nsr,
829 db_nslcmop,
830 db_vnfrs,
831 db_vnfds,
832 n2vc_key_list,
833 stage,
834 start_deploy,
835 timeout_ns_deploy,
836 ):
837
838 db_vims = {}
839
840 def get_vim_account(vim_account_id):
841 nonlocal db_vims
842 if vim_account_id in db_vims:
843 return db_vims[vim_account_id]
844 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
845 db_vims[vim_account_id] = db_vim
846 return db_vim
847
848 # modify target_vld info with instantiation parameters
849 def parse_vld_instantiation_params(
850 target_vim, target_vld, vld_params, target_sdn
851 ):
852 if vld_params.get("ip-profile"):
853 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
854 "ip-profile"
855 ]
856 if vld_params.get("provider-network"):
857 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
858 "provider-network"
859 ]
860 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
861 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
862 "provider-network"
863 ]["sdn-ports"]
864 if vld_params.get("wimAccountId"):
865 target_wim = "wim:{}".format(vld_params["wimAccountId"])
866 target_vld["vim_info"][target_wim] = {}
867 for param in ("vim-network-name", "vim-network-id"):
868 if vld_params.get(param):
869 if isinstance(vld_params[param], dict):
870 for vim, vim_net in vld_params[param].items():
871 other_target_vim = "vim:" + vim
872 populate_dict(
873 target_vld["vim_info"],
874 (other_target_vim, param.replace("-", "_")),
875 vim_net,
876 )
877 else: # isinstance str
878 target_vld["vim_info"][target_vim][
879 param.replace("-", "_")
880 ] = vld_params[param]
881 if vld_params.get("common_id"):
882 target_vld["common_id"] = vld_params.get("common_id")
883
884 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
885 def update_ns_vld_target(target, ns_params):
886 for vnf_params in ns_params.get("vnf", ()):
887 if vnf_params.get("vimAccountId"):
888 target_vnf = next(
889 (
890 vnfr
891 for vnfr in db_vnfrs.values()
892 if vnf_params["member-vnf-index"]
893 == vnfr["member-vnf-index-ref"]
894 ),
895 None,
896 )
897 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
898 for a_index, a_vld in enumerate(target["ns"]["vld"]):
899 target_vld = find_in_list(
900 get_iterable(vdur, "interfaces"),
901 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
902 )
903
904 vld_params = find_in_list(
905 get_iterable(ns_params, "vld"),
906 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
907 )
908 if target_vld:
909
910 if vnf_params.get("vimAccountId") not in a_vld.get(
911 "vim_info", {}
912 ):
913 target_vim_network_list = [
914 v for _, v in a_vld.get("vim_info").items()
915 ]
916 target_vim_network_name = next(
917 (
918 item.get("vim_network_name", "")
919 for item in target_vim_network_list
920 ),
921 "",
922 )
923
924 target["ns"]["vld"][a_index].get("vim_info").update(
925 {
926 "vim:{}".format(vnf_params["vimAccountId"]): {
927 "vim_network_name": target_vim_network_name,
928 }
929 }
930 )
931
932 if vld_params:
933 for param in ("vim-network-name", "vim-network-id"):
934 if vld_params.get(param) and isinstance(
935 vld_params[param], dict
936 ):
937 for vim, vim_net in vld_params[
938 param
939 ].items():
940 other_target_vim = "vim:" + vim
941 populate_dict(
942 target["ns"]["vld"][a_index].get(
943 "vim_info"
944 ),
945 (
946 other_target_vim,
947 param.replace("-", "_"),
948 ),
949 vim_net,
950 )
951
952 nslcmop_id = db_nslcmop["_id"]
953 target = {
954 "name": db_nsr["name"],
955 "ns": {"vld": []},
956 "vnf": [],
957 "image": deepcopy(db_nsr["image"]),
958 "flavor": deepcopy(db_nsr["flavor"]),
959 "action_id": nslcmop_id,
960 "cloud_init_content": {},
961 }
962 for image in target["image"]:
963 image["vim_info"] = {}
964 for flavor in target["flavor"]:
965 flavor["vim_info"] = {}
966 if db_nsr.get("affinity-or-anti-affinity-group"):
967 target["affinity-or-anti-affinity-group"] = deepcopy(
968 db_nsr["affinity-or-anti-affinity-group"]
969 )
970 for affinity_or_anti_affinity_group in target[
971 "affinity-or-anti-affinity-group"
972 ]:
973 affinity_or_anti_affinity_group["vim_info"] = {}
974
975 if db_nslcmop.get("lcmOperationType") != "instantiate":
976 # get parameters of instantiation:
977 db_nslcmop_instantiate = self.db.get_list(
978 "nslcmops",
979 {
980 "nsInstanceId": db_nslcmop["nsInstanceId"],
981 "lcmOperationType": "instantiate",
982 },
983 )[-1]
984 ns_params = db_nslcmop_instantiate.get("operationParams")
985 else:
986 ns_params = db_nslcmop.get("operationParams")
987 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
988 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
989
990 cp2target = {}
991 for vld_index, vld in enumerate(db_nsr.get("vld")):
992 target_vim = "vim:{}".format(ns_params["vimAccountId"])
993 target_vld = {
994 "id": vld["id"],
995 "name": vld["name"],
996 "mgmt-network": vld.get("mgmt-network", False),
997 "type": vld.get("type"),
998 "vim_info": {
999 target_vim: {
1000 "vim_network_name": vld.get("vim-network-name"),
1001 "vim_account_id": ns_params["vimAccountId"],
1002 }
1003 },
1004 }
1005 # check if this network needs SDN assist
1006 if vld.get("pci-interfaces"):
1007 db_vim = get_vim_account(ns_params["vimAccountId"])
1008 sdnc_id = db_vim["config"].get("sdn-controller")
1009 if sdnc_id:
1010 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1011 target_sdn = "sdn:{}".format(sdnc_id)
1012 target_vld["vim_info"][target_sdn] = {
1013 "sdn": True,
1014 "target_vim": target_vim,
1015 "vlds": [sdn_vld],
1016 "type": vld.get("type"),
1017 }
1018
1019 nsd_vnf_profiles = get_vnf_profiles(nsd)
1020 for nsd_vnf_profile in nsd_vnf_profiles:
1021 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1022 if cp["virtual-link-profile-id"] == vld["id"]:
1023 cp2target[
1024 "member_vnf:{}.{}".format(
1025 cp["constituent-cpd-id"][0][
1026 "constituent-base-element-id"
1027 ],
1028 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1029 )
1030 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1031
1032 # check at nsd descriptor, if there is an ip-profile
1033 vld_params = {}
1034 nsd_vlp = find_in_list(
1035 get_virtual_link_profiles(nsd),
1036 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1037 == vld["id"],
1038 )
1039 if (
1040 nsd_vlp
1041 and nsd_vlp.get("virtual-link-protocol-data")
1042 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1043 ):
1044 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1045 "l3-protocol-data"
1046 ]
1047 ip_profile_dest_data = {}
1048 if "ip-version" in ip_profile_source_data:
1049 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1050 "ip-version"
1051 ]
1052 if "cidr" in ip_profile_source_data:
1053 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1054 "cidr"
1055 ]
1056 if "gateway-ip" in ip_profile_source_data:
1057 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1058 "gateway-ip"
1059 ]
1060 if "dhcp-enabled" in ip_profile_source_data:
1061 ip_profile_dest_data["dhcp-params"] = {
1062 "enabled": ip_profile_source_data["dhcp-enabled"]
1063 }
1064 vld_params["ip-profile"] = ip_profile_dest_data
1065
1066 # update vld_params with instantiation params
1067 vld_instantiation_params = find_in_list(
1068 get_iterable(ns_params, "vld"),
1069 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1070 )
1071 if vld_instantiation_params:
1072 vld_params.update(vld_instantiation_params)
1073 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1074 target["ns"]["vld"].append(target_vld)
1075 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1076 update_ns_vld_target(target, ns_params)
1077
1078 for vnfr in db_vnfrs.values():
1079 vnfd = find_in_list(
1080 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1081 )
1082 vnf_params = find_in_list(
1083 get_iterable(ns_params, "vnf"),
1084 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1085 )
1086 target_vnf = deepcopy(vnfr)
1087 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1088 for vld in target_vnf.get("vld", ()):
1089 # check if connected to a ns.vld, to fill target'
1090 vnf_cp = find_in_list(
1091 vnfd.get("int-virtual-link-desc", ()),
1092 lambda cpd: cpd.get("id") == vld["id"],
1093 )
1094 if vnf_cp:
1095 ns_cp = "member_vnf:{}.{}".format(
1096 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1097 )
1098 if cp2target.get(ns_cp):
1099 vld["target"] = cp2target[ns_cp]
1100
1101 vld["vim_info"] = {
1102 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1103 }
1104 # check if this network needs SDN assist
1105 target_sdn = None
1106 if vld.get("pci-interfaces"):
1107 db_vim = get_vim_account(vnfr["vim-account-id"])
1108 sdnc_id = db_vim["config"].get("sdn-controller")
1109 if sdnc_id:
1110 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1111 target_sdn = "sdn:{}".format(sdnc_id)
1112 vld["vim_info"][target_sdn] = {
1113 "sdn": True,
1114 "target_vim": target_vim,
1115 "vlds": [sdn_vld],
1116 "type": vld.get("type"),
1117 }
1118
1119 # check at vnfd descriptor, if there is an ip-profile
1120 vld_params = {}
1121 vnfd_vlp = find_in_list(
1122 get_virtual_link_profiles(vnfd),
1123 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1124 )
1125 if (
1126 vnfd_vlp
1127 and vnfd_vlp.get("virtual-link-protocol-data")
1128 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1129 ):
1130 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1131 "l3-protocol-data"
1132 ]
1133 ip_profile_dest_data = {}
1134 if "ip-version" in ip_profile_source_data:
1135 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1136 "ip-version"
1137 ]
1138 if "cidr" in ip_profile_source_data:
1139 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1140 "cidr"
1141 ]
1142 if "gateway-ip" in ip_profile_source_data:
1143 ip_profile_dest_data[
1144 "gateway-address"
1145 ] = ip_profile_source_data["gateway-ip"]
1146 if "dhcp-enabled" in ip_profile_source_data:
1147 ip_profile_dest_data["dhcp-params"] = {
1148 "enabled": ip_profile_source_data["dhcp-enabled"]
1149 }
1150
1151 vld_params["ip-profile"] = ip_profile_dest_data
1152 # update vld_params with instantiation params
1153 if vnf_params:
1154 vld_instantiation_params = find_in_list(
1155 get_iterable(vnf_params, "internal-vld"),
1156 lambda i_vld: i_vld["name"] == vld["id"],
1157 )
1158 if vld_instantiation_params:
1159 vld_params.update(vld_instantiation_params)
1160 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1161
1162 vdur_list = []
1163 for vdur in target_vnf.get("vdur", ()):
1164 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1165 continue # This vdu must not be created
1166 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1167
1168 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1169
1170 if ssh_keys_all:
1171 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1172 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1173 if (
1174 vdu_configuration
1175 and vdu_configuration.get("config-access")
1176 and vdu_configuration.get("config-access").get("ssh-access")
1177 ):
1178 vdur["ssh-keys"] = ssh_keys_all
1179 vdur["ssh-access-required"] = vdu_configuration[
1180 "config-access"
1181 ]["ssh-access"]["required"]
1182 elif (
1183 vnf_configuration
1184 and vnf_configuration.get("config-access")
1185 and vnf_configuration.get("config-access").get("ssh-access")
1186 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1187 ):
1188 vdur["ssh-keys"] = ssh_keys_all
1189 vdur["ssh-access-required"] = vnf_configuration[
1190 "config-access"
1191 ]["ssh-access"]["required"]
1192 elif ssh_keys_instantiation and find_in_list(
1193 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1194 ):
1195 vdur["ssh-keys"] = ssh_keys_instantiation
1196
1197 self.logger.debug("NS > vdur > {}".format(vdur))
1198
1199 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1200 # cloud-init
1201 if vdud.get("cloud-init-file"):
1202 vdur["cloud-init"] = "{}:file:{}".format(
1203 vnfd["_id"], vdud.get("cloud-init-file")
1204 )
1205 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1206 if vdur["cloud-init"] not in target["cloud_init_content"]:
1207 base_folder = vnfd["_admin"]["storage"]
1208 if base_folder["pkg-dir"]:
1209 cloud_init_file = "{}/{}/cloud_init/{}".format(
1210 base_folder["folder"],
1211 base_folder["pkg-dir"],
1212 vdud.get("cloud-init-file"),
1213 )
1214 else:
1215 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1216 base_folder["folder"],
1217 vdud.get("cloud-init-file"),
1218 )
1219 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1220 target["cloud_init_content"][
1221 vdur["cloud-init"]
1222 ] = ci_file.read()
1223 elif vdud.get("cloud-init"):
1224 vdur["cloud-init"] = "{}:vdu:{}".format(
1225 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1226 )
1227 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1228 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1229 "cloud-init"
1230 ]
1231 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1232 deploy_params_vdu = self._format_additional_params(
1233 vdur.get("additionalParams") or {}
1234 )
1235 deploy_params_vdu["OSM"] = get_osm_params(
1236 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1237 )
1238 vdur["additionalParams"] = deploy_params_vdu
1239
1240 # flavor
1241 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1242 if target_vim not in ns_flavor["vim_info"]:
1243 ns_flavor["vim_info"][target_vim] = {}
1244
1245 # deal with images
1246 # in case alternative images are provided we must check if they should be applied
1247 # for the vim_type, modify the vim_type taking into account
1248 ns_image_id = int(vdur["ns-image-id"])
1249 if vdur.get("alt-image-ids"):
1250 db_vim = get_vim_account(vnfr["vim-account-id"])
1251 vim_type = db_vim["vim_type"]
1252 for alt_image_id in vdur.get("alt-image-ids"):
1253 ns_alt_image = target["image"][int(alt_image_id)]
1254 if vim_type == ns_alt_image.get("vim-type"):
1255 # must use alternative image
1256 self.logger.debug(
1257 "use alternative image id: {}".format(alt_image_id)
1258 )
1259 ns_image_id = alt_image_id
1260 vdur["ns-image-id"] = ns_image_id
1261 break
1262 ns_image = target["image"][int(ns_image_id)]
1263 if target_vim not in ns_image["vim_info"]:
1264 ns_image["vim_info"][target_vim] = {}
1265
1266 # Affinity groups
1267 if vdur.get("affinity-or-anti-affinity-group-id"):
1268 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1269 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1270 if target_vim not in ns_ags["vim_info"]:
1271 ns_ags["vim_info"][target_vim] = {}
1272
1273 vdur["vim_info"] = {target_vim: {}}
1274 # instantiation parameters
1275 if vnf_params:
1276 vdu_instantiation_params = find_in_list(
1277 get_iterable(vnf_params, "vdu"),
1278 lambda i_vdu: i_vdu["id"] == vdud["id"],
1279 )
1280 if vdu_instantiation_params:
1281 # Parse the vdu_volumes from the instantiation params
1282 vdu_volumes = get_volumes_from_instantiation_params(
1283 vdu_instantiation_params, vdud
1284 )
1285 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1286 vdur_list.append(vdur)
1287 target_vnf["vdur"] = vdur_list
1288 target["vnf"].append(target_vnf)
1289
1290 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1291 desc = await self.RO.deploy(nsr_id, target)
1292 self.logger.debug("RO return > {}".format(desc))
1293 action_id = desc["action_id"]
1294 await self._wait_ng_ro(
1295 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
1296 operation="instantiation"
1297 )
1298
1299 # Updating NSR
1300 db_nsr_update = {
1301 "_admin.deployed.RO.operational-status": "running",
1302 "detailed-status": " ".join(stage),
1303 }
1304 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1305 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1306 self._write_op_status(nslcmop_id, stage)
1307 self.logger.debug(
1308 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1309 )
1310 return
1311
1312 async def _wait_ng_ro(
1313 self,
1314 nsr_id,
1315 action_id,
1316 nslcmop_id=None,
1317 start_time=None,
1318 timeout=600,
1319 stage=None,
1320 operation=None,
1321 ):
1322 detailed_status_old = None
1323 db_nsr_update = {}
1324 start_time = start_time or time()
1325 while time() <= start_time + timeout:
1326 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1327 self.logger.debug("Wait NG RO > {}".format(desc_status))
1328 if desc_status["status"] == "FAILED":
1329 raise NgRoException(desc_status["details"])
1330 elif desc_status["status"] == "BUILD":
1331 if stage:
1332 stage[2] = "VIM: ({})".format(desc_status["details"])
1333 elif desc_status["status"] == "DONE":
1334 if stage:
1335 stage[2] = "Deployed at VIM"
1336 break
1337 else:
1338 assert False, "ROclient.check_ns_status returns unknown {}".format(
1339 desc_status["status"]
1340 )
1341 if stage and nslcmop_id and stage[2] != detailed_status_old:
1342 detailed_status_old = stage[2]
1343 db_nsr_update["detailed-status"] = " ".join(stage)
1344 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1345 self._write_op_status(nslcmop_id, stage)
1346 await asyncio.sleep(15, loop=self.loop)
1347 else: # timeout_ns_deploy
1348 raise NgRoException("Timeout waiting ns to deploy")
1349
1350 async def _terminate_ng_ro(
1351 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1352 ):
1353 db_nsr_update = {}
1354 failed_detail = []
1355 action_id = None
1356 start_deploy = time()
1357 try:
1358 target = {
1359 "ns": {"vld": []},
1360 "vnf": [],
1361 "image": [],
1362 "flavor": [],
1363 "action_id": nslcmop_id,
1364 }
1365 desc = await self.RO.deploy(nsr_id, target)
1366 action_id = desc["action_id"]
1367 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1368 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1369 self.logger.debug(
1370 logging_text
1371 + "ns terminate action at RO. action_id={}".format(action_id)
1372 )
1373
1374 # wait until done
1375 delete_timeout = 20 * 60 # 20 minutes
1376 await self._wait_ng_ro(
1377 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1378 operation="termination"
1379 )
1380
1381 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1382 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1383 # delete all nsr
1384 await self.RO.delete(nsr_id)
1385 except Exception as e:
1386 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1387 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1388 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1389 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1390 self.logger.debug(
1391 logging_text + "RO_action_id={} already deleted".format(action_id)
1392 )
1393 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1394 failed_detail.append("delete conflict: {}".format(e))
1395 self.logger.debug(
1396 logging_text
1397 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1398 )
1399 else:
1400 failed_detail.append("delete error: {}".format(e))
1401 self.logger.error(
1402 logging_text
1403 + "RO_action_id={} delete error: {}".format(action_id, e)
1404 )
1405
1406 if failed_detail:
1407 stage[2] = "Error deleting from VIM"
1408 else:
1409 stage[2] = "Deleted from VIM"
1410 db_nsr_update["detailed-status"] = " ".join(stage)
1411 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1412 self._write_op_status(nslcmop_id, stage)
1413
1414 if failed_detail:
1415 raise LcmException("; ".join(failed_detail))
1416 return
1417
1418 async def instantiate_RO(
1419 self,
1420 logging_text,
1421 nsr_id,
1422 nsd,
1423 db_nsr,
1424 db_nslcmop,
1425 db_vnfrs,
1426 db_vnfds,
1427 n2vc_key_list,
1428 stage,
1429 ):
1430 """
1431 Instantiate at RO
1432 :param logging_text: preffix text to use at logging
1433 :param nsr_id: nsr identity
1434 :param nsd: database content of ns descriptor
1435 :param db_nsr: database content of ns record
1436 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1437 :param db_vnfrs:
1438 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1439 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1440 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1441 :return: None or exception
1442 """
1443 try:
1444 start_deploy = time()
1445 ns_params = db_nslcmop.get("operationParams")
1446 if ns_params and ns_params.get("timeout_ns_deploy"):
1447 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1448 else:
1449 timeout_ns_deploy = self.timeout.get(
1450 "ns_deploy", self.timeout_ns_deploy
1451 )
1452
1453 # Check for and optionally request placement optimization. Database will be updated if placement activated
1454 stage[2] = "Waiting for Placement."
1455 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1456 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1457 for vnfr in db_vnfrs.values():
1458 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1459 break
1460 else:
1461 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1462
1463 return await self._instantiate_ng_ro(
1464 logging_text,
1465 nsr_id,
1466 nsd,
1467 db_nsr,
1468 db_nslcmop,
1469 db_vnfrs,
1470 db_vnfds,
1471 n2vc_key_list,
1472 stage,
1473 start_deploy,
1474 timeout_ns_deploy,
1475 )
1476 except Exception as e:
1477 stage[2] = "ERROR deploying at VIM"
1478 self.set_vnfr_at_error(db_vnfrs, str(e))
1479 self.logger.error(
1480 "Error deploying at VIM {}".format(e),
1481 exc_info=not isinstance(
1482 e,
1483 (
1484 ROclient.ROClientException,
1485 LcmException,
1486 DbException,
1487 NgRoException,
1488 ),
1489 ),
1490 )
1491 raise
1492
1493 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1494 """
1495 Wait for kdu to be up, get ip address
1496 :param logging_text: prefix use for logging
1497 :param nsr_id:
1498 :param vnfr_id:
1499 :param kdu_name:
1500 :return: IP address, K8s services
1501 """
1502
1503 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1504 nb_tries = 0
1505
1506 while nb_tries < 360:
1507 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1508 kdur = next(
1509 (
1510 x
1511 for x in get_iterable(db_vnfr, "kdur")
1512 if x.get("kdu-name") == kdu_name
1513 ),
1514 None,
1515 )
1516 if not kdur:
1517 raise LcmException(
1518 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1519 )
1520 if kdur.get("status"):
1521 if kdur["status"] in ("READY", "ENABLED"):
1522 return kdur.get("ip-address"), kdur.get("services")
1523 else:
1524 raise LcmException(
1525 "target KDU={} is in error state".format(kdu_name)
1526 )
1527
1528 await asyncio.sleep(10, loop=self.loop)
1529 nb_tries += 1
1530 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1531
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: NS record id
        :param vnfr_id: VNF record id holding the target VDU
        :param vdu_id: target VDU id; None to target the VNF's management VM
        :param vdu_index: count-index of the target VDU (used with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0          # counts key-injection retries (legacy RO path only)
        target_vdu_id = None  # set once the target VM is found ACTIVE with an IP
        ro_retries = 0        # counts polling iterations (10 s each)

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            # NOTE(review): `loop` kwarg was removed from asyncio.sleep in py3.10
            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        # management IP not assigned yet; poll again
                        continue
                    # locate the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # locate the vdur by id and count-index
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up; otherwise require ACTIVE from either
                # the legacy status field or the NG-RO vim_status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is not supported on physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: injection is requested as a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
                        break
                    else:
                        # legacy RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            # no VM reported success; surface the last description
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # legacy RO failures are retried up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the VM is up and has an IP, we are done
                break

        return ip_address
1708
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id whose configurationStatus is polled
        :param vca_deployed_list: list of deployed VCA descriptors
        :param vca_index: index of the VCA whose dependencies must be ready
        :raises LcmException: when a dependency is BROKEN or on timeout
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): decremented by 1 per 10 s sleep, so the real ceiling is
        # ~3000 s, not 300 s as the value suggests — confirm intended units
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # NOTE(review): this reassignment is never read afterwards
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a dependency is any other VCA of the same VNF, or any VCA at
                # all when this one is NS-level (no member-vnf-index)
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still in progress: stop scanning and sleep below
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1746
1747 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1748 vca_id = None
1749 if db_vnfr:
1750 vca_id = deep_get(db_vnfr, ("vca-id",))
1751 elif db_nsr:
1752 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1753 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1754 return vca_id
1755
1756 async def instantiate_N2VC(
1757 self,
1758 logging_text,
1759 vca_index,
1760 nsi_id,
1761 db_nsr,
1762 db_vnfr,
1763 vdu_id,
1764 kdu_name,
1765 vdu_index,
1766 config_descriptor,
1767 deploy_params,
1768 base_folder,
1769 nslcmop_id,
1770 stage,
1771 vca_type,
1772 vca_name,
1773 ee_config_descriptor,
1774 ):
1775 nsr_id = db_nsr["_id"]
1776 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1777 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1778 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1779 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1780 db_dict = {
1781 "collection": "nsrs",
1782 "filter": {"_id": nsr_id},
1783 "path": db_update_entry,
1784 }
1785 step = ""
1786 try:
1787
1788 element_type = "NS"
1789 element_under_configuration = nsr_id
1790
1791 vnfr_id = None
1792 if db_vnfr:
1793 vnfr_id = db_vnfr["_id"]
1794 osm_config["osm"]["vnf_id"] = vnfr_id
1795
1796 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1797
1798 if vca_type == "native_charm":
1799 index_number = 0
1800 else:
1801 index_number = vdu_index or 0
1802
1803 if vnfr_id:
1804 element_type = "VNF"
1805 element_under_configuration = vnfr_id
1806 namespace += ".{}-{}".format(vnfr_id, index_number)
1807 if vdu_id:
1808 namespace += ".{}-{}".format(vdu_id, index_number)
1809 element_type = "VDU"
1810 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1811 osm_config["osm"]["vdu_id"] = vdu_id
1812 elif kdu_name:
1813 namespace += ".{}".format(kdu_name)
1814 element_type = "KDU"
1815 element_under_configuration = kdu_name
1816 osm_config["osm"]["kdu_name"] = kdu_name
1817
1818 # Get artifact path
1819 if base_folder["pkg-dir"]:
1820 artifact_path = "{}/{}/{}/{}".format(
1821 base_folder["folder"],
1822 base_folder["pkg-dir"],
1823 "charms"
1824 if vca_type
1825 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1826 else "helm-charts",
1827 vca_name,
1828 )
1829 else:
1830 artifact_path = "{}/Scripts/{}/{}/".format(
1831 base_folder["folder"],
1832 "charms"
1833 if vca_type
1834 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1835 else "helm-charts",
1836 vca_name,
1837 )
1838
1839 self.logger.debug("Artifact path > {}".format(artifact_path))
1840
1841 # get initial_config_primitive_list that applies to this element
1842 initial_config_primitive_list = config_descriptor.get(
1843 "initial-config-primitive"
1844 )
1845
1846 self.logger.debug(
1847 "Initial config primitive list > {}".format(
1848 initial_config_primitive_list
1849 )
1850 )
1851
1852 # add config if not present for NS charm
1853 ee_descriptor_id = ee_config_descriptor.get("id")
1854 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1855 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1856 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1857 )
1858
1859 self.logger.debug(
1860 "Initial config primitive list #2 > {}".format(
1861 initial_config_primitive_list
1862 )
1863 )
1864 # n2vc_redesign STEP 3.1
1865 # find old ee_id if exists
1866 ee_id = vca_deployed.get("ee_id")
1867
1868 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1869 # create or register execution environment in VCA
1870 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1871
1872 self._write_configuration_status(
1873 nsr_id=nsr_id,
1874 vca_index=vca_index,
1875 status="CREATING",
1876 element_under_configuration=element_under_configuration,
1877 element_type=element_type,
1878 )
1879
1880 step = "create execution environment"
1881 self.logger.debug(logging_text + step)
1882
1883 ee_id = None
1884 credentials = None
1885 if vca_type == "k8s_proxy_charm":
1886 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1887 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1888 namespace=namespace,
1889 artifact_path=artifact_path,
1890 db_dict=db_dict,
1891 vca_id=vca_id,
1892 )
1893 elif vca_type == "helm" or vca_type == "helm-v3":
1894 ee_id, credentials = await self.vca_map[
1895 vca_type
1896 ].create_execution_environment(
1897 namespace=namespace,
1898 reuse_ee_id=ee_id,
1899 db_dict=db_dict,
1900 config=osm_config,
1901 artifact_path=artifact_path,
1902 vca_type=vca_type,
1903 )
1904 else:
1905 ee_id, credentials = await self.vca_map[
1906 vca_type
1907 ].create_execution_environment(
1908 namespace=namespace,
1909 reuse_ee_id=ee_id,
1910 db_dict=db_dict,
1911 vca_id=vca_id,
1912 )
1913
1914 elif vca_type == "native_charm":
1915 step = "Waiting to VM being up and getting IP address"
1916 self.logger.debug(logging_text + step)
1917 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1918 logging_text,
1919 nsr_id,
1920 vnfr_id,
1921 vdu_id,
1922 vdu_index,
1923 user=None,
1924 pub_key=None,
1925 )
1926 credentials = {"hostname": rw_mgmt_ip}
1927 # get username
1928 username = deep_get(
1929 config_descriptor, ("config-access", "ssh-access", "default-user")
1930 )
1931 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1932 # merged. Meanwhile let's get username from initial-config-primitive
1933 if not username and initial_config_primitive_list:
1934 for config_primitive in initial_config_primitive_list:
1935 for param in config_primitive.get("parameter", ()):
1936 if param["name"] == "ssh-username":
1937 username = param["value"]
1938 break
1939 if not username:
1940 raise LcmException(
1941 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1942 "'config-access.ssh-access.default-user'"
1943 )
1944 credentials["username"] = username
1945 # n2vc_redesign STEP 3.2
1946
1947 self._write_configuration_status(
1948 nsr_id=nsr_id,
1949 vca_index=vca_index,
1950 status="REGISTERING",
1951 element_under_configuration=element_under_configuration,
1952 element_type=element_type,
1953 )
1954
1955 step = "register execution environment {}".format(credentials)
1956 self.logger.debug(logging_text + step)
1957 ee_id = await self.vca_map[vca_type].register_execution_environment(
1958 credentials=credentials,
1959 namespace=namespace,
1960 db_dict=db_dict,
1961 vca_id=vca_id,
1962 )
1963
1964 # for compatibility with MON/POL modules, the need model and application name at database
1965 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1966 ee_id_parts = ee_id.split(".")
1967 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1968 if len(ee_id_parts) >= 2:
1969 model_name = ee_id_parts[0]
1970 application_name = ee_id_parts[1]
1971 db_nsr_update[db_update_entry + "model"] = model_name
1972 db_nsr_update[db_update_entry + "application"] = application_name
1973
1974 # n2vc_redesign STEP 3.3
1975 step = "Install configuration Software"
1976
1977 self._write_configuration_status(
1978 nsr_id=nsr_id,
1979 vca_index=vca_index,
1980 status="INSTALLING SW",
1981 element_under_configuration=element_under_configuration,
1982 element_type=element_type,
1983 other_update=db_nsr_update,
1984 )
1985
1986 # TODO check if already done
1987 self.logger.debug(logging_text + step)
1988 config = None
1989 if vca_type == "native_charm":
1990 config_primitive = next(
1991 (p for p in initial_config_primitive_list if p["name"] == "config"),
1992 None,
1993 )
1994 if config_primitive:
1995 config = self._map_primitive_params(
1996 config_primitive, {}, deploy_params
1997 )
1998 num_units = 1
1999 if vca_type == "lxc_proxy_charm":
2000 if element_type == "NS":
2001 num_units = db_nsr.get("config-units") or 1
2002 elif element_type == "VNF":
2003 num_units = db_vnfr.get("config-units") or 1
2004 elif element_type == "VDU":
2005 for v in db_vnfr["vdur"]:
2006 if vdu_id == v["vdu-id-ref"]:
2007 num_units = v.get("config-units") or 1
2008 break
2009 if vca_type != "k8s_proxy_charm":
2010 await self.vca_map[vca_type].install_configuration_sw(
2011 ee_id=ee_id,
2012 artifact_path=artifact_path,
2013 db_dict=db_dict,
2014 config=config,
2015 num_units=num_units,
2016 vca_id=vca_id,
2017 vca_type=vca_type,
2018 )
2019
2020 # write in db flag of configuration_sw already installed
2021 self.update_db_2(
2022 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2023 )
2024
2025 # add relations for this VCA (wait for other peers related with this VCA)
2026 await self._add_vca_relations(
2027 logging_text=logging_text,
2028 nsr_id=nsr_id,
2029 vca_type=vca_type,
2030 vca_index=vca_index,
2031 )
2032
2033 # if SSH access is required, then get execution environment SSH public
2034 # if native charm we have waited already to VM be UP
2035 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2036 pub_key = None
2037 user = None
2038 # self.logger.debug("get ssh key block")
2039 if deep_get(
2040 config_descriptor, ("config-access", "ssh-access", "required")
2041 ):
2042 # self.logger.debug("ssh key needed")
2043 # Needed to inject a ssh key
2044 user = deep_get(
2045 config_descriptor,
2046 ("config-access", "ssh-access", "default-user"),
2047 )
2048 step = "Install configuration Software, getting public ssh key"
2049 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2050 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2051 )
2052
2053 step = "Insert public key into VM user={} ssh_key={}".format(
2054 user, pub_key
2055 )
2056 else:
2057 # self.logger.debug("no need to get ssh key")
2058 step = "Waiting to VM being up and getting IP address"
2059 self.logger.debug(logging_text + step)
2060
2061 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2062 rw_mgmt_ip = None
2063
2064 # n2vc_redesign STEP 5.1
2065 # wait for RO (ip-address) Insert pub_key into VM
2066 if vnfr_id:
2067 if kdu_name:
2068 rw_mgmt_ip, services = await self.wait_kdu_up(
2069 logging_text, nsr_id, vnfr_id, kdu_name
2070 )
2071 vnfd = self.db.get_one(
2072 "vnfds_revisions",
2073 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2074 )
2075 kdu = get_kdu(vnfd, kdu_name)
2076 kdu_services = [
2077 service["name"] for service in get_kdu_services(kdu)
2078 ]
2079 exposed_services = []
2080 for service in services:
2081 if any(s in service["name"] for s in kdu_services):
2082 exposed_services.append(service)
2083 await self.vca_map[vca_type].exec_primitive(
2084 ee_id=ee_id,
2085 primitive_name="config",
2086 params_dict={
2087 "osm-config": json.dumps(
2088 OsmConfigBuilder(
2089 k8s={"services": exposed_services}
2090 ).build()
2091 )
2092 },
2093 vca_id=vca_id,
2094 )
2095
2096 # This verification is needed in order to avoid trying to add a public key
2097 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2098 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2099 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2100 # or it is a KNF)
2101 elif db_vnfr.get('vdur'):
2102 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2103 logging_text,
2104 nsr_id,
2105 vnfr_id,
2106 vdu_id,
2107 vdu_index,
2108 user=user,
2109 pub_key=pub_key,
2110 )
2111
2112 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2113
2114 # store rw_mgmt_ip in deploy params for later replacement
2115 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2116
2117 # n2vc_redesign STEP 6 Execute initial config primitive
2118 step = "execute initial config primitive"
2119
2120 # wait for dependent primitives execution (NS -> VNF -> VDU)
2121 if initial_config_primitive_list:
2122 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2123
2124 # stage, in function of element type: vdu, kdu, vnf or ns
2125 my_vca = vca_deployed_list[vca_index]
2126 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2127 # VDU or KDU
2128 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2129 elif my_vca.get("member-vnf-index"):
2130 # VNF
2131 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2132 else:
2133 # NS
2134 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2135
2136 self._write_configuration_status(
2137 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2138 )
2139
2140 self._write_op_status(op_id=nslcmop_id, stage=stage)
2141
2142 check_if_terminated_needed = True
2143 for initial_config_primitive in initial_config_primitive_list:
2144 # adding information on the vca_deployed if it is a NS execution environment
2145 if not vca_deployed["member-vnf-index"]:
2146 deploy_params["ns_config_info"] = json.dumps(
2147 self._get_ns_config_info(nsr_id)
2148 )
2149 # TODO check if already done
2150 primitive_params_ = self._map_primitive_params(
2151 initial_config_primitive, {}, deploy_params
2152 )
2153
2154 step = "execute primitive '{}' params '{}'".format(
2155 initial_config_primitive["name"], primitive_params_
2156 )
2157 self.logger.debug(logging_text + step)
2158 await self.vca_map[vca_type].exec_primitive(
2159 ee_id=ee_id,
2160 primitive_name=initial_config_primitive["name"],
2161 params_dict=primitive_params_,
2162 db_dict=db_dict,
2163 vca_id=vca_id,
2164 vca_type=vca_type,
2165 )
2166 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2167 if check_if_terminated_needed:
2168 if config_descriptor.get("terminate-config-primitive"):
2169 self.update_db_2(
2170 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2171 )
2172 check_if_terminated_needed = False
2173
2174 # TODO register in database that primitive is done
2175
2176 # STEP 7 Configure metrics
2177 if vca_type == "helm" or vca_type == "helm-v3":
2178 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2179 ee_id=ee_id,
2180 artifact_path=artifact_path,
2181 ee_config_descriptor=ee_config_descriptor,
2182 vnfr_id=vnfr_id,
2183 nsr_id=nsr_id,
2184 target_ip=rw_mgmt_ip,
2185 )
2186 if prometheus_jobs:
2187 self.update_db_2(
2188 "nsrs",
2189 nsr_id,
2190 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2191 )
2192
2193 for job in prometheus_jobs:
2194 self.db.set_one(
2195 "prometheus_jobs",
2196 {"job_name": job["job_name"]},
2197 job,
2198 upsert=True,
2199 fail_on_empty=False,
2200 )
2201
2202 step = "instantiated at VCA"
2203 self.logger.debug(logging_text + step)
2204
2205 self._write_configuration_status(
2206 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2207 )
2208
2209 except Exception as e: # TODO not use Exception but N2VC exception
2210 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2211 if not isinstance(
2212 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2213 ):
2214 self.logger.error(
2215 "Exception while {} : {}".format(step, e), exc_info=True
2216 )
2217 self._write_configuration_status(
2218 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2219 )
2220 raise LcmException("{} {}".format(step, e)) from e
2221
2222 def _write_ns_status(
2223 self,
2224 nsr_id: str,
2225 ns_state: str,
2226 current_operation: str,
2227 current_operation_id: str,
2228 error_description: str = None,
2229 error_detail: str = None,
2230 other_update: dict = None,
2231 ):
2232 """
2233 Update db_nsr fields.
2234 :param nsr_id:
2235 :param ns_state:
2236 :param current_operation:
2237 :param current_operation_id:
2238 :param error_description:
2239 :param error_detail:
2240 :param other_update: Other required changes at database if provided, will be cleared
2241 :return:
2242 """
2243 try:
2244 db_dict = other_update or {}
2245 db_dict[
2246 "_admin.nslcmop"
2247 ] = current_operation_id # for backward compatibility
2248 db_dict["_admin.current-operation"] = current_operation_id
2249 db_dict["_admin.operation-type"] = (
2250 current_operation if current_operation != "IDLE" else None
2251 )
2252 db_dict["currentOperation"] = current_operation
2253 db_dict["currentOperationID"] = current_operation_id
2254 db_dict["errorDescription"] = error_description
2255 db_dict["errorDetail"] = error_detail
2256
2257 if ns_state:
2258 db_dict["nsState"] = ns_state
2259 self.update_db_2("nsrs", nsr_id, db_dict)
2260 except DbException as e:
2261 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2262
2263 def _write_op_status(
2264 self,
2265 op_id: str,
2266 stage: list = None,
2267 error_message: str = None,
2268 queuePosition: int = 0,
2269 operation_state: str = None,
2270 other_update: dict = None,
2271 ):
2272 try:
2273 db_dict = other_update or {}
2274 db_dict["queuePosition"] = queuePosition
2275 if isinstance(stage, list):
2276 db_dict["stage"] = stage[0]
2277 db_dict["detailed-status"] = " ".join(stage)
2278 elif stage is not None:
2279 db_dict["stage"] = str(stage)
2280
2281 if error_message is not None:
2282 db_dict["errorMessage"] = error_message
2283 if operation_state is not None:
2284 db_dict["operationState"] = operation_state
2285 db_dict["statusEnteredTime"] = time()
2286 self.update_db_2("nslcmops", op_id, db_dict)
2287 except DbException as e:
2288 self.logger.warn(
2289 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2290 )
2291
2292 def _write_all_config_status(self, db_nsr: dict, status: str):
2293 try:
2294 nsr_id = db_nsr["_id"]
2295 # configurationStatus
2296 config_status = db_nsr.get("configurationStatus")
2297 if config_status:
2298 db_nsr_update = {
2299 "configurationStatus.{}.status".format(index): status
2300 for index, v in enumerate(config_status)
2301 if v
2302 }
2303 # update status
2304 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2305
2306 except DbException as e:
2307 self.logger.warn(
2308 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2309 )
2310
2311 def _write_configuration_status(
2312 self,
2313 nsr_id: str,
2314 vca_index: int,
2315 status: str = None,
2316 element_under_configuration: str = None,
2317 element_type: str = None,
2318 other_update: dict = None,
2319 ):
2320
2321 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2322 # .format(vca_index, status))
2323
2324 try:
2325 db_path = "configurationStatus.{}.".format(vca_index)
2326 db_dict = other_update or {}
2327 if status:
2328 db_dict[db_path + "status"] = status
2329 if element_under_configuration:
2330 db_dict[
2331 db_path + "elementUnderConfiguration"
2332 ] = element_under_configuration
2333 if element_type:
2334 db_dict[db_path + "elementType"] = element_type
2335 self.update_db_2("nsrs", nsr_id, db_dict)
2336 except DbException as e:
2337 self.logger.warn(
2338 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2339 status, nsr_id, vca_index, e
2340 )
2341 )
2342
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
        computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # request the placement computation to the PLA module via kafka
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the database (not kafka) so the answer is seen even if a
            # different LCM worker consumed it; total budget 10 polls * 5s
            db_poll_interval = 5
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # apply the computed vim-account-id to each vnfr, both at
            # database and in the caller-provided db_vnfrs dict
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a computed vim account or unknown vnfs
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2391
2392 def update_nsrs_with_pla_result(self, params):
2393 try:
2394 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2395 self.update_db_2(
2396 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2397 )
2398 except Exception as e:
2399 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2400
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a NS: read descriptors/records from database, deploy KDUs,
        launch the VIM deployment (RO) as a background task, deploy the
        execution environments (N2VC) for NS/VNF/VDU/KDU charms, and finally
        wait for all pending tasks, writing status/result at database.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Result is stored at database (nsrs/nslcmops records) and
            notified via kafka
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-serialized; decode it here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                # kdur additionalParams are stored JSON-serialized; decode them
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): vnfd_id (a str) is tested against a list of
                # vnfd dicts, so this check is always True and shared vnfds are
                # re-read and appended once per vnfr. Harmless for the later
                # find_in_list lookups, but redundant DB reads — confirm.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            # VIM deployment runs concurrently with the N2VC deployments below;
            # completion is awaited in the finally block via _wait_for_tasks
            stage[1] = "Deploying NS at VIM."
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if the vnfd declares one
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms, if declared
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of stuff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE(review): `as exc` shadows the outer `exc` variable and
                # Python deletes it at the end of this clause; the outer value
                # is not read afterwards, so no bug manifests — confirm before
                # reusing `exc` below.
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            # notify the operation result via kafka (consumed e.g. by NBI)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2886
2887 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2888 if vnfd_id not in cached_vnfds:
2889 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2890 return cached_vnfds[vnfd_id]
2891
2892 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2893 if vnf_profile_id not in cached_vnfrs:
2894 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2895 "vnfrs",
2896 {
2897 "member-vnf-index-ref": vnf_profile_id,
2898 "nsr-id-ref": nsr_id,
2899 },
2900 )
2901 return cached_vnfrs[vnf_profile_id]
2902
2903 def _is_deployed_vca_in_relation(
2904 self, vca: DeployedVCA, relation: Relation
2905 ) -> bool:
2906 found = False
2907 for endpoint in (relation.provider, relation.requirer):
2908 if endpoint["kdu-resource-profile-id"]:
2909 continue
2910 found = (
2911 vca.vnf_profile_id == endpoint.vnf_profile_id
2912 and vca.vdu_profile_id == endpoint.vdu_profile_id
2913 and vca.execution_environment_ref == endpoint.execution_environment_ref
2914 )
2915 if found:
2916 break
2917 return found
2918
2919 def _update_ee_relation_data_with_implicit_data(
2920 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2921 ):
2922 ee_relation_data = safe_get_ee_relation(
2923 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2924 )
2925 ee_relation_level = EELevel.get_level(ee_relation_data)
2926 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2927 "execution-environment-ref"
2928 ]:
2929 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2930 vnfd_id = vnf_profile["vnfd-id"]
2931 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2932 entity_id = (
2933 vnfd_id
2934 if ee_relation_level == EELevel.VNF
2935 else ee_relation_data["vdu-profile-id"]
2936 )
2937 ee = get_juju_ee_ref(db_vnfd, entity_id)
2938 if not ee:
2939 raise Exception(
2940 f"not execution environments found for ee_relation {ee_relation_data}"
2941 )
2942 ee_relation_data["execution-environment-ref"] = ee["id"]
2943 return ee_relation_data
2944
2945 def _get_ns_relations(
2946 self,
2947 nsr_id: str,
2948 nsd: Dict[str, Any],
2949 vca: DeployedVCA,
2950 cached_vnfds: Dict[str, Any],
2951 ) -> List[Relation]:
2952 relations = []
2953 db_ns_relations = get_ns_configuration_relation_list(nsd)
2954 for r in db_ns_relations:
2955 provider_dict = None
2956 requirer_dict = None
2957 if all(key in r for key in ("provider", "requirer")):
2958 provider_dict = r["provider"]
2959 requirer_dict = r["requirer"]
2960 elif "entities" in r:
2961 provider_id = r["entities"][0]["id"]
2962 provider_dict = {
2963 "nsr-id": nsr_id,
2964 "endpoint": r["entities"][0]["endpoint"],
2965 }
2966 if provider_id != nsd["id"]:
2967 provider_dict["vnf-profile-id"] = provider_id
2968 requirer_id = r["entities"][1]["id"]
2969 requirer_dict = {
2970 "nsr-id": nsr_id,
2971 "endpoint": r["entities"][1]["endpoint"],
2972 }
2973 if requirer_id != nsd["id"]:
2974 requirer_dict["vnf-profile-id"] = requirer_id
2975 else:
2976 raise Exception(
2977 "provider/requirer or entities must be included in the relation."
2978 )
2979 relation_provider = self._update_ee_relation_data_with_implicit_data(
2980 nsr_id, nsd, provider_dict, cached_vnfds
2981 )
2982 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2983 nsr_id, nsd, requirer_dict, cached_vnfds
2984 )
2985 provider = EERelation(relation_provider)
2986 requirer = EERelation(relation_requirer)
2987 relation = Relation(r["name"], provider, requirer)
2988 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2989 if vca_in_relation:
2990 relations.append(relation)
2991 return relations
2992
2993 def _get_vnf_relations(
2994 self,
2995 nsr_id: str,
2996 nsd: Dict[str, Any],
2997 vca: DeployedVCA,
2998 cached_vnfds: Dict[str, Any],
2999 ) -> List[Relation]:
3000 relations = []
3001 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3002 vnf_profile_id = vnf_profile["id"]
3003 vnfd_id = vnf_profile["vnfd-id"]
3004 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3005 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3006 for r in db_vnf_relations:
3007 provider_dict = None
3008 requirer_dict = None
3009 if all(key in r for key in ("provider", "requirer")):
3010 provider_dict = r["provider"]
3011 requirer_dict = r["requirer"]
3012 elif "entities" in r:
3013 provider_id = r["entities"][0]["id"]
3014 provider_dict = {
3015 "nsr-id": nsr_id,
3016 "vnf-profile-id": vnf_profile_id,
3017 "endpoint": r["entities"][0]["endpoint"],
3018 }
3019 if provider_id != vnfd_id:
3020 provider_dict["vdu-profile-id"] = provider_id
3021 requirer_id = r["entities"][1]["id"]
3022 requirer_dict = {
3023 "nsr-id": nsr_id,
3024 "vnf-profile-id": vnf_profile_id,
3025 "endpoint": r["entities"][1]["endpoint"],
3026 }
3027 if requirer_id != vnfd_id:
3028 requirer_dict["vdu-profile-id"] = requirer_id
3029 else:
3030 raise Exception(
3031 "provider/requirer or entities must be included in the relation."
3032 )
3033 relation_provider = self._update_ee_relation_data_with_implicit_data(
3034 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3035 )
3036 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3037 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3038 )
3039 provider = EERelation(relation_provider)
3040 requirer = EERelation(relation_requirer)
3041 relation = Relation(r["name"], provider, requirer)
3042 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3043 if vca_in_relation:
3044 relations.append(relation)
3045 return relations
3046
3047 def _get_kdu_resource_data(
3048 self,
3049 ee_relation: EERelation,
3050 db_nsr: Dict[str, Any],
3051 cached_vnfds: Dict[str, Any],
3052 ) -> DeployedK8sResource:
3053 nsd = get_nsd(db_nsr)
3054 vnf_profiles = get_vnf_profiles(nsd)
3055 vnfd_id = find_in_list(
3056 vnf_profiles,
3057 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3058 )["vnfd-id"]
3059 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3060 kdu_resource_profile = get_kdu_resource_profile(
3061 db_vnfd, ee_relation.kdu_resource_profile_id
3062 )
3063 kdu_name = kdu_resource_profile["kdu-name"]
3064 deployed_kdu, _ = get_deployed_kdu(
3065 db_nsr.get("_admin", ()).get("deployed", ()),
3066 kdu_name,
3067 ee_relation.vnf_profile_id,
3068 )
3069 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3070 return deployed_kdu
3071
3072 def _get_deployed_component(
3073 self,
3074 ee_relation: EERelation,
3075 db_nsr: Dict[str, Any],
3076 cached_vnfds: Dict[str, Any],
3077 ) -> DeployedComponent:
3078 nsr_id = db_nsr["_id"]
3079 deployed_component = None
3080 ee_level = EELevel.get_level(ee_relation)
3081 if ee_level == EELevel.NS:
3082 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3083 if vca:
3084 deployed_component = DeployedVCA(nsr_id, vca)
3085 elif ee_level == EELevel.VNF:
3086 vca = get_deployed_vca(
3087 db_nsr,
3088 {
3089 "vdu_id": None,
3090 "member-vnf-index": ee_relation.vnf_profile_id,
3091 "ee_descriptor_id": ee_relation.execution_environment_ref,
3092 },
3093 )
3094 if vca:
3095 deployed_component = DeployedVCA(nsr_id, vca)
3096 elif ee_level == EELevel.VDU:
3097 vca = get_deployed_vca(
3098 db_nsr,
3099 {
3100 "vdu_id": ee_relation.vdu_profile_id,
3101 "member-vnf-index": ee_relation.vnf_profile_id,
3102 "ee_descriptor_id": ee_relation.execution_environment_ref,
3103 },
3104 )
3105 if vca:
3106 deployed_component = DeployedVCA(nsr_id, vca)
3107 elif ee_level == EELevel.KDU:
3108 kdu_resource_data = self._get_kdu_resource_data(
3109 ee_relation, db_nsr, cached_vnfds
3110 )
3111 if kdu_resource_data:
3112 deployed_component = DeployedK8sResource(kdu_resource_data)
3113 return deployed_component
3114
3115 async def _add_relation(
3116 self,
3117 relation: Relation,
3118 vca_type: str,
3119 db_nsr: Dict[str, Any],
3120 cached_vnfds: Dict[str, Any],
3121 cached_vnfrs: Dict[str, Any],
3122 ) -> bool:
3123 deployed_provider = self._get_deployed_component(
3124 relation.provider, db_nsr, cached_vnfds
3125 )
3126 deployed_requirer = self._get_deployed_component(
3127 relation.requirer, db_nsr, cached_vnfds
3128 )
3129 if (
3130 deployed_provider
3131 and deployed_requirer
3132 and deployed_provider.config_sw_installed
3133 and deployed_requirer.config_sw_installed
3134 ):
3135 provider_db_vnfr = (
3136 self._get_vnfr(
3137 relation.provider.nsr_id,
3138 relation.provider.vnf_profile_id,
3139 cached_vnfrs,
3140 )
3141 if relation.provider.vnf_profile_id
3142 else None
3143 )
3144 requirer_db_vnfr = (
3145 self._get_vnfr(
3146 relation.requirer.nsr_id,
3147 relation.requirer.vnf_profile_id,
3148 cached_vnfrs,
3149 )
3150 if relation.requirer.vnf_profile_id
3151 else None
3152 )
3153 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3154 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3155 provider_relation_endpoint = RelationEndpoint(
3156 deployed_provider.ee_id,
3157 provider_vca_id,
3158 relation.provider.endpoint,
3159 )
3160 requirer_relation_endpoint = RelationEndpoint(
3161 deployed_requirer.ee_id,
3162 requirer_vca_id,
3163 relation.requirer.endpoint,
3164 )
3165 await self.vca_map[vca_type].add_relation(
3166 provider=provider_relation_endpoint,
3167 requirer=requirer_relation_endpoint,
3168 )
3169 # remove entry from relations list
3170 return True
3171 return False
3172
3173 async def _add_vca_relations(
3174 self,
3175 logging_text,
3176 nsr_id,
3177 vca_type: str,
3178 vca_index: int,
3179 timeout: int = 3600,
3180 ) -> bool:
3181
3182 # steps:
3183 # 1. find all relations for this VCA
3184 # 2. wait for other peers related
3185 # 3. add relations
3186
3187 try:
3188 # STEP 1: find all relations for this VCA
3189
3190 # read nsr record
3191 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3192 nsd = get_nsd(db_nsr)
3193
3194 # this VCA data
3195 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3196 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3197
3198 cached_vnfds = {}
3199 cached_vnfrs = {}
3200 relations = []
3201 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3202 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3203
3204 # if no relations, terminate
3205 if not relations:
3206 self.logger.debug(logging_text + " No relations")
3207 return True
3208
3209 self.logger.debug(logging_text + " adding relations {}".format(relations))
3210
3211 # add all relations
3212 start = time()
3213 while True:
3214 # check timeout
3215 now = time()
3216 if now - start >= timeout:
3217 self.logger.error(logging_text + " : timeout adding relations")
3218 return False
3219
3220 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3221 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3222
3223 # for each relation, find the VCA's related
3224 for relation in relations.copy():
3225 added = await self._add_relation(
3226 relation,
3227 vca_type,
3228 db_nsr,
3229 cached_vnfds,
3230 cached_vnfrs,
3231 )
3232 if added:
3233 relations.remove(relation)
3234
3235 if not relations:
3236 self.logger.debug("Relations added")
3237 break
3238 await asyncio.sleep(5.0)
3239
3240 return True
3241
3242 except Exception as e:
3243 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3244 return False
3245
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and update the nsr/vnfr records.

        Generates (or reuses) the kdu-instance name, installs the KDU through
        the connector selected by k8s_instance_info["k8scluster-type"], stores
        the resulting service/management IPs in the vnfr, and finally runs any
        initial-config-primitive when the KDU has no juju execution
        environment associated.

        :param nsr_id: NS record id
        :param nsr_db_path: path of this KDU inside the nsr (_admin.deployed.K8s.<i>)
        :param vnfr_data: vnfr record of the VNF owning the KDU
        :param kdu_index: index of the kdur inside the vnfr
        :param kdud: KDU descriptor (from the VNFD)
        :param vnfd: VNF descriptor
        :param k8s_instance_info: deployment info (cluster uuid/type, model, ...)
        :param k8params: instantiation parameters for the KDU
        :param timeout: timeout in seconds for install and primitives
        :param vca_id: VCA id, if any
        :return: the kdu_instance name used for the deployment
        :raises: re-raises any failure after recording ERROR status in the db
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honour a user-provided deployment name; otherwise generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                                    break
                    # for/else: runs when the inner loop completed without break,
                    # i.e. no deployed service matched this mgmt service name
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            # run initial-config-primitives only when there is no juju execution
            # environment for this KDU (otherwise the EE takes care of them)
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3443
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Deploy every KDU declared in the vnfrs and register one task per KDU.

        For each kdur of each vnfr: resolves the KDU model (helm chart or juju
        bundle, possibly a packaged artifact in the file system), resolves and
        lazily initializes the target K8s cluster, synchronizes helm repos once
        per cluster, records the deployment in _admin.deployed.K8s of the nsr,
        and launches an _install_kdu asyncio task registered in lcm_tasks.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id the tasks belong to
        :param db_vnfrs: vnfr records, keyed by member-vnf-index
        :param db_vnfds: list of VNF descriptors
        :param task_instantiation_info: dict filled with task -> description
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # cache of cluster_id -> internal uuid, one map per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the internal uuid of a K8s cluster,
            # initializing it for helm v3 if needed (backward compatibility).
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever was recorded even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3715
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Deploy the VCAs (charms/helm EEs) declared in a descriptor config.

        For every execution environment of descriptor_config, determines the
        VCA type and name, reuses an existing _admin.deployed.VCA entry that
        matches (member-vnf-index, vdu_id, kdu_name, vdu_count_index,
        ee_descriptor_id) or creates a new one, and launches one
        instantiate_N2VC task registered in lcm_tasks and in
        task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                # a named charm is a proxy charm unless overridden below
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            # for/else: runs when no existing VCA entry matched
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3868
3869 @staticmethod
3870 def _create_nslcmop(nsr_id, operation, params):
3871 """
3872 Creates a ns-lcm-opp content to be stored at database.
3873 :param nsr_id: internal id of the instance
3874 :param operation: instantiate, terminate, scale, action, ...
3875 :param params: user parameters for the operation
3876 :return: dictionary following SOL005 format
3877 """
3878 # Raise exception if invalid arguments
3879 if not (nsr_id and operation and params):
3880 raise LcmException(
3881 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3882 )
3883 now = time()
3884 _id = str(uuid4())
3885 nslcmop = {
3886 "id": _id,
3887 "_id": _id,
3888 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3889 "operationState": "PROCESSING",
3890 "statusEnteredTime": now,
3891 "nsInstanceId": nsr_id,
3892 "lcmOperationType": operation,
3893 "startTime": now,
3894 "isAutomaticInvocation": False,
3895 "operationParams": params,
3896 "isCancelPending": False,
3897 "links": {
3898 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3899 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3900 },
3901 }
3902 return nslcmop
3903
3904 def _format_additional_params(self, params):
3905 params = params or {}
3906 for key, value in params.items():
3907 if str(value).startswith("!!yaml "):
3908 params[key] = yaml.safe_load(value[7:])
3909 return params
3910
3911 def _get_terminate_primitive_params(self, seq, vnf_index):
3912 primitive = seq.get("name")
3913 primitive_params = {}
3914 params = {
3915 "member_vnf_index": vnf_index,
3916 "primitive": primitive,
3917 "primitive_params": primitive_params,
3918 }
3919 desc_params = {}
3920 return self._map_primitive_params(seq, params, desc_params)
3921
3922 # sub-operations
3923
3924 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3925 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3926 if op.get("operationState") == "COMPLETED":
3927 # b. Skip sub-operation
3928 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3929 return self.SUBOPERATION_STATUS_SKIP
3930 else:
3931 # c. retry executing sub-operation
3932 # The sub-operation exists, and operationState != 'COMPLETED'
3933 # Update operationState = 'PROCESSING' to indicate a retry.
3934 operationState = "PROCESSING"
3935 detailed_status = "In progress"
3936 self._update_suboperation_status(
3937 db_nslcmop, op_index, operationState, detailed_status
3938 )
3939 # Return the sub-operation index
3940 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3941 # with arguments extracted from the sub-operation
3942 return op_index
3943
3944 # Find a sub-operation where all keys in a matching dictionary must match
3945 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3946 def _find_suboperation(self, db_nslcmop, match):
3947 if db_nslcmop and match:
3948 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3949 for i, op in enumerate(op_list):
3950 if all(op.get(k) == match[k] for k in match):
3951 return i
3952 return self.SUBOPERATION_STATUS_NOT_FOUND
3953
3954 # Update status for a sub-operation given its index
3955 def _update_suboperation_status(
3956 self, db_nslcmop, op_index, operationState, detailed_status
3957 ):
3958 # Update DB for HA tasks
3959 q_filter = {"_id": db_nslcmop["_id"]}
3960 update_dict = {
3961 "_admin.operations.{}.operationState".format(op_index): operationState,
3962 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3963 }
3964 self.db.set_one(
3965 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3966 )
3967
3968 # Add sub-operation, return the index of the added sub-operation
3969 # Optionally, set operationState, detailed-status, and operationType
3970 # Status and type are currently set for 'scale' sub-operations:
3971 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3972 # 'detailed-status' : status message
3973 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3974 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3975 def _add_suboperation(
3976 self,
3977 db_nslcmop,
3978 vnf_index,
3979 vdu_id,
3980 vdu_count_index,
3981 vdu_name,
3982 primitive,
3983 mapped_primitive_params,
3984 operationState=None,
3985 detailed_status=None,
3986 operationType=None,
3987 RO_nsr_id=None,
3988 RO_scaling_info=None,
3989 ):
3990 if not db_nslcmop:
3991 return self.SUBOPERATION_STATUS_NOT_FOUND
3992 # Get the "_admin.operations" list, if it exists
3993 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3994 op_list = db_nslcmop_admin.get("operations")
3995 # Create or append to the "_admin.operations" list
3996 new_op = {
3997 "member_vnf_index": vnf_index,
3998 "vdu_id": vdu_id,
3999 "vdu_count_index": vdu_count_index,
4000 "primitive": primitive,
4001 "primitive_params": mapped_primitive_params,
4002 }
4003 if operationState:
4004 new_op["operationState"] = operationState
4005 if detailed_status:
4006 new_op["detailed-status"] = detailed_status
4007 if operationType:
4008 new_op["lcmOperationType"] = operationType
4009 if RO_nsr_id:
4010 new_op["RO_nsr_id"] = RO_nsr_id
4011 if RO_scaling_info:
4012 new_op["RO_scaling_info"] = RO_scaling_info
4013 if not op_list:
4014 # No existing operations, create key 'operations' with current operation as first list element
4015 db_nslcmop_admin.update({"operations": [new_op]})
4016 op_list = db_nslcmop_admin.get("operations")
4017 else:
4018 # Existing operations, append operation to list
4019 op_list.append(new_op)
4020
4021 db_nslcmop_update = {"_admin.operations": op_list}
4022 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4023 op_index = len(op_list) - 1
4024 return op_index
4025
4026 # Helper methods for scale() sub-operations
4027
4028 # pre-scale/post-scale:
4029 # Check for 3 different cases:
4030 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4031 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4032 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4033 def _check_or_add_scale_suboperation(
4034 self,
4035 db_nslcmop,
4036 vnf_index,
4037 vnf_config_primitive,
4038 primitive_params,
4039 operationType,
4040 RO_nsr_id=None,
4041 RO_scaling_info=None,
4042 ):
4043 # Find this sub-operation
4044 if RO_nsr_id and RO_scaling_info:
4045 operationType = "SCALE-RO"
4046 match = {
4047 "member_vnf_index": vnf_index,
4048 "RO_nsr_id": RO_nsr_id,
4049 "RO_scaling_info": RO_scaling_info,
4050 }
4051 else:
4052 match = {
4053 "member_vnf_index": vnf_index,
4054 "primitive": vnf_config_primitive,
4055 "primitive_params": primitive_params,
4056 "lcmOperationType": operationType,
4057 }
4058 op_index = self._find_suboperation(db_nslcmop, match)
4059 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4060 # a. New sub-operation
4061 # The sub-operation does not exist, add it.
4062 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4063 # The following parameters are set to None for all kind of scaling:
4064 vdu_id = None
4065 vdu_count_index = None
4066 vdu_name = None
4067 if RO_nsr_id and RO_scaling_info:
4068 vnf_config_primitive = None
4069 primitive_params = None
4070 else:
4071 RO_nsr_id = None
4072 RO_scaling_info = None
4073 # Initial status for sub-operation
4074 operationState = "PROCESSING"
4075 detailed_status = "In progress"
4076 # Add sub-operation for pre/post-scaling (zero or more operations)
4077 self._add_suboperation(
4078 db_nslcmop,
4079 vnf_index,
4080 vdu_id,
4081 vdu_count_index,
4082 vdu_name,
4083 vnf_config_primitive,
4084 primitive_params,
4085 operationState,
4086 detailed_status,
4087 operationType,
4088 RO_nsr_id,
4089 RO_scaling_info,
4090 )
4091 return self.SUBOPERATION_STATUS_NEW
4092 else:
4093 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4094 # or op_index (operationState != 'COMPLETED')
4095 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4096
4097 # Function to return execution_environment id
4098
4099 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4100 # TODO vdu_index_count
4101 for vca in vca_deployed_list:
4102 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4103 return vca["ee_id"]
4104
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (only if destroy_ee=True)
        :param logging_text: prefix prepended to every log message
        :param db_nslcmop: nslcmops database record of the operation in progress
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy, because it will be destroyed all of them at once
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA identifier to use, when a dedicated VCA is registered for this NS
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default kept for backward compatibility with records stored before "type" existed
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # only run when the VCA was flagged as needing terminate (config completed)
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so an HA retry can find/resume this step
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # mark in the database that this VCA does not need terminate any more
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4210
4211 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4212 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4213 namespace = "." + db_nsr["_id"]
4214 try:
4215 await self.n2vc.delete_namespace(
4216 namespace=namespace,
4217 total_timeout=self.timeout_charm_delete,
4218 vca_id=vca_id,
4219 )
4220 except N2VCNotFound: # already deleted. Skip
4221 pass
4222 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4223
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO: delete the ns from the VIM, then the nsd
        and the vnfds from RO, updating progress in the databases as it goes.
        :param logging_text: prefix prepended to every log message
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: nsrs database record _id
        :param nslcmop_id: nslcmops database record _id
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: when any deletion step reports a failure
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a delete action id may survive from a previous interrupted terminate
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # ACTIVE here means the delete action finished in RO
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only write the database when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only attempted when the ns deletion above fully succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered in RO for this ns
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4423
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate an NS instance: run terminate primitives, delete the VCA
        execution environments and KDUs, and remove the deployment from RO/VIM.
        The final result is written to the databases and notified over kafka.
        :param nsr_id: nsrs database record _id
        :param nslcmop_id: nslcmops database record _id of this terminate operation
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so partial progress does not corrupt the record in memory
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing deployed; the finally block still reports the result
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, fetching each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching the VCA level (ns/vdu/kdu/vnf)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # abort stage 3; the finally block reports the failure
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4752
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Await the asyncio tasks of created_tasks_info, updating the operation
        status in the database as each one finishes.
        :param logging_text: prefix prepended to every log message
        :param created_tasks_info: dict mapping an asyncio task to a human readable description
        :param timeout: global timeout in seconds for all the tasks together
        :param stage: 3-element status list; index 1 is updated with "<done>/<total>." progress
        :param nslcmop_id: nslcmops database record _id where status is written
        :param nsr_id: if provided, errors are also written to this nsrs record
        :return: list of detailed error strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining share of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is reported as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a one-line log; anything else logs the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4829
4830 @staticmethod
4831 def _map_primitive_params(primitive_desc, params, instantiation_params):
4832 """
4833 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4834 The default-value is used. If it is between < > it look for a value at instantiation_params
4835 :param primitive_desc: portion of VNFD/NSD that describes primitive
4836 :param params: Params provided by user
4837 :param instantiation_params: Instantiation params provided by user
4838 :return: a dictionary with the calculated params
4839 """
4840 calculated_params = {}
4841 for parameter in primitive_desc.get("parameter", ()):
4842 param_name = parameter["name"]
4843 if param_name in params:
4844 calculated_params[param_name] = params[param_name]
4845 elif "default-value" in parameter or "value" in parameter:
4846 if "value" in parameter:
4847 calculated_params[param_name] = parameter["value"]
4848 else:
4849 calculated_params[param_name] = parameter["default-value"]
4850 if (
4851 isinstance(calculated_params[param_name], str)
4852 and calculated_params[param_name].startswith("<")
4853 and calculated_params[param_name].endswith(">")
4854 ):
4855 if calculated_params[param_name][1:-1] in instantiation_params:
4856 calculated_params[param_name] = instantiation_params[
4857 calculated_params[param_name][1:-1]
4858 ]
4859 else:
4860 raise LcmException(
4861 "Parameter {} needed to execute primitive {} not provided".format(
4862 calculated_params[param_name], primitive_desc["name"]
4863 )
4864 )
4865 else:
4866 raise LcmException(
4867 "Parameter {} needed to execute primitive {} not provided".format(
4868 param_name, primitive_desc["name"]
4869 )
4870 )
4871
4872 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4873 calculated_params[param_name] = yaml.safe_dump(
4874 calculated_params[param_name], default_flow_style=True, width=256
4875 )
4876 elif isinstance(calculated_params[param_name], str) and calculated_params[
4877 param_name
4878 ].startswith("!!yaml "):
4879 calculated_params[param_name] = calculated_params[param_name][7:]
4880 if parameter.get("data-type") == "INTEGER":
4881 try:
4882 calculated_params[param_name] = int(calculated_params[param_name])
4883 except ValueError: # error converting string to int
4884 raise LcmException(
4885 "Parameter {} of primitive {} must be integer".format(
4886 param_name, primitive_desc["name"]
4887 )
4888 )
4889 elif parameter.get("data-type") == "BOOLEAN":
4890 calculated_params[param_name] = not (
4891 (str(calculated_params[param_name])).lower() == "false"
4892 )
4893
4894 # add always ns_config_info if primitive name is config
4895 if primitive_desc["name"] == "config":
4896 if "ns_config_info" in instantiation_params:
4897 calculated_params["ns_config_info"] = instantiation_params[
4898 "ns_config_info"
4899 ]
4900 return calculated_params
4901
4902 def _look_for_deployed_vca(
4903 self,
4904 deployed_vca,
4905 member_vnf_index,
4906 vdu_id,
4907 vdu_count_index,
4908 kdu_name=None,
4909 ee_descriptor_id=None,
4910 ):
4911 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4912 for vca in deployed_vca:
4913 if not vca:
4914 continue
4915 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4916 continue
4917 if (
4918 vdu_count_index is not None
4919 and vdu_count_index != vca["vdu_count_index"]
4920 ):
4921 continue
4922 if kdu_name and kdu_name != vca["kdu_name"]:
4923 continue
4924 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4925 continue
4926 break
4927 else:
4928 # vca_deployed not found
4929 raise LcmException(
4930 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4931 " is not deployed".format(
4932 member_vnf_index,
4933 vdu_id,
4934 vdu_count_index,
4935 kdu_name,
4936 ee_descriptor_id,
4937 )
4938 )
4939 # get ee_id
4940 ee_id = vca.get("ee_id")
4941 vca_type = vca.get(
4942 "type", "lxc_proxy_charm"
4943 ) # default value for backward compatibility - proxy charm
4944 if not ee_id:
4945 raise LcmException(
4946 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4947 "execution environment".format(
4948 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4949 )
4950 )
4951 return ee_id, vca_type
4952
4953 async def _ns_execute_primitive(
4954 self,
4955 ee_id,
4956 primitive,
4957 primitive_params,
4958 retries=0,
4959 retries_interval=30,
4960 timeout=None,
4961 vca_type=None,
4962 db_dict=None,
4963 vca_id: str = None,
4964 ) -> (str, str):
4965 try:
4966 if primitive == "config":
4967 primitive_params = {"params": primitive_params}
4968
4969 vca_type = vca_type or "lxc_proxy_charm"
4970
4971 while retries >= 0:
4972 try:
4973 output = await asyncio.wait_for(
4974 self.vca_map[vca_type].exec_primitive(
4975 ee_id=ee_id,
4976 primitive_name=primitive,
4977 params_dict=primitive_params,
4978 progress_timeout=self.timeout_progress_primitive,
4979 total_timeout=self.timeout_primitive,
4980 db_dict=db_dict,
4981 vca_id=vca_id,
4982 vca_type=vca_type,
4983 ),
4984 timeout=timeout or self.timeout_primitive,
4985 )
4986 # execution was OK
4987 break
4988 except asyncio.CancelledError:
4989 raise
4990 except Exception as e: # asyncio.TimeoutError
4991 if isinstance(e, asyncio.TimeoutError):
4992 e = "Timeout"
4993 retries -= 1
4994 if retries >= 0:
4995 self.logger.debug(
4996 "Error executing action {} on {} -> {}".format(
4997 primitive, ee_id, e
4998 )
4999 )
5000 # wait and retry
5001 await asyncio.sleep(retries_interval, loop=self.loop)
5002 else:
5003 return "FAILED", str(e)
5004
5005 return "COMPLETED", output
5006
5007 except (LcmException, asyncio.CancelledError):
5008 raise
5009 except Exception as e:
5010 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5011
async def vca_status_refresh(self, nsr_id, nslcmop_id):
    """
    Updating the vca_status with latest juju information in nsrs record
    :param: nsr_id: Id of the nsr
    :param: nslcmop_id: Id of the nslcmop
    :return: None
    """

    self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    vca_id = self.get_vca_id({}, db_nsr)
    deployed = db_nsr["_admin"]["deployed"]
    if deployed["K8s"]:
        # KDU-based deployment: refresh each KDU through its k8s connector
        for k8s_record in deployed["K8s"]:
            await self._on_update_k8s_db(
                cluster_uuid=k8s_record["k8scluster-uuid"],
                kdu_instance=k8s_record["kdu-instance"],
                filter={"_id": nsr_id},
                vca_id=vca_id,
                cluster_type=k8s_record["k8scluster-type"],
            )
    else:
        # Charm-based deployment: refresh each deployed VCA record by index
        for vca_index, _vca in enumerate(deployed["VCA"]):
            path = "_admin.deployed.VCA.{}.".format(vca_index)
            await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, path, {})

    self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
    self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5045
async def action(self, nsr_id, nslcmop_id):
    """Run an ns-, vnf-, vdu- or kdu-level primitive (action).

    Reads the operation parameters from the nslcmop record, locates the
    primitive in the corresponding descriptor configuration, executes it
    either through a k8s cluster connector (KDU operations) or through the
    deployed VCA (charm), then writes the result back to the database and
    notifies it on the kafka bus.

    :param nsr_id: id of the NS instance record
    :param nslcmop_id: id of the operation record driving this action
    :return: tuple (nslcmop_operation_state, detailed_status)
    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        return

    logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")
    # get all needed from database
    db_nsr = None
    db_nslcmop = None
    db_nsr_update = {}
    db_nslcmop_update = {}
    nslcmop_operation_state = None
    error_description_nslcmop = None
    exc = None
    try:
        # wait for any previous tasks in process
        step = "Waiting for previous operations to terminate"
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="RUNNING ACTION",
            current_operation_id=nslcmop_id,
        )

        step = "Getting information from database"
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        # primitive_params is stored JSON-encoded in the operation record
        if db_nslcmop["operationParams"].get("primitive_params"):
            db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                db_nslcmop["operationParams"]["primitive_params"]
            )

        nsr_deployed = db_nsr["_admin"].get("deployed")
        vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
        vdu_id = db_nslcmop["operationParams"].get("vdu_id")
        kdu_name = db_nslcmop["operationParams"].get("kdu_name")
        vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
        primitive = db_nslcmop["operationParams"]["primitive"]
        primitive_params = db_nslcmop["operationParams"]["primitive_params"]
        timeout_ns_action = db_nslcmop["operationParams"].get(
            "timeout_ns_action", self.timeout_primitive
        )

        if vnf_index:
            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
            )
            if db_vnfr.get("kdur"):
                # KDU additionalParams are stored JSON-encoded; decode each
                kdur_list = []
                for kdur in db_vnfr["kdur"]:
                    if kdur.get("additionalParams"):
                        kdur["additionalParams"] = json.loads(
                            kdur["additionalParams"]
                        )
                    kdur_list.append(kdur)
                db_vnfr["kdur"] = kdur_list
            step = "Getting vnfd from database"
            db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

            # Sync filesystem before running a primitive
            self.fs.sync(db_vnfr["vnfd-id"])
        else:
            step = "Getting nsd from database"
            db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

        # NOTE(review): db_vnfr is only assigned in the vnf_index branch
        # above; for a pure ns-level action this line looks like a potential
        # NameError — confirm intended behavior.
        vca_id = self.get_vca_id(db_vnfr, db_nsr)
        # for backward compatibility
        if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
            nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
            db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

        # look for primitive
        config_primitive_desc = descriptor_configuration = None
        if vdu_id:
            descriptor_configuration = get_configuration(db_vnfd, vdu_id)
        elif kdu_name:
            descriptor_configuration = get_configuration(db_vnfd, kdu_name)
        elif vnf_index:
            descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
        else:
            descriptor_configuration = db_nsd.get("ns-configuration")

        if descriptor_configuration and descriptor_configuration.get(
            "config-primitive"
        ):
            for config_primitive in descriptor_configuration["config-primitive"]:
                if config_primitive["name"] == primitive:
                    config_primitive_desc = config_primitive
                    break

        if not config_primitive_desc:
            # Only the built-in kdu primitives may run without a descriptor
            if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                raise LcmException(
                    "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                        primitive
                    )
                )
            primitive_name = primitive
            ee_descriptor_id = None
        else:
            primitive_name = config_primitive_desc.get(
                "execution-environment-primitive", primitive
            )
            ee_descriptor_id = config_primitive_desc.get(
                "execution-environment-ref"
            )

        # Gather the additional params (desc_params) at the proper level
        if vnf_index:
            if vdu_id:
                vdur = next(
                    (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                )
                desc_params = parse_yaml_strings(vdur.get("additionalParams"))
            elif kdu_name:
                kdur = next(
                    (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                )
                desc_params = parse_yaml_strings(kdur.get("additionalParams"))
            else:
                desc_params = parse_yaml_strings(
                    db_vnfr.get("additionalParamsForVnf")
                )
        else:
            desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
        if kdu_name and get_configuration(db_vnfd, kdu_name):
            kdu_configuration = get_configuration(db_vnfd, kdu_name)
            actions = set()
            # NOTE(review): these loops reuse (shadow) the outer "primitive"
            # variable; harmless today because only primitive_name is used
            # afterwards, but fragile — confirm before relying on "primitive".
            for primitive in kdu_configuration.get("initial-config-primitive", []):
                actions.add(primitive["name"])
            for primitive in kdu_configuration.get("config-primitive", []):
                actions.add(primitive["name"])
            kdu = find_in_list(
                nsr_deployed["K8s"],
                lambda kdu: kdu_name == kdu["kdu-name"]
                and kdu["member-vnf-index"] == vnf_index,
            )
            kdu_action = (
                True
                if primitive_name in actions
                and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                else False
            )

        # TODO check if ns is in a proper status
        # NOTE(review): kdu_action is only defined when the kdu has a
        # configuration; the short-circuit below avoids evaluating it for the
        # built-in primitives, otherwise a NameError is possible — confirm.
        if kdu_name and (
            primitive_name in ("upgrade", "rollback", "status") or kdu_action
        ):
            # kdur and desc_params already set from before
            if primitive_params:
                desc_params.update(primitive_params)
            # TODO Check if we will need something at vnf level
            for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                if (
                    kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index
                ):
                    break
            else:
                raise LcmException(
                    "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                )

            if kdu.get("k8scluster-type") not in self.k8scluster_map:
                msg = "unknown k8scluster-type '{}'".format(
                    kdu.get("k8scluster-type")
                )
                raise LcmException(msg)

            db_dict = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": "_admin.deployed.K8s.{}".format(index),
            }
            self.logger.debug(
                logging_text
                + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
            )
            step = "Executing kdu {}".format(primitive_name)
            if primitive_name == "upgrade":
                # kdu_model from params wins over the deployed one; a
                # "name:version" deployed model is trimmed to the name
                if desc_params.get("kdu_model"):
                    kdu_model = desc_params.get("kdu_model")
                    del desc_params["kdu_model"]
                else:
                    kdu_model = kdu.get("kdu-model")
                    parts = kdu_model.split(sep=":")
                    if len(parts) == 2:
                        kdu_model = parts[0]

                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu.get("kdu-instance"),
                        atomic=True,
                        kdu_model=kdu_model,
                        params=desc_params,
                        db_dict=db_dict,
                        timeout=timeout_ns_action,
                    ),
                    timeout=timeout_ns_action + 10,
                )
                self.logger.debug(
                    logging_text + " Upgrade of kdu {} done".format(detailed_status)
                )
            elif primitive_name == "rollback":
                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu.get("kdu-instance"),
                        db_dict=db_dict,
                    ),
                    timeout=timeout_ns_action,
                )
            elif primitive_name == "status":
                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu.get("kdu-instance"),
                        vca_id=vca_id,
                    ),
                    timeout=timeout_ns_action,
                )
            else:
                # user-defined kdu primitive
                kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                    kdu["kdu-name"], nsr_id
                )
                params = self._map_primitive_params(
                    config_primitive_desc, primitive_params, desc_params
                )

                detailed_status = await asyncio.wait_for(
                    self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu_instance,
                        primitive_name=primitive_name,
                        params=params,
                        db_dict=db_dict,
                        timeout=timeout_ns_action,
                        vca_id=vca_id,
                    ),
                    timeout=timeout_ns_action,
                )

            if detailed_status:
                nslcmop_operation_state = "COMPLETED"
            else:
                detailed_status = ""
                nslcmop_operation_state = "FAILED"
        else:
            # charm-based primitive: run it on the deployed VCA
            ee_id, vca_type = self._look_for_deployed_vca(
                nsr_deployed["VCA"],
                member_vnf_index=vnf_index,
                vdu_id=vdu_id,
                vdu_count_index=vdu_count_index,
                ee_descriptor_id=ee_descriptor_id,
            )
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if vca_deployed.get("member-vnf-index") == vnf_index:
                    db_dict = {
                        "collection": "nsrs",
                        "filter": {"_id": nsr_id},
                        "path": "_admin.deployed.VCA.{}.".format(vca_index),
                    }
                    break
            (
                nslcmop_operation_state,
                detailed_status,
            ) = await self._ns_execute_primitive(
                ee_id,
                primitive=primitive_name,
                primitive_params=self._map_primitive_params(
                    config_primitive_desc, primitive_params, desc_params
                ),
                timeout=timeout_ns_action,
                vca_type=vca_type,
                db_dict=db_dict,
                vca_id=vca_id,
            )

        db_nslcmop_update["detailed-status"] = detailed_status
        error_description_nslcmop = (
            detailed_status if nslcmop_operation_state == "FAILED" else ""
        )
        self.logger.debug(
            logging_text
            + " task Done with result {} {}".format(
                nslcmop_operation_state, detailed_status
            )
        )
        return  # database update is called inside finally

    except (DbException, LcmException, N2VCException, K8sException) as e:
        self.logger.error(logging_text + "Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(
            logging_text + "Cancelled Exception while '{}'".format(step)
        )
        exc = "Operation was cancelled"
    except asyncio.TimeoutError:
        self.logger.error(logging_text + "Timeout while '{}'".format(step))
        exc = "Timeout"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
            exc_info=True,
        )
    finally:
        # Always persist the operation outcome, even when an exception
        # reached one of the handlers above.
        if exc:
            db_nslcmop_update[
                "detailed-status"
            ] = (
                detailed_status
            ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
            nslcmop_operation_state = "FAILED"
        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=db_nsr[
                    "nsState"
                ],  # TODO check if degraded. For the moment use previous status
                current_operation="IDLE",
                current_operation_id=None,
                # error_description=error_description_nsr,
                # error_detail=error_detail,
                other_update=db_nsr_update,
            )

        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message=error_description_nslcmop,
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )

        if nslcmop_operation_state:
            try:
                await self.msg.aiowrite(
                    "ns",
                    "actioned",
                    {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    },
                    loop=self.loop,
                )
            except Exception as e:
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )
        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
        # NOTE(review): returning from inside finally replaces any in-flight
        # exception with this return value — confirm this is intentional.
        return nslcmop_operation_state, detailed_status
5409
async def terminate_vdus(
    self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
):
    """This method terminates VDUs

    Args:
        db_vnfr: VNF instance record
        member_vnf_index: VNF index to identify the VDUs to be removed
        db_nsr: NS instance record
        update_db_nslcmops: Nslcmop update record
        stage: three-element progress list; stage[2] is updated here
        logging_text: prefix for log messages
    """
    vca_scaling_info = []
    scaling_info = {
        "scaling_group_name": "vdu_autoscale",
        "vdu": [],
        "kdu": [],
        "scaling_direction": "IN",
        "vdu-delete": {},
        "kdu-delete": {},
    }
    count_index = 0
    # work on a shallow copy so the vnfr record itself is not mutated
    vdur_list = copy(db_vnfr.get("vdur"))
    for vdu in vdur_list:
        vca_scaling_info.append(
            {
                "osm_vdu_id": vdu["vdu-id-ref"],
                "member-vnf-index": member_vnf_index,
                "type": "delete",
                "vdu_index": count_index,
            }
        )
        scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
        interfaces = [
            {
                "name": iface["name"],
                "ip_address": iface["ip-address"],
                "mac_address": iface.get("mac-address"),
            }
            for iface in vdu["interfaces"]
        ]
        scaling_info["vdu"].append(
            {
                "name": vdu.get("name") or vdu.get("vdu-name"),
                "vdu_id": vdu["vdu-id-ref"],
                "interface": interfaces,
            }
        )
    self.logger.info("NS update scaling info{}".format(scaling_info))
    stage[2] = "Terminating VDUs"
    if scaling_info.get("vdu-delete"):
        # scale_process = "RO"
        if self.ro_config.get("ng"):
            await self._scale_ng_ro(
                logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
            )
5459
async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
    """This method is to Remove VNF instances from NS.

    Terminates the VNF's VDUs, detaches the vnfr from the NS constituent
    list and deletes the vnfr record. Refuses to remove the last VNF of
    the NS.

    Args:
        nsr_id: NS instance id
        nslcmop_id: nslcmop id of update
        vnf_instance_id: id of the VNF instance to be removed

    Returns:
        result: (str, str) COMPLETED/FAILED, details
    """
    try:
        db_nsr_update = {}
        logging_text = "Task ns={} update ".format(nsr_id)
        check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
        self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
        if check_vnfr_count > 1:
            stage = ["", "", ""]
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # Detach the vnfr from the NS constituent list, persist the nsr
            # once (the original code issued the same update_db_2 call
            # twice), then delete the vnfr record.
            constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
            constituent_vnfr.remove(db_vnfr.get("_id"))
            db_nsr_update["constituent-vnfr-ref"] = constituent_vnfr
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
            return "COMPLETED", "Done"
        else:
            step = "Terminate VNF Failed with"
            raise LcmException(
                "{} Cannot terminate the last VNF in this NS.".format(
                    vnf_instance_id
                )
            )
    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        self.logger.debug("Error removing VNF {}".format(e))
        return "FAILED", "Error removing VNF {}".format(e)
5508
async def _ns_redeploy_vnf(
    self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
):
    """This method updates and redeploys VNF instances

    Terminates the VNF's current VDU resources, updates the vnfr from the
    latest descriptor revision, and instantiates the new resources through
    NG-RO.

    Args:
        nsr_id: NS instance id
        nslcmop_id: nslcmop id
        db_vnfd: VNF descriptor
        db_vnfr: VNF instance record
        db_nsr: NS instance record

    Returns:
        result: (str, str) COMPLETED/FAILED, details
    """
    try:
        count_index = 0
        stage = ["", "", ""]
        logging_text = "Task ns={} update ".format(nsr_id)
        latest_vnfd_revision = db_vnfd["_admin"].get("revision")
        member_vnf_index = db_vnfr["member-vnf-index-ref"]

        # Terminate old VNF resources
        update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        await self.terminate_vdus(
            db_vnfr,
            member_vnf_index,
            db_nsr,
            update_db_nslcmops,
            stage,
            logging_text,
        )

        # old_vnfd_id = db_vnfr["vnfd-id"]
        # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
        new_db_vnfd = db_vnfd
        # new_vnfd_ref = new_db_vnfd["id"]
        # new_vnfd_id = vnfd_id

        # Create VDUR
        # Rebuild the connection-point list from the descriptor's ext-cpd
        new_vnfr_cp = []
        for cp in new_db_vnfd.get("ext-cpd", ()):
            vnf_cp = {
                "name": cp.get("id"),
                "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                "id": cp.get("id"),
            }
            new_vnfr_cp.append(vnf_cp)
        # The new vdur list is supplied by the caller in the operation params
        new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
        # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
        # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
        new_vnfr_update = {
            "revision": latest_vnfd_revision,
            "connection-point": new_vnfr_cp,
            "vdur": new_vdur,
            "ip-address": "",
        }
        self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
        # Re-read the vnfr so later steps see the refreshed record
        updated_db_vnfr = self.db.get_one(
            "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
        )

        # Instantiate new VNF resources
        # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        vca_scaling_info = []
        scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
        scaling_info["scaling_direction"] = "OUT"
        scaling_info["vdu-create"] = {}
        scaling_info["kdu-create"] = {}
        vdud_instantiate_list = db_vnfd["vdu"]
        for index, vdud in enumerate(vdud_instantiate_list):
            cloud_init_text = self._get_vdu_cloud_init_content(
                vdud, db_vnfd
            )
            if cloud_init_text:
                additional_params = (
                    self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                    or {}
                )
            # NOTE(review): cloud_init_list is populated below but not
            # consumed afterwards in this method — confirm whether it should
            # be passed on to RO.
            cloud_init_list = []
            if cloud_init_text:
                # TODO Information of its own ip is not available because db_vnfr is not updated.
                additional_params["OSM"] = get_osm_params(
                    updated_db_vnfr, vdud["id"], 1
                )
                cloud_init_list.append(
                    self._parse_cloud_init(
                        cloud_init_text,
                        additional_params,
                        db_vnfd["id"],
                        vdud["id"],
                    )
                )
            vca_scaling_info.append(
                {
                    "osm_vdu_id": vdud["id"],
                    "member-vnf-index": member_vnf_index,
                    "type": "create",
                    "vdu_index": count_index,
                }
            )
            scaling_info["vdu-create"][vdud["id"]] = count_index
        if self.ro_config.get("ng"):
            self.logger.debug(
                "New Resources to be deployed: {}".format(scaling_info))
            await self._scale_ng_ro(
                logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
            )
        return "COMPLETED", "Done"
    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        self.logger.debug("Error updating VNF {}".format(e))
        return "FAILED", "Error updating VNF {}".format(e)
5612
5613 async def _ns_charm_upgrade(
5614 self,
5615 ee_id,
5616 charm_id,
5617 charm_type,
5618 path,
5619 timeout: float = None,
5620 ) -> (str, str):
5621 """This method upgrade charms in VNF instances
5622
5623 Args:
5624 ee_id: Execution environment id
5625 path: Local path to the charm
5626 charm_id: charm-id
5627 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5628 timeout: (Float) Timeout for the ns update operation
5629
5630 Returns:
5631 result: (str, str) COMPLETED/FAILED, details
5632 """
5633 try:
5634 charm_type = charm_type or "lxc_proxy_charm"
5635 output = await self.vca_map[charm_type].upgrade_charm(
5636 ee_id=ee_id,
5637 path=path,
5638 charm_id=charm_id,
5639 charm_type=charm_type,
5640 timeout=timeout or self.timeout_ns_update,
5641 )
5642
5643 if output:
5644 return "COMPLETED", output
5645
5646 except (LcmException, asyncio.CancelledError):
5647 raise
5648
5649 except Exception as e:
5650
5651 self.logger.debug("Error upgrading charm {}".format(path))
5652
5653 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5654
5655 async def update(self, nsr_id, nslcmop_id):
5656 """Update NS according to different update types
5657
5658 This method performs upgrade of VNF instances then updates the revision
5659 number in VNF record
5660
5661 Args:
5662 nsr_id: Network service will be updated
5663 nslcmop_id: ns lcm operation id
5664
5665 Returns:
5666 It may raise DbException, LcmException, N2VCException, K8sException
5667
5668 """
5669 # Try to lock HA task here
5670 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5671 if not task_is_locked_by_me:
5672 return
5673
5674 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5675 self.logger.debug(logging_text + "Enter")
5676
5677 # Set the required variables to be filled up later
5678 db_nsr = None
5679 db_nslcmop_update = {}
5680 vnfr_update = {}
5681 nslcmop_operation_state = None
5682 db_nsr_update = {}
5683 error_description_nslcmop = ""
5684 exc = None
5685 change_type = "updated"
5686 detailed_status = ""
5687
5688 try:
5689 # wait for any previous tasks in process
5690 step = "Waiting for previous operations to terminate"
5691 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5692 self._write_ns_status(
5693 nsr_id=nsr_id,
5694 ns_state=None,
5695 current_operation="UPDATING",
5696 current_operation_id=nslcmop_id,
5697 )
5698
5699 step = "Getting nslcmop from database"
5700 db_nslcmop = self.db.get_one(
5701 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5702 )
5703 update_type = db_nslcmop["operationParams"]["updateType"]
5704
5705 step = "Getting nsr from database"
5706 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5707 old_operational_status = db_nsr["operational-status"]
5708 db_nsr_update["operational-status"] = "updating"
5709 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5710 nsr_deployed = db_nsr["_admin"].get("deployed")
5711
5712 if update_type == "CHANGE_VNFPKG":
5713
5714 # Get the input parameters given through update request
5715 vnf_instance_id = db_nslcmop["operationParams"][
5716 "changeVnfPackageData"
5717 ].get("vnfInstanceId")
5718
5719 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5720 "vnfdId"
5721 )
5722 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5723
5724 step = "Getting vnfr from database"
5725 db_vnfr = self.db.get_one(
5726 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5727 )
5728
5729 step = "Getting vnfds from database"
5730 # Latest VNFD
5731 latest_vnfd = self.db.get_one(
5732 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5733 )
5734 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5735
5736 # Current VNFD
5737 current_vnf_revision = db_vnfr.get("revision", 1)
5738 current_vnfd = self.db.get_one(
5739 "vnfds_revisions",
5740 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5741 fail_on_empty=False,
5742 )
5743 # Charm artifact paths will be filled up later
5744 (
5745 current_charm_artifact_path,
5746 target_charm_artifact_path,
5747 charm_artifact_paths,
5748 ) = ([], [], [])
5749
5750 step = "Checking if revision has changed in VNFD"
5751 if current_vnf_revision != latest_vnfd_revision:
5752
5753 change_type = "policy_updated"
5754
5755 # There is new revision of VNFD, update operation is required
5756 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5757 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5758
5759 step = "Removing the VNFD packages if they exist in the local path"
5760 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5761 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5762
5763 step = "Get the VNFD packages from FSMongo"
5764 self.fs.sync(from_path=latest_vnfd_path)
5765 self.fs.sync(from_path=current_vnfd_path)
5766
5767 step = (
5768 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5769 )
5770 base_folder = latest_vnfd["_admin"]["storage"]
5771
5772 for charm_index, charm_deployed in enumerate(
5773 get_iterable(nsr_deployed, "VCA")
5774 ):
5775 vnf_index = db_vnfr.get("member-vnf-index-ref")
5776
5777 # Getting charm-id and charm-type
5778 if charm_deployed.get("member-vnf-index") == vnf_index:
5779 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5780 charm_type = charm_deployed.get("type")
5781
5782 # Getting ee-id
5783 ee_id = charm_deployed.get("ee_id")
5784
5785 step = "Getting descriptor config"
5786 descriptor_config = get_configuration(
5787 current_vnfd, current_vnfd["id"]
5788 )
5789
5790 if "execution-environment-list" in descriptor_config:
5791 ee_list = descriptor_config.get(
5792 "execution-environment-list", []
5793 )
5794 else:
5795 ee_list = []
5796
5797 # There could be several charm used in the same VNF
5798 for ee_item in ee_list:
5799 if ee_item.get("juju"):
5800
5801 step = "Getting charm name"
5802 charm_name = ee_item["juju"].get("charm")
5803
5804 step = "Setting Charm artifact paths"
5805 current_charm_artifact_path.append(
5806 get_charm_artifact_path(
5807 base_folder,
5808 charm_name,
5809 charm_type,
5810 current_vnf_revision,
5811 )
5812 )
5813 target_charm_artifact_path.append(
5814 get_charm_artifact_path(
5815 base_folder,
5816 charm_name,
5817 charm_type,
5818 latest_vnfd_revision,
5819 )
5820 )
5821
5822 charm_artifact_paths = zip(
5823 current_charm_artifact_path, target_charm_artifact_path
5824 )
5825
5826 step = "Checking if software version has changed in VNFD"
5827 if find_software_version(current_vnfd) != find_software_version(
5828 latest_vnfd
5829 ):
5830
5831 step = "Checking if existing VNF has charm"
5832 for current_charm_path, target_charm_path in list(
5833 charm_artifact_paths
5834 ):
5835 if current_charm_path:
5836 raise LcmException(
5837 "Software version change is not supported as VNF instance {} has charm.".format(
5838 vnf_instance_id
5839 )
5840 )
5841
5842 # There is no change in the charm package, then redeploy the VNF
5843 # based on new descriptor
5844 step = "Redeploying VNF"
5845 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5846 (
5847 result,
5848 detailed_status
5849 ) = await self._ns_redeploy_vnf(
5850 nsr_id,
5851 nslcmop_id,
5852 latest_vnfd,
5853 db_vnfr,
5854 db_nsr
5855 )
5856 if result == "FAILED":
5857 nslcmop_operation_state = result
5858 error_description_nslcmop = detailed_status
5859 db_nslcmop_update["detailed-status"] = detailed_status
5860 self.logger.debug(
5861 logging_text
5862 + " step {} Done with result {} {}".format(
5863 step, nslcmop_operation_state, detailed_status
5864 )
5865 )
5866
5867 else:
5868 step = "Checking if any charm package has changed or not"
5869 for current_charm_path, target_charm_path in list(
5870 charm_artifact_paths
5871 ):
5872 if (
5873 current_charm_path
5874 and target_charm_path
5875 and self.check_charm_hash_changed(
5876 current_charm_path, target_charm_path
5877 )
5878 ):
5879
5880 step = "Checking whether VNF uses juju bundle"
5881 if check_juju_bundle_existence(current_vnfd):
5882
5883 raise LcmException(
5884 "Charm upgrade is not supported for the instance which"
5885 " uses juju-bundle: {}".format(
5886 check_juju_bundle_existence(current_vnfd)
5887 )
5888 )
5889
5890 step = "Upgrading Charm"
5891 (
5892 result,
5893 detailed_status,
5894 ) = await self._ns_charm_upgrade(
5895 ee_id=ee_id,
5896 charm_id=charm_id,
5897 charm_type=charm_type,
5898 path=self.fs.path + target_charm_path,
5899 timeout=timeout_seconds,
5900 )
5901
5902 if result == "FAILED":
5903 nslcmop_operation_state = result
5904 error_description_nslcmop = detailed_status
5905
5906 db_nslcmop_update["detailed-status"] = detailed_status
5907 self.logger.debug(
5908 logging_text
5909 + " step {} Done with result {} {}".format(
5910 step, nslcmop_operation_state, detailed_status
5911 )
5912 )
5913
5914 step = "Updating policies"
5915 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5916 result = "COMPLETED"
5917 detailed_status = "Done"
5918 db_nslcmop_update["detailed-status"] = "Done"
5919
5920 # If nslcmop_operation_state is None, so any operation is not failed.
5921 if not nslcmop_operation_state:
5922 nslcmop_operation_state = "COMPLETED"
5923
5924 # If update CHANGE_VNFPKG nslcmop_operation is successful
5925 # vnf revision need to be updated
5926 vnfr_update["revision"] = latest_vnfd_revision
5927 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5928
5929 self.logger.debug(
5930 logging_text
5931 + " task Done with result {} {}".format(
5932 nslcmop_operation_state, detailed_status
5933 )
5934 )
5935 elif update_type == "REMOVE_VNF":
5936 # This part is included in https://osm.etsi.org/gerrit/11876
5937 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5938 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5939 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5940 step = "Removing VNF"
5941 (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
5942 if result == "FAILED":
5943 nslcmop_operation_state = result
5944 error_description_nslcmop = detailed_status
5945 db_nslcmop_update["detailed-status"] = detailed_status
5946 change_type = "vnf_terminated"
5947 if not nslcmop_operation_state:
5948 nslcmop_operation_state = "COMPLETED"
5949 self.logger.debug(
5950 logging_text
5951 + " task Done with result {} {}".format(
5952 nslcmop_operation_state, detailed_status
5953 )
5954 )
5955
5956 elif update_type == "OPERATE_VNF":
5957 vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
5958 operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
5959 additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
5960 (result, detailed_status) = await self.rebuild_start_stop(
5961 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5962 )
5963 if result == "FAILED":
5964 nslcmop_operation_state = result
5965 error_description_nslcmop = detailed_status
5966 db_nslcmop_update["detailed-status"] = detailed_status
5967 if not nslcmop_operation_state:
5968 nslcmop_operation_state = "COMPLETED"
5969 self.logger.debug(
5970 logging_text
5971 + " task Done with result {} {}".format(
5972 nslcmop_operation_state, detailed_status
5973 )
5974 )
5975
5976 # If nslcmop_operation_state is None, so any operation is not failed.
5977 # All operations are executed in overall.
5978 if not nslcmop_operation_state:
5979 nslcmop_operation_state = "COMPLETED"
5980 db_nsr_update["operational-status"] = old_operational_status
5981
5982 except (DbException, LcmException, N2VCException, K8sException) as e:
5983 self.logger.error(logging_text + "Exit Exception {}".format(e))
5984 exc = e
5985 except asyncio.CancelledError:
5986 self.logger.error(
5987 logging_text + "Cancelled Exception while '{}'".format(step)
5988 )
5989 exc = "Operation was cancelled"
5990 except asyncio.TimeoutError:
5991 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5992 exc = "Timeout"
5993 except Exception as e:
5994 exc = traceback.format_exc()
5995 self.logger.critical(
5996 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5997 exc_info=True,
5998 )
5999 finally:
6000 if exc:
6001 db_nslcmop_update[
6002 "detailed-status"
6003 ] = (
6004 detailed_status
6005 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6006 nslcmop_operation_state = "FAILED"
6007 db_nsr_update["operational-status"] = old_operational_status
6008 if db_nsr:
6009 self._write_ns_status(
6010 nsr_id=nsr_id,
6011 ns_state=db_nsr["nsState"],
6012 current_operation="IDLE",
6013 current_operation_id=None,
6014 other_update=db_nsr_update,
6015 )
6016
6017 self._write_op_status(
6018 op_id=nslcmop_id,
6019 stage="",
6020 error_message=error_description_nslcmop,
6021 operation_state=nslcmop_operation_state,
6022 other_update=db_nslcmop_update,
6023 )
6024
6025 if nslcmop_operation_state:
6026 try:
6027 msg = {
6028 "nsr_id": nsr_id,
6029 "nslcmop_id": nslcmop_id,
6030 "operationState": nslcmop_operation_state,
6031 }
6032 if change_type in ("vnf_terminated", "policy_updated"):
6033 msg.update({"vnf_member_index": member_vnf_index})
6034 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6035 except Exception as e:
6036 self.logger.error(
6037 logging_text + "kafka_write notification Exception {}".format(e)
6038 )
6039 self.logger.debug(logging_text + "Exit")
6040 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6041 return nslcmop_operation_state, detailed_status
6042
6043 async def scale(self, nsr_id, nslcmop_id):
6044 # Try to lock HA task here
6045 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6046 if not task_is_locked_by_me:
6047 return
6048
6049 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6050 stage = ["", "", ""]
6051 tasks_dict_info = {}
6052 # ^ stage, step, VIM progress
6053 self.logger.debug(logging_text + "Enter")
6054 # get all needed from database
6055 db_nsr = None
6056 db_nslcmop_update = {}
6057 db_nsr_update = {}
6058 exc = None
6059 # in case of error, indicates what part of scale was failed to put nsr at error status
6060 scale_process = None
6061 old_operational_status = ""
6062 old_config_status = ""
6063 nsi_id = None
6064 try:
6065 # wait for any previous tasks in process
6066 step = "Waiting for previous operations to terminate"
6067 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6068 self._write_ns_status(
6069 nsr_id=nsr_id,
6070 ns_state=None,
6071 current_operation="SCALING",
6072 current_operation_id=nslcmop_id,
6073 )
6074
6075 step = "Getting nslcmop from database"
6076 self.logger.debug(
6077 step + " after having waited for previous tasks to be completed"
6078 )
6079 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6080
6081 step = "Getting nsr from database"
6082 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6083 old_operational_status = db_nsr["operational-status"]
6084 old_config_status = db_nsr["config-status"]
6085
6086 step = "Parsing scaling parameters"
6087 db_nsr_update["operational-status"] = "scaling"
6088 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6089 nsr_deployed = db_nsr["_admin"].get("deployed")
6090
6091 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6092 "scaleByStepData"
6093 ]["member-vnf-index"]
6094 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6095 "scaleByStepData"
6096 ]["scaling-group-descriptor"]
6097 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6098 # for backward compatibility
6099 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6100 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6101 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6102 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6103
6104 step = "Getting vnfr from database"
6105 db_vnfr = self.db.get_one(
6106 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6107 )
6108
6109 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6110
6111 step = "Getting vnfd from database"
6112 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6113
6114 base_folder = db_vnfd["_admin"]["storage"]
6115
6116 step = "Getting scaling-group-descriptor"
6117 scaling_descriptor = find_in_list(
6118 get_scaling_aspect(db_vnfd),
6119 lambda scale_desc: scale_desc["name"] == scaling_group,
6120 )
6121 if not scaling_descriptor:
6122 raise LcmException(
6123 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6124 "at vnfd:scaling-group-descriptor".format(scaling_group)
6125 )
6126
6127 step = "Sending scale order to VIM"
6128 # TODO check if ns is in a proper status
6129 nb_scale_op = 0
6130 if not db_nsr["_admin"].get("scaling-group"):
6131 self.update_db_2(
6132 "nsrs",
6133 nsr_id,
6134 {
6135 "_admin.scaling-group": [
6136 {"name": scaling_group, "nb-scale-op": 0}
6137 ]
6138 },
6139 )
6140 admin_scale_index = 0
6141 else:
6142 for admin_scale_index, admin_scale_info in enumerate(
6143 db_nsr["_admin"]["scaling-group"]
6144 ):
6145 if admin_scale_info["name"] == scaling_group:
6146 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6147 break
6148 else: # not found, set index one plus last element and add new entry with the name
6149 admin_scale_index += 1
6150 db_nsr_update[
6151 "_admin.scaling-group.{}.name".format(admin_scale_index)
6152 ] = scaling_group
6153
6154 vca_scaling_info = []
6155 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6156 if scaling_type == "SCALE_OUT":
6157 if "aspect-delta-details" not in scaling_descriptor:
6158 raise LcmException(
6159 "Aspect delta details not fount in scaling descriptor {}".format(
6160 scaling_descriptor["name"]
6161 )
6162 )
6163 # count if max-instance-count is reached
6164 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6165
6166 scaling_info["scaling_direction"] = "OUT"
6167 scaling_info["vdu-create"] = {}
6168 scaling_info["kdu-create"] = {}
6169 for delta in deltas:
6170 for vdu_delta in delta.get("vdu-delta", {}):
6171 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6172 # vdu_index also provides the number of instance of the targeted vdu
6173 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6174 cloud_init_text = self._get_vdu_cloud_init_content(
6175 vdud, db_vnfd
6176 )
6177 if cloud_init_text:
6178 additional_params = (
6179 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6180 or {}
6181 )
6182 cloud_init_list = []
6183
6184 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6185 max_instance_count = 10
6186 if vdu_profile and "max-number-of-instances" in vdu_profile:
6187 max_instance_count = vdu_profile.get(
6188 "max-number-of-instances", 10
6189 )
6190
6191 default_instance_num = get_number_of_instances(
6192 db_vnfd, vdud["id"]
6193 )
6194 instances_number = vdu_delta.get("number-of-instances", 1)
6195 nb_scale_op += instances_number
6196
6197 new_instance_count = nb_scale_op + default_instance_num
6198 # Control if new count is over max and vdu count is less than max.
6199 # Then assign new instance count
6200 if new_instance_count > max_instance_count > vdu_count:
6201 instances_number = new_instance_count - max_instance_count
6202 else:
6203 instances_number = instances_number
6204
6205 if new_instance_count > max_instance_count:
6206 raise LcmException(
6207 "reached the limit of {} (max-instance-count) "
6208 "scaling-out operations for the "
6209 "scaling-group-descriptor '{}'".format(
6210 nb_scale_op, scaling_group
6211 )
6212 )
6213 for x in range(vdu_delta.get("number-of-instances", 1)):
6214 if cloud_init_text:
6215 # TODO Information of its own ip is not available because db_vnfr is not updated.
6216 additional_params["OSM"] = get_osm_params(
6217 db_vnfr, vdu_delta["id"], vdu_index + x
6218 )
6219 cloud_init_list.append(
6220 self._parse_cloud_init(
6221 cloud_init_text,
6222 additional_params,
6223 db_vnfd["id"],
6224 vdud["id"],
6225 )
6226 )
6227 vca_scaling_info.append(
6228 {
6229 "osm_vdu_id": vdu_delta["id"],
6230 "member-vnf-index": vnf_index,
6231 "type": "create",
6232 "vdu_index": vdu_index + x,
6233 }
6234 )
6235 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6236 for kdu_delta in delta.get("kdu-resource-delta", {}):
6237 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6238 kdu_name = kdu_profile["kdu-name"]
6239 resource_name = kdu_profile.get("resource-name", "")
6240
6241 # Might have different kdus in the same delta
6242 # Should have list for each kdu
6243 if not scaling_info["kdu-create"].get(kdu_name, None):
6244 scaling_info["kdu-create"][kdu_name] = []
6245
6246 kdur = get_kdur(db_vnfr, kdu_name)
6247 if kdur.get("helm-chart"):
6248 k8s_cluster_type = "helm-chart-v3"
6249 self.logger.debug("kdur: {}".format(kdur))
6250 if (
6251 kdur.get("helm-version")
6252 and kdur.get("helm-version") == "v2"
6253 ):
6254 k8s_cluster_type = "helm-chart"
6255 elif kdur.get("juju-bundle"):
6256 k8s_cluster_type = "juju-bundle"
6257 else:
6258 raise LcmException(
6259 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6260 "juju-bundle. Maybe an old NBI version is running".format(
6261 db_vnfr["member-vnf-index-ref"], kdu_name
6262 )
6263 )
6264
6265 max_instance_count = 10
6266 if kdu_profile and "max-number-of-instances" in kdu_profile:
6267 max_instance_count = kdu_profile.get(
6268 "max-number-of-instances", 10
6269 )
6270
6271 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6272 deployed_kdu, _ = get_deployed_kdu(
6273 nsr_deployed, kdu_name, vnf_index
6274 )
6275 if deployed_kdu is None:
6276 raise LcmException(
6277 "KDU '{}' for vnf '{}' not deployed".format(
6278 kdu_name, vnf_index
6279 )
6280 )
6281 kdu_instance = deployed_kdu.get("kdu-instance")
6282 instance_num = await self.k8scluster_map[
6283 k8s_cluster_type
6284 ].get_scale_count(
6285 resource_name,
6286 kdu_instance,
6287 vca_id=vca_id,
6288 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6289 kdu_model=deployed_kdu.get("kdu-model"),
6290 )
6291 kdu_replica_count = instance_num + kdu_delta.get(
6292 "number-of-instances", 1
6293 )
6294
6295 # Control if new count is over max and instance_num is less than max.
6296 # Then assign max instance number to kdu replica count
6297 if kdu_replica_count > max_instance_count > instance_num:
6298 kdu_replica_count = max_instance_count
6299 if kdu_replica_count > max_instance_count:
6300 raise LcmException(
6301 "reached the limit of {} (max-instance-count) "
6302 "scaling-out operations for the "
6303 "scaling-group-descriptor '{}'".format(
6304 instance_num, scaling_group
6305 )
6306 )
6307
6308 for x in range(kdu_delta.get("number-of-instances", 1)):
6309 vca_scaling_info.append(
6310 {
6311 "osm_kdu_id": kdu_name,
6312 "member-vnf-index": vnf_index,
6313 "type": "create",
6314 "kdu_index": instance_num + x - 1,
6315 }
6316 )
6317 scaling_info["kdu-create"][kdu_name].append(
6318 {
6319 "member-vnf-index": vnf_index,
6320 "type": "create",
6321 "k8s-cluster-type": k8s_cluster_type,
6322 "resource-name": resource_name,
6323 "scale": kdu_replica_count,
6324 }
6325 )
6326 elif scaling_type == "SCALE_IN":
6327 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6328
6329 scaling_info["scaling_direction"] = "IN"
6330 scaling_info["vdu-delete"] = {}
6331 scaling_info["kdu-delete"] = {}
6332
6333 for delta in deltas:
6334 for vdu_delta in delta.get("vdu-delta", {}):
6335 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6336 min_instance_count = 0
6337 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6338 if vdu_profile and "min-number-of-instances" in vdu_profile:
6339 min_instance_count = vdu_profile["min-number-of-instances"]
6340
6341 default_instance_num = get_number_of_instances(
6342 db_vnfd, vdu_delta["id"]
6343 )
6344 instance_num = vdu_delta.get("number-of-instances", 1)
6345 nb_scale_op -= instance_num
6346
6347 new_instance_count = nb_scale_op + default_instance_num
6348
6349 if new_instance_count < min_instance_count < vdu_count:
6350 instances_number = min_instance_count - new_instance_count
6351 else:
6352 instances_number = instance_num
6353
6354 if new_instance_count < min_instance_count:
6355 raise LcmException(
6356 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6357 "scaling-group-descriptor '{}'".format(
6358 nb_scale_op, scaling_group
6359 )
6360 )
6361 for x in range(vdu_delta.get("number-of-instances", 1)):
6362 vca_scaling_info.append(
6363 {
6364 "osm_vdu_id": vdu_delta["id"],
6365 "member-vnf-index": vnf_index,
6366 "type": "delete",
6367 "vdu_index": vdu_index - 1 - x,
6368 }
6369 )
6370 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6371 for kdu_delta in delta.get("kdu-resource-delta", {}):
6372 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6373 kdu_name = kdu_profile["kdu-name"]
6374 resource_name = kdu_profile.get("resource-name", "")
6375
6376 if not scaling_info["kdu-delete"].get(kdu_name, None):
6377 scaling_info["kdu-delete"][kdu_name] = []
6378
6379 kdur = get_kdur(db_vnfr, kdu_name)
6380 if kdur.get("helm-chart"):
6381 k8s_cluster_type = "helm-chart-v3"
6382 self.logger.debug("kdur: {}".format(kdur))
6383 if (
6384 kdur.get("helm-version")
6385 and kdur.get("helm-version") == "v2"
6386 ):
6387 k8s_cluster_type = "helm-chart"
6388 elif kdur.get("juju-bundle"):
6389 k8s_cluster_type = "juju-bundle"
6390 else:
6391 raise LcmException(
6392 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6393 "juju-bundle. Maybe an old NBI version is running".format(
6394 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6395 )
6396 )
6397
6398 min_instance_count = 0
6399 if kdu_profile and "min-number-of-instances" in kdu_profile:
6400 min_instance_count = kdu_profile["min-number-of-instances"]
6401
6402 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6403 deployed_kdu, _ = get_deployed_kdu(
6404 nsr_deployed, kdu_name, vnf_index
6405 )
6406 if deployed_kdu is None:
6407 raise LcmException(
6408 "KDU '{}' for vnf '{}' not deployed".format(
6409 kdu_name, vnf_index
6410 )
6411 )
6412 kdu_instance = deployed_kdu.get("kdu-instance")
6413 instance_num = await self.k8scluster_map[
6414 k8s_cluster_type
6415 ].get_scale_count(
6416 resource_name,
6417 kdu_instance,
6418 vca_id=vca_id,
6419 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6420 kdu_model=deployed_kdu.get("kdu-model"),
6421 )
6422 kdu_replica_count = instance_num - kdu_delta.get(
6423 "number-of-instances", 1
6424 )
6425
6426 if kdu_replica_count < min_instance_count < instance_num:
6427 kdu_replica_count = min_instance_count
6428 if kdu_replica_count < min_instance_count:
6429 raise LcmException(
6430 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6431 "scaling-group-descriptor '{}'".format(
6432 instance_num, scaling_group
6433 )
6434 )
6435
6436 for x in range(kdu_delta.get("number-of-instances", 1)):
6437 vca_scaling_info.append(
6438 {
6439 "osm_kdu_id": kdu_name,
6440 "member-vnf-index": vnf_index,
6441 "type": "delete",
6442 "kdu_index": instance_num - x - 1,
6443 }
6444 )
6445 scaling_info["kdu-delete"][kdu_name].append(
6446 {
6447 "member-vnf-index": vnf_index,
6448 "type": "delete",
6449 "k8s-cluster-type": k8s_cluster_type,
6450 "resource-name": resource_name,
6451 "scale": kdu_replica_count,
6452 }
6453 )
6454
6455 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6456 vdu_delete = copy(scaling_info.get("vdu-delete"))
6457 if scaling_info["scaling_direction"] == "IN":
6458 for vdur in reversed(db_vnfr["vdur"]):
6459 if vdu_delete.get(vdur["vdu-id-ref"]):
6460 vdu_delete[vdur["vdu-id-ref"]] -= 1
6461 scaling_info["vdu"].append(
6462 {
6463 "name": vdur.get("name") or vdur.get("vdu-name"),
6464 "vdu_id": vdur["vdu-id-ref"],
6465 "interface": [],
6466 }
6467 )
6468 for interface in vdur["interfaces"]:
6469 scaling_info["vdu"][-1]["interface"].append(
6470 {
6471 "name": interface["name"],
6472 "ip_address": interface["ip-address"],
6473 "mac_address": interface.get("mac-address"),
6474 }
6475 )
6476 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6477
6478 # PRE-SCALE BEGIN
6479 step = "Executing pre-scale vnf-config-primitive"
6480 if scaling_descriptor.get("scaling-config-action"):
6481 for scaling_config_action in scaling_descriptor[
6482 "scaling-config-action"
6483 ]:
6484 if (
6485 scaling_config_action.get("trigger") == "pre-scale-in"
6486 and scaling_type == "SCALE_IN"
6487 ) or (
6488 scaling_config_action.get("trigger") == "pre-scale-out"
6489 and scaling_type == "SCALE_OUT"
6490 ):
6491 vnf_config_primitive = scaling_config_action[
6492 "vnf-config-primitive-name-ref"
6493 ]
6494 step = db_nslcmop_update[
6495 "detailed-status"
6496 ] = "executing pre-scale scaling-config-action '{}'".format(
6497 vnf_config_primitive
6498 )
6499
6500 # look for primitive
6501 for config_primitive in (
6502 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6503 ).get("config-primitive", ()):
6504 if config_primitive["name"] == vnf_config_primitive:
6505 break
6506 else:
6507 raise LcmException(
6508 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6509 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6510 "primitive".format(scaling_group, vnf_config_primitive)
6511 )
6512
6513 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6514 if db_vnfr.get("additionalParamsForVnf"):
6515 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6516
6517 scale_process = "VCA"
6518 db_nsr_update["config-status"] = "configuring pre-scaling"
6519 primitive_params = self._map_primitive_params(
6520 config_primitive, {}, vnfr_params
6521 )
6522
6523 # Pre-scale retry check: Check if this sub-operation has been executed before
6524 op_index = self._check_or_add_scale_suboperation(
6525 db_nslcmop,
6526 vnf_index,
6527 vnf_config_primitive,
6528 primitive_params,
6529 "PRE-SCALE",
6530 )
6531 if op_index == self.SUBOPERATION_STATUS_SKIP:
6532 # Skip sub-operation
6533 result = "COMPLETED"
6534 result_detail = "Done"
6535 self.logger.debug(
6536 logging_text
6537 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6538 vnf_config_primitive, result, result_detail
6539 )
6540 )
6541 else:
6542 if op_index == self.SUBOPERATION_STATUS_NEW:
6543 # New sub-operation: Get index of this sub-operation
6544 op_index = (
6545 len(db_nslcmop.get("_admin", {}).get("operations"))
6546 - 1
6547 )
6548 self.logger.debug(
6549 logging_text
6550 + "vnf_config_primitive={} New sub-operation".format(
6551 vnf_config_primitive
6552 )
6553 )
6554 else:
6555 # retry: Get registered params for this existing sub-operation
6556 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6557 op_index
6558 ]
6559 vnf_index = op.get("member_vnf_index")
6560 vnf_config_primitive = op.get("primitive")
6561 primitive_params = op.get("primitive_params")
6562 self.logger.debug(
6563 logging_text
6564 + "vnf_config_primitive={} Sub-operation retry".format(
6565 vnf_config_primitive
6566 )
6567 )
6568 # Execute the primitive, either with new (first-time) or registered (reintent) args
6569 ee_descriptor_id = config_primitive.get(
6570 "execution-environment-ref"
6571 )
6572 primitive_name = config_primitive.get(
6573 "execution-environment-primitive", vnf_config_primitive
6574 )
6575 ee_id, vca_type = self._look_for_deployed_vca(
6576 nsr_deployed["VCA"],
6577 member_vnf_index=vnf_index,
6578 vdu_id=None,
6579 vdu_count_index=None,
6580 ee_descriptor_id=ee_descriptor_id,
6581 )
6582 result, result_detail = await self._ns_execute_primitive(
6583 ee_id,
6584 primitive_name,
6585 primitive_params,
6586 vca_type=vca_type,
6587 vca_id=vca_id,
6588 )
6589 self.logger.debug(
6590 logging_text
6591 + "vnf_config_primitive={} Done with result {} {}".format(
6592 vnf_config_primitive, result, result_detail
6593 )
6594 )
6595 # Update operationState = COMPLETED | FAILED
6596 self._update_suboperation_status(
6597 db_nslcmop, op_index, result, result_detail
6598 )
6599
6600 if result == "FAILED":
6601 raise LcmException(result_detail)
6602 db_nsr_update["config-status"] = old_config_status
6603 scale_process = None
6604 # PRE-SCALE END
6605
6606 db_nsr_update[
6607 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6608 ] = nb_scale_op
6609 db_nsr_update[
6610 "_admin.scaling-group.{}.time".format(admin_scale_index)
6611 ] = time()
6612
6613 # SCALE-IN VCA - BEGIN
6614 if vca_scaling_info:
6615 step = db_nslcmop_update[
6616 "detailed-status"
6617 ] = "Deleting the execution environments"
6618 scale_process = "VCA"
6619 for vca_info in vca_scaling_info:
6620 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6621 member_vnf_index = str(vca_info["member-vnf-index"])
6622 self.logger.debug(
6623 logging_text + "vdu info: {}".format(vca_info)
6624 )
6625 if vca_info.get("osm_vdu_id"):
6626 vdu_id = vca_info["osm_vdu_id"]
6627 vdu_index = int(vca_info["vdu_index"])
6628 stage[
6629 1
6630 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6631 member_vnf_index, vdu_id, vdu_index
6632 )
6633 stage[2] = step = "Scaling in VCA"
6634 self._write_op_status(op_id=nslcmop_id, stage=stage)
6635 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6636 config_update = db_nsr["configurationStatus"]
6637 for vca_index, vca in enumerate(vca_update):
6638 if (
6639 (vca or vca.get("ee_id"))
6640 and vca["member-vnf-index"] == member_vnf_index
6641 and vca["vdu_count_index"] == vdu_index
6642 ):
6643 if vca.get("vdu_id"):
6644 config_descriptor = get_configuration(
6645 db_vnfd, vca.get("vdu_id")
6646 )
6647 elif vca.get("kdu_name"):
6648 config_descriptor = get_configuration(
6649 db_vnfd, vca.get("kdu_name")
6650 )
6651 else:
6652 config_descriptor = get_configuration(
6653 db_vnfd, db_vnfd["id"]
6654 )
6655 operation_params = (
6656 db_nslcmop.get("operationParams") or {}
6657 )
6658 exec_terminate_primitives = not operation_params.get(
6659 "skip_terminate_primitives"
6660 ) and vca.get("needed_terminate")
6661 task = asyncio.ensure_future(
6662 asyncio.wait_for(
6663 self.destroy_N2VC(
6664 logging_text,
6665 db_nslcmop,
6666 vca,
6667 config_descriptor,
6668 vca_index,
6669 destroy_ee=True,
6670 exec_primitives=exec_terminate_primitives,
6671 scaling_in=True,
6672 vca_id=vca_id,
6673 ),
6674 timeout=self.timeout_charm_delete,
6675 )
6676 )
6677 tasks_dict_info[task] = "Terminating VCA {}".format(
6678 vca.get("ee_id")
6679 )
6680 del vca_update[vca_index]
6681 del config_update[vca_index]
6682 # wait for pending tasks of terminate primitives
6683 if tasks_dict_info:
6684 self.logger.debug(
6685 logging_text
6686 + "Waiting for tasks {}".format(
6687 list(tasks_dict_info.keys())
6688 )
6689 )
6690 error_list = await self._wait_for_tasks(
6691 logging_text,
6692 tasks_dict_info,
6693 min(
6694 self.timeout_charm_delete, self.timeout_ns_terminate
6695 ),
6696 stage,
6697 nslcmop_id,
6698 )
6699 tasks_dict_info.clear()
6700 if error_list:
6701 raise LcmException("; ".join(error_list))
6702
6703 db_vca_and_config_update = {
6704 "_admin.deployed.VCA": vca_update,
6705 "configurationStatus": config_update,
6706 }
6707 self.update_db_2(
6708 "nsrs", db_nsr["_id"], db_vca_and_config_update
6709 )
6710 scale_process = None
6711 # SCALE-IN VCA - END
6712
6713 # SCALE RO - BEGIN
6714 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6715 scale_process = "RO"
6716 if self.ro_config.get("ng"):
6717 await self._scale_ng_ro(
6718 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6719 )
6720 scaling_info.pop("vdu-create", None)
6721 scaling_info.pop("vdu-delete", None)
6722
6723 scale_process = None
6724 # SCALE RO - END
6725
6726 # SCALE KDU - BEGIN
6727 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6728 scale_process = "KDU"
6729 await self._scale_kdu(
6730 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6731 )
6732 scaling_info.pop("kdu-create", None)
6733 scaling_info.pop("kdu-delete", None)
6734
6735 scale_process = None
6736 # SCALE KDU - END
6737
6738 if db_nsr_update:
6739 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6740
6741 # SCALE-UP VCA - BEGIN
6742 if vca_scaling_info:
6743 step = db_nslcmop_update[
6744 "detailed-status"
6745 ] = "Creating new execution environments"
6746 scale_process = "VCA"
6747 for vca_info in vca_scaling_info:
6748 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6749 member_vnf_index = str(vca_info["member-vnf-index"])
6750 self.logger.debug(
6751 logging_text + "vdu info: {}".format(vca_info)
6752 )
6753 vnfd_id = db_vnfr["vnfd-ref"]
6754 if vca_info.get("osm_vdu_id"):
6755 vdu_index = int(vca_info["vdu_index"])
6756 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6757 if db_vnfr.get("additionalParamsForVnf"):
6758 deploy_params.update(
6759 parse_yaml_strings(
6760 db_vnfr["additionalParamsForVnf"].copy()
6761 )
6762 )
6763 descriptor_config = get_configuration(
6764 db_vnfd, db_vnfd["id"]
6765 )
6766 if descriptor_config:
6767 vdu_id = None
6768 vdu_name = None
6769 kdu_name = None
6770 self._deploy_n2vc(
6771 logging_text=logging_text
6772 + "member_vnf_index={} ".format(member_vnf_index),
6773 db_nsr=db_nsr,
6774 db_vnfr=db_vnfr,
6775 nslcmop_id=nslcmop_id,
6776 nsr_id=nsr_id,
6777 nsi_id=nsi_id,
6778 vnfd_id=vnfd_id,
6779 vdu_id=vdu_id,
6780 kdu_name=kdu_name,
6781 member_vnf_index=member_vnf_index,
6782 vdu_index=vdu_index,
6783 vdu_name=vdu_name,
6784 deploy_params=deploy_params,
6785 descriptor_config=descriptor_config,
6786 base_folder=base_folder,
6787 task_instantiation_info=tasks_dict_info,
6788 stage=stage,
6789 )
6790 vdu_id = vca_info["osm_vdu_id"]
6791 vdur = find_in_list(
6792 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6793 )
6794 descriptor_config = get_configuration(db_vnfd, vdu_id)
6795 if vdur.get("additionalParams"):
6796 deploy_params_vdu = parse_yaml_strings(
6797 vdur["additionalParams"]
6798 )
6799 else:
6800 deploy_params_vdu = deploy_params
6801 deploy_params_vdu["OSM"] = get_osm_params(
6802 db_vnfr, vdu_id, vdu_count_index=vdu_index
6803 )
6804 if descriptor_config:
6805 vdu_name = None
6806 kdu_name = None
6807 stage[
6808 1
6809 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6810 member_vnf_index, vdu_id, vdu_index
6811 )
6812 stage[2] = step = "Scaling out VCA"
6813 self._write_op_status(op_id=nslcmop_id, stage=stage)
6814 self._deploy_n2vc(
6815 logging_text=logging_text
6816 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6817 member_vnf_index, vdu_id, vdu_index
6818 ),
6819 db_nsr=db_nsr,
6820 db_vnfr=db_vnfr,
6821 nslcmop_id=nslcmop_id,
6822 nsr_id=nsr_id,
6823 nsi_id=nsi_id,
6824 vnfd_id=vnfd_id,
6825 vdu_id=vdu_id,
6826 kdu_name=kdu_name,
6827 member_vnf_index=member_vnf_index,
6828 vdu_index=vdu_index,
6829 vdu_name=vdu_name,
6830 deploy_params=deploy_params_vdu,
6831 descriptor_config=descriptor_config,
6832 base_folder=base_folder,
6833 task_instantiation_info=tasks_dict_info,
6834 stage=stage,
6835 )
6836 # SCALE-UP VCA - END
6837 scale_process = None
6838
6839 # POST-SCALE BEGIN
6840 # execute primitive service POST-SCALING
6841 step = "Executing post-scale vnf-config-primitive"
6842 if scaling_descriptor.get("scaling-config-action"):
6843 for scaling_config_action in scaling_descriptor[
6844 "scaling-config-action"
6845 ]:
6846 if (
6847 scaling_config_action.get("trigger") == "post-scale-in"
6848 and scaling_type == "SCALE_IN"
6849 ) or (
6850 scaling_config_action.get("trigger") == "post-scale-out"
6851 and scaling_type == "SCALE_OUT"
6852 ):
6853 vnf_config_primitive = scaling_config_action[
6854 "vnf-config-primitive-name-ref"
6855 ]
6856 step = db_nslcmop_update[
6857 "detailed-status"
6858 ] = "executing post-scale scaling-config-action '{}'".format(
6859 vnf_config_primitive
6860 )
6861
6862 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6863 if db_vnfr.get("additionalParamsForVnf"):
6864 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6865
6866 # look for primitive
6867 for config_primitive in (
6868 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6869 ).get("config-primitive", ()):
6870 if config_primitive["name"] == vnf_config_primitive:
6871 break
6872 else:
6873 raise LcmException(
6874 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6875 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6876 "config-primitive".format(
6877 scaling_group, vnf_config_primitive
6878 )
6879 )
6880 scale_process = "VCA"
6881 db_nsr_update["config-status"] = "configuring post-scaling"
6882 primitive_params = self._map_primitive_params(
6883 config_primitive, {}, vnfr_params
6884 )
6885
6886 # Post-scale retry check: Check if this sub-operation has been executed before
6887 op_index = self._check_or_add_scale_suboperation(
6888 db_nslcmop,
6889 vnf_index,
6890 vnf_config_primitive,
6891 primitive_params,
6892 "POST-SCALE",
6893 )
6894 if op_index == self.SUBOPERATION_STATUS_SKIP:
6895 # Skip sub-operation
6896 result = "COMPLETED"
6897 result_detail = "Done"
6898 self.logger.debug(
6899 logging_text
6900 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6901 vnf_config_primitive, result, result_detail
6902 )
6903 )
6904 else:
6905 if op_index == self.SUBOPERATION_STATUS_NEW:
6906 # New sub-operation: Get index of this sub-operation
6907 op_index = (
6908 len(db_nslcmop.get("_admin", {}).get("operations"))
6909 - 1
6910 )
6911 self.logger.debug(
6912 logging_text
6913 + "vnf_config_primitive={} New sub-operation".format(
6914 vnf_config_primitive
6915 )
6916 )
6917 else:
6918 # retry: Get registered params for this existing sub-operation
6919 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6920 op_index
6921 ]
6922 vnf_index = op.get("member_vnf_index")
6923 vnf_config_primitive = op.get("primitive")
6924 primitive_params = op.get("primitive_params")
6925 self.logger.debug(
6926 logging_text
6927 + "vnf_config_primitive={} Sub-operation retry".format(
6928 vnf_config_primitive
6929 )
6930 )
6931 # Execute the primitive, either with new (first-time) or registered (reintent) args
6932 ee_descriptor_id = config_primitive.get(
6933 "execution-environment-ref"
6934 )
6935 primitive_name = config_primitive.get(
6936 "execution-environment-primitive", vnf_config_primitive
6937 )
6938 ee_id, vca_type = self._look_for_deployed_vca(
6939 nsr_deployed["VCA"],
6940 member_vnf_index=vnf_index,
6941 vdu_id=None,
6942 vdu_count_index=None,
6943 ee_descriptor_id=ee_descriptor_id,
6944 )
6945 result, result_detail = await self._ns_execute_primitive(
6946 ee_id,
6947 primitive_name,
6948 primitive_params,
6949 vca_type=vca_type,
6950 vca_id=vca_id,
6951 )
6952 self.logger.debug(
6953 logging_text
6954 + "vnf_config_primitive={} Done with result {} {}".format(
6955 vnf_config_primitive, result, result_detail
6956 )
6957 )
6958 # Update operationState = COMPLETED | FAILED
6959 self._update_suboperation_status(
6960 db_nslcmop, op_index, result, result_detail
6961 )
6962
6963 if result == "FAILED":
6964 raise LcmException(result_detail)
6965 db_nsr_update["config-status"] = old_config_status
6966 scale_process = None
6967 # POST-SCALE END
6968
6969 db_nsr_update[
6970 "detailed-status"
6971 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6972 db_nsr_update["operational-status"] = (
6973 "running"
6974 if old_operational_status == "failed"
6975 else old_operational_status
6976 )
6977 db_nsr_update["config-status"] = old_config_status
6978 return
6979 except (
6980 ROclient.ROClientException,
6981 DbException,
6982 LcmException,
6983 NgRoException,
6984 ) as e:
6985 self.logger.error(logging_text + "Exit Exception {}".format(e))
6986 exc = e
6987 except asyncio.CancelledError:
6988 self.logger.error(
6989 logging_text + "Cancelled Exception while '{}'".format(step)
6990 )
6991 exc = "Operation was cancelled"
6992 except Exception as e:
6993 exc = traceback.format_exc()
6994 self.logger.critical(
6995 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6996 exc_info=True,
6997 )
6998 finally:
6999 self._write_ns_status(
7000 nsr_id=nsr_id,
7001 ns_state=None,
7002 current_operation="IDLE",
7003 current_operation_id=None,
7004 )
7005 if tasks_dict_info:
7006 stage[1] = "Waiting for instantiate pending tasks."
7007 self.logger.debug(logging_text + stage[1])
7008 exc = await self._wait_for_tasks(
7009 logging_text,
7010 tasks_dict_info,
7011 self.timeout_ns_deploy,
7012 stage,
7013 nslcmop_id,
7014 nsr_id=nsr_id,
7015 )
7016 if exc:
7017 db_nslcmop_update[
7018 "detailed-status"
7019 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7020 nslcmop_operation_state = "FAILED"
7021 if db_nsr:
7022 db_nsr_update["operational-status"] = old_operational_status
7023 db_nsr_update["config-status"] = old_config_status
7024 db_nsr_update["detailed-status"] = ""
7025 if scale_process:
7026 if "VCA" in scale_process:
7027 db_nsr_update["config-status"] = "failed"
7028 if "RO" in scale_process:
7029 db_nsr_update["operational-status"] = "failed"
7030 db_nsr_update[
7031 "detailed-status"
7032 ] = "FAILED scaling nslcmop={} {}: {}".format(
7033 nslcmop_id, step, exc
7034 )
7035 else:
7036 error_description_nslcmop = None
7037 nslcmop_operation_state = "COMPLETED"
7038 db_nslcmop_update["detailed-status"] = "Done"
7039
7040 self._write_op_status(
7041 op_id=nslcmop_id,
7042 stage="",
7043 error_message=error_description_nslcmop,
7044 operation_state=nslcmop_operation_state,
7045 other_update=db_nslcmop_update,
7046 )
7047 if db_nsr:
7048 self._write_ns_status(
7049 nsr_id=nsr_id,
7050 ns_state=None,
7051 current_operation="IDLE",
7052 current_operation_id=None,
7053 other_update=db_nsr_update,
7054 )
7055
7056 if nslcmop_operation_state:
7057 try:
7058 msg = {
7059 "nsr_id": nsr_id,
7060 "nslcmop_id": nslcmop_id,
7061 "operationState": nslcmop_operation_state,
7062 }
7063 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7064 except Exception as e:
7065 self.logger.error(
7066 logging_text + "kafka_write notification Exception {}".format(e)
7067 )
7068 self.logger.debug(logging_text + "Exit")
7069 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7070
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs of a NS according to a pre-computed scaling plan.

        For every KDU entry in the plan it (in this exact order):
        1. On scale-in ("delete"): runs the vnfd's terminate-config-primitives
           (sorted by "seq") when no juju execution environment is declared.
        2. Calls the k8s connector's ``scale()`` for the target resource.
        3. On scale-out ("create"): runs the initial-config-primitives
           (sorted by "seq") when no juju execution environment is declared.

        :param logging_text: prefix text for log messages
        :param nsr_id: NS record id (used to address _admin.deployed.K8s in db)
        :param nsr_deployed: content of nsr "_admin.deployed" (looked up for the KDU)
        :param db_vnfd: vnfd database record of the VNF owning the KDUs
        :param vca_id: VCA id forwarded to the k8s connector calls
        :param scaling_info: dict with "kdu-create"/"kdu-delete" maps of
            kdu_name -> list of per-instance scaling entries
        :raises asyncio.TimeoutError: if a primitive (600s) or the scale
            operation (self.timeout_vca_on_error) does not finish in time
        """
        # NOTE(review): "or" means that when both kdu-create and kdu-delete are
        # present only kdu-create is iterated — presumably a single operation
        # carries only one of them; confirm against the scaling-plan builder.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # Locate the deployed KDU entry and its index inside
                # nsr._admin.deployed.K8s for this member-vnf-index
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db location the k8s connector uses to report progress/status
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Terminate primitives are run directly by the k8s
                    # connector only when no juju EE handles them
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # Descriptor order is not guaranteed: honour "seq"
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # Fixed 600s cap per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # Actual scale call — runs for both "create" and "delete",
                # between terminate (above) and initial (below) primitives
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Same juju-EE exclusion rule as for terminate primitives
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7176
7177 async def _scale_ng_ro(
7178 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7179 ):
7180 nsr_id = db_nslcmop["nsInstanceId"]
7181 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7182 db_vnfrs = {}
7183
7184 # read from db: vnfd's for every vnf
7185 db_vnfds = []
7186
7187 # for each vnf in ns, read vnfd
7188 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7189 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7190 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7191 # if we haven't this vnfd, read it from db
7192 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7193 # read from db
7194 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7195 db_vnfds.append(vnfd)
7196 n2vc_key = self.n2vc.get_public_key()
7197 n2vc_key_list = [n2vc_key]
7198 self.scale_vnfr(
7199 db_vnfr,
7200 vdu_scaling_info.get("vdu-create"),
7201 vdu_scaling_info.get("vdu-delete"),
7202 mark_delete=True,
7203 )
7204 # db_vnfr has been updated, update db_vnfrs to use it
7205 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7206 await self._instantiate_ng_ro(
7207 logging_text,
7208 nsr_id,
7209 db_nsd,
7210 db_nsr,
7211 db_nslcmop,
7212 db_vnfrs,
7213 db_vnfds,
7214 n2vc_key_list,
7215 stage=stage,
7216 start_deploy=time(),
7217 timeout_ns_deploy=self.timeout_ns_deploy,
7218 )
7219 if vdu_scaling_info.get("vdu-delete"):
7220 self.scale_vnfr(
7221 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7222 )
7223
7224 async def extract_prometheus_scrape_jobs(
7225 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7226 ):
7227 # look if exist a file called 'prometheus*.j2' and
7228 artifact_content = self.fs.dir_ls(artifact_path)
7229 job_file = next(
7230 (
7231 f
7232 for f in artifact_content
7233 if f.startswith("prometheus") and f.endswith(".j2")
7234 ),
7235 None,
7236 )
7237 if not job_file:
7238 return
7239 with self.fs.file_open((artifact_path, job_file), "r") as f:
7240 job_data = f.read()
7241
7242 # TODO get_service
7243 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7244 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7245 host_port = "80"
7246 vnfr_id = vnfr_id.replace("-", "")
7247 variables = {
7248 "JOB_NAME": vnfr_id,
7249 "TARGET_IP": target_ip,
7250 "EXPORTER_POD_IP": host_name,
7251 "EXPORTER_POD_PORT": host_port,
7252 }
7253 job_list = parse_job(job_data, variables)
7254 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7255 for job in job_list:
7256 if (
7257 not isinstance(job.get("job_name"), str)
7258 or vnfr_id not in job["job_name"]
7259 ):
7260 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7261 job["nsr_id"] = nsr_id
7262 job["vnfr_id"] = vnfr_id
7263 return job_list
7264
    async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
        """Run a start/stop/rebuild operation on one VDU instance through RO.

        :param nsr_id: NS record id (used for status writes and the RO call)
        :param nslcmop_id: id of the operation record being executed
        :param vnf_id: vnfr "_id" of the VNF that owns the target VDU
        :param additional_param: dict with at least "vdu_id" and "count-index"
            selecting the concrete VDU instance
        :param operation_type: operation name sent to RO and stored as the
            nsr operational-status (e.g. start/stop/rebuild)
        :return: tuple (state, detail): ("COMPLETED", "Done") on success,
            ("FAILED", "Error in operate VNF ...") on any error
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # Resolve the target VDU instance from the vnfr record
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            # count-index disambiguates among multiple instances of the VDU
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info identifies the target VIM for RO
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # Block until RO reports the action finished (or times out)
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy,
                self.timeout_operate, None, "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        # Only reached on error: every except branch above sets exc
        return "FAILED", "Error in operate VNF {}".format(exc)
7342
7343 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7344 """
7345 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7346
7347 :param: vim_account_id: VIM Account ID
7348
7349 :return: (cloud_name, cloud_credential)
7350 """
7351 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7352 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7353
7354 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7355 """
7356 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7357
7358 :param: vim_account_id: VIM Account ID
7359
7360 :return: (cloud_name, cloud_credential)
7361 """
7362 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7363 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7364
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        Forwards the operationParams of the nslcmop to RO ("migrate") and
        waits for the RO action to complete. Always leaves the NS in IDLE
        state, writes the operation result and notifies kafka topic "ns"
        with event "migrated".

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled on success below but is never
        # written to the nsrs collection nor passed to _write_ns_status —
        # confirm whether the nsr "detailed-status: Done" update is intended.
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # Build RO payload from the operation parameters as-is
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # Poll RO until the migrate action finishes or times out
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
                operation="migrate"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # Always release the NS from the MIGRATING state
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            # Notify subscribers of the final operation state (best-effort)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7463
7464
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Launches in parallel (a) a heal at RO/VIM (heal_RO as an asyncio task)
        and (b) re-deployment of the execution environments (N2VC) for each
        target VNF/VDU listed in operationParams.healVnfData. Waits for all
        tasks, updates nsr/nslcmop status in db and notifies kafka ("healed").

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # remembered to restore nsr status on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # Launch the RO heal concurrently; it is awaited in the finally
            # block together with the N2VC tasks via _wait_for_tasks
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    for target_vdu in target_vnf["additionalParams"].get("vdu", None):
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index",0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            # NOTE(review): target_instance stays None when no
                            # vdur matches (vdu-name, count-index); the .get()
                            # below would then raise AttributeError — confirm
                            # the target is guaranteed to exist in vdur.
                            for instance in db_vnfr.get("vdur", None):
                                if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
                                    target_instance = instance
                                    break
                            if vnf_ip_address == target_instance.get("ip-address"):
                                # VNF-level EE: vdu fields forced to None/0
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # Wait for the RO and N2VC tasks launched above; _wait_for_tasks
            # returns an error description (or None) that overwrites exc
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore pre-heal status, then mark the failed part
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, exc
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # Notify subscribers of the final operation state (best-effort)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7732
7733 async def heal_RO(
7734 self,
7735 logging_text,
7736 nsr_id,
7737 db_nslcmop,
7738 stage,
7739 ):
7740 """
7741 Heal at RO
7742 :param logging_text: preffix text to use at logging
7743 :param nsr_id: nsr identity
7744 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7745 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7746 :return: None or exception
7747 """
7748 def get_vim_account(vim_account_id):
7749 nonlocal db_vims
7750 if vim_account_id in db_vims:
7751 return db_vims[vim_account_id]
7752 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7753 db_vims[vim_account_id] = db_vim
7754 return db_vim
7755
7756 try:
7757 start_heal = time()
7758 ns_params = db_nslcmop.get("operationParams")
7759 if ns_params and ns_params.get("timeout_ns_heal"):
7760 timeout_ns_heal = ns_params["timeout_ns_heal"]
7761 else:
7762 timeout_ns_heal = self.timeout.get(
7763 "ns_heal", self.timeout_ns_heal
7764 )
7765
7766 db_vims = {}
7767
7768 nslcmop_id = db_nslcmop["_id"]
7769 target = {
7770 "action_id": nslcmop_id,
7771 }
7772 self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
7773 target.update(db_nslcmop.get("operationParams", {}))
7774
7775 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7776 desc = await self.RO.recreate(nsr_id, target)
7777 self.logger.debug("RO return > {}".format(desc))
7778 action_id = desc["action_id"]
7779 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7780 await self._wait_ng_ro(
7781 nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
7782 operation="healing"
7783 )
7784
7785 # Updating NSR
7786 db_nsr_update = {
7787 "_admin.deployed.RO.operational-status": "running",
7788 "detailed-status": " ".join(stage),
7789 }
7790 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7791 self._write_op_status(nslcmop_id, stage)
7792 self.logger.debug(
7793 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7794 )
7795
7796 except Exception as e:
7797 stage[2] = "ERROR healing at VIM"
7798 #self.set_vnfr_at_error(db_vnfrs, str(e))
7799 self.logger.error(
7800 "Error healing at VIM {}".format(e),
7801 exc_info=not isinstance(
7802 e,
7803 (
7804 ROclient.ROClientException,
7805 LcmException,
7806 DbException,
7807 NgRoException,
7808 ),
7809 ),
7810 )
7811 raise
7812
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        Launch one heal_N2VC asyncio task per execution environment declared in
        `descriptor_config`, reusing (or creating) the matching entry at
        <nsrs>._admin.deployed.VCA.

        :param logging_text: prefix for every log line of this operation
        :param db_nsr: nsr record; its _admin.deployed.VCA list is read and may
            be extended in place when no matching entry exists
        :param descriptor_config: configuration descriptor holding either an
            "execution-environment-list" or a top-level "juju" section
        :param task_instantiation_info: dict task -> human-readable name, filled
            here so the caller can track/await the launched tasks
        :param stage: mutable list with the stage texts, passed through to
            heal_N2VC
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        # Normalize the descriptor into a list of execution-environment items.
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the ee item: juju charm (proxy,
            # native or k8s) or helm chart (v2/"helm" vs v3); anything else
            # (e.g. scripts) is skipped.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Find an already deployed VCA entry that matches this element; the
            # for..else creates a new entry (and persists it) when none matches.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy consistent with what was persisted
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            # register so the LCM can cancel/track the task by ns/op ids
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
7965
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """
        Re-attach and (optionally) re-configure the VCA execution environment of
        one element (NS / VNF / VDU / KDU) as part of a heal operation.

        Heal-specific behavior visible in this method, compared with the
        instantiate path:
        - a new execution environment is registered only for native charms;
        - Day-1 (initial config) primitives run only when the "run-day1"
          deploy param is truthy;
        - the relations and prometheus-metrics steps are kept commented out
          (marked "Not sure if this need to be done when healing").

        :param vca_index: position of this VCA inside _admin.deployed.VCA
        :param config_descriptor: descriptor with initial/terminate config
            primitives and config-access information
        :param deploy_params: params for primitives; mutated here to add
            "rw_mgmt_ip" (and possibly "ns_config_info")
        :param stage: mutable list of stage texts, updated and written to the op
        :raises LcmException: wrapping any failure after marking the
            configurationStatus entry as BROKEN
        """
        nsr_id = db_nsr["_id"]
        # dotted db path prefix for this VCA entry, e.g. "_admin.deployed.VCA.3."
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                #other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # a primitive literally named "config" carries the charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log the traceback for unexpected exception types
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8369
8370 async def _wait_heal_ro(
8371 self,
8372 nsr_id,
8373 timeout=600,
8374 ):
8375 start_time = time()
8376 while time() <= start_time + timeout:
8377 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8378 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8379 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8380 if operational_status_ro != "healing":
8381 break
8382 await asyncio.sleep(15, loop=self.loop)
8383 else: # timeout_ns_deploy
8384 raise NgRoException("Timeout waiting ns to deploy")
8385
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS.

        Acquires the HA lock for the operation, forwards the operationParams to
        NG-RO via RO.vertical_scale, waits for the RO action to complete and
        finally writes the operation result and a "verticalscaled" kafka
        notification.

        :param nsr_id: NS Instance ID
        :param nslcmop_id: nslcmop ID of the vertical-scale operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never persisted here
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id
            )
            step = "Getting nslcmop from database"
            self.logger.debug(step + " after having waited for previous tasks to be completed")
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # the RO target is simply the operation params as-is
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the action finished (or timeout)
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale,
                operation="verticalscale"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # always return the NS to IDLE and record the operation outcome
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # best-effort notification: a kafka failure is only logged
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")