Feature 10922: Stop, start and rebuild
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 )
34
35 from osm_lcm import ROclient
36 from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41 )
42 from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50 )
51 from osm_lcm.ng_ro import NgRoClient, NgRoException
52 from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
59 check_juju_bundle_existence,
60 get_charm_artifact_path,
61 )
62 from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66 )
67 from osm_lcm.data_utils.vnfd import (
68 get_kdu,
69 get_kdu_services,
70 get_relation_list,
71 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
83 get_kdu_resource_profile,
84 find_software_version,
85 )
86 from osm_lcm.data_utils.list_utils import find_in_list
87 from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92 )
93 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94 from osm_lcm.data_utils.database.vim_account import VimAccountDB
95 from n2vc.definitions import RelationEndpoint
96 from n2vc.k8s_helm_conn import K8sHelmConnector
97 from n2vc.k8s_helm3_conn import K8sHelm3Connector
98 from n2vc.k8s_juju_conn import K8sJujuConnector
99
100 from osm_common.dbbase import DbException
101 from osm_common.fsbase import FsException
102
103 from osm_lcm.data_utils.database.database import Database
104 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105
106 from n2vc.n2vc_juju_conn import N2VCJujuConnector
107 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
108
109 from osm_lcm.lcm_helm_conn import LCMHelmConn
110 from osm_lcm.osm_config import OsmConfigBuilder
111 from osm_lcm.prometheus import parse_job
112
113 from copy import copy, deepcopy
114 from time import time
115 from uuid import uuid4
116
117 from random import randint
118
119 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
120
121
class NsLcm(LcmBase):
    """Network Service (NS) lifecycle manager.

    Drives NS operations (instantiate, terminate, scale, heal, migrate,
    operate, update) by coordinating the RO client, the N2VC/Juju
    connectors and the K8s (helm2/helm3/juju-bundle) connectors, and by
    keeping the "nsrs"/"vnfrs" database collections up to date.
    All timeouts below are in seconds.
    """

    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate (start/stop) vnfs
    # sentinel values used by the sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message-bus instance, forwarded to LcmBase
        :param lcm_tasks: registry of running LCM tasks, created by the caller
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop, shared with every connector created here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # shared DB/FS singletons (already initialized by the LCM main module)
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so local changes do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connectors: helm v2, helm v3 and juju-bundle
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # KDU deployment-type -> K8s connector dispatch table
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # charm/execution-environment type -> VCA connector dispatch table
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # LCM operation type -> RO status-polling coroutine
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
        }
228
229 @staticmethod
230 def increment_ip_mac(ip_mac, vm_index=1):
231 if not isinstance(ip_mac, str):
232 return ip_mac
233 try:
234 # try with ipv4 look for last dot
235 i = ip_mac.rfind(".")
236 if i > 0:
237 i += 1
238 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
239 # try with ipv6 or mac look for last colon. Operate in hex
240 i = ip_mac.rfind(":")
241 if i > 0:
242 i += 1
243 # format in hex, len can be 2 for mac or 4 for ipv6
244 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
245 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
246 )
247 except Exception:
248 pass
249 return None
250
251 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
252
253 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
254
255 try:
256 # TODO filter RO descriptor fields...
257
258 # write to database
259 db_dict = dict()
260 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
261 db_dict["deploymentStatus"] = ro_descriptor
262 self.update_db_2("nsrs", nsrs_id, db_dict)
263
264 except Exception as e:
265 self.logger.warn(
266 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
267 )
268
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked by N2VC when juju reports a change on a watched model.

        Refreshes the nsr "vcaStatus" with the current juju status and, when the
        NS is READY or DEGRADED, re-checks machines and applications to toggle
        nsState between READY and DEGRADED. Best-effort: any error (other than
        cancellation/timeout) is logged and swallowed.

        :param table: originating table (not used; the nsr is always read from "nsrs")
        :param filter: database filter; its "_id" is used as the nsr id
        :param path: dotted path of the changed element; the trailing integer is
            taken as the VCA index within _admin.deployed.VCA
        :param updated_data: changed data (not used here)
        :param vca_id: optional VCA id forwarded to the n2vc calls
        :return: None
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #     .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict
            await self.n2vc.update_vca_status(db_dict["vcaStatus"], vca_id=vca_id)

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of the path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so both assignments below raise KeyError, which is then
                # swallowed by the except clause — the intended status update
                # apparently never reaches the database; confirm what is intended
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the caller
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
371
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param vca_id: optional VCA id, forwarded to the connector calls
        :cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #     .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            # ask the matching K8s connector for the complete KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            if cluster_type in ("juju-bundle", "juju"):
                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
                # status in a similar way between Juju Bundles and Helm Charts on this side
                await self.k8sclusterjuju.update_vca_status(
                    db_dict["vcaStatus"],
                    kdu_instance,
                    vca_id=vca_id,
                )

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # cancellation/timeout must propagate to the caller
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
420
421 @staticmethod
422 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
423 try:
424 env = Environment(undefined=StrictUndefined)
425 template = env.from_string(cloud_init_text)
426 return template.render(additional_params or {})
427 except UndefinedError as e:
428 raise LcmException(
429 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
430 "file, must be provided in the instantiation parameters inside the "
431 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
432 )
433 except (TemplateError, TemplateNotFound) as e:
434 raise LcmException(
435 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
436 vnfd_id, vdu_id, e
437 )
438 )
439
440 def _get_vdu_cloud_init_content(self, vdu, vnfd):
441 cloud_init_content = cloud_init_file = None
442 try:
443 if vdu.get("cloud-init-file"):
444 base_folder = vnfd["_admin"]["storage"]
445 if base_folder["pkg-dir"]:
446 cloud_init_file = "{}/{}/cloud_init/{}".format(
447 base_folder["folder"],
448 base_folder["pkg-dir"],
449 vdu["cloud-init-file"],
450 )
451 else:
452 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
453 base_folder["folder"],
454 vdu["cloud-init-file"],
455 )
456 with self.fs.file_open(cloud_init_file, "r") as ci_file:
457 cloud_init_content = ci_file.read()
458 elif vdu.get("cloud-init"):
459 cloud_init_content = vdu["cloud-init"]
460
461 return cloud_init_content
462 except FsException as e:
463 raise LcmException(
464 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
465 vnfd["id"], vdu["id"], cloud_init_file, e
466 )
467 )
468
469 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
470 vdur = next(
471 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
472 )
473 additional_params = vdur.get("additionalParams")
474 return parse_yaml_strings(additional_params)
475
476 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
477 """
478 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
479 :param vnfd: input vnfd
480 :param new_id: overrides vnf id if provided
481 :param additionalParams: Instantiation params for VNFs provided
482 :param nsrId: Id of the NSR
483 :return: copy of vnfd
484 """
485 vnfd_RO = deepcopy(vnfd)
486 # remove unused by RO configuration, monitoring, scaling and internal keys
487 vnfd_RO.pop("_id", None)
488 vnfd_RO.pop("_admin", None)
489 vnfd_RO.pop("monitoring-param", None)
490 vnfd_RO.pop("scaling-group-descriptor", None)
491 vnfd_RO.pop("kdu", None)
492 vnfd_RO.pop("k8s-cluster", None)
493 if new_id:
494 vnfd_RO["id"] = new_id
495
496 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
497 for vdu in get_iterable(vnfd_RO, "vdu"):
498 vdu.pop("cloud-init-file", None)
499 vdu.pop("cloud-init", None)
500 return vnfd_RO
501
502 @staticmethod
503 def ip_profile_2_RO(ip_profile):
504 RO_ip_profile = deepcopy(ip_profile)
505 if "dns-server" in RO_ip_profile:
506 if isinstance(RO_ip_profile["dns-server"], list):
507 RO_ip_profile["dns-address"] = []
508 for ds in RO_ip_profile.pop("dns-server"):
509 RO_ip_profile["dns-address"].append(ds["address"])
510 else:
511 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
512 if RO_ip_profile.get("ip-version") == "ipv4":
513 RO_ip_profile["ip-version"] = "IPv4"
514 if RO_ip_profile.get("ip-version") == "ipv6":
515 RO_ip_profile["ip-version"] = "IPv6"
516 if "dhcp-params" in RO_ip_profile:
517 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
518 return RO_ip_profile
519
520 def _get_ro_vim_id_for_vim_account(self, vim_account):
521 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
522 if db_vim["_admin"]["operationalState"] != "ENABLED":
523 raise LcmException(
524 "VIM={} is not available. operationalState={}".format(
525 vim_account, db_vim["_admin"]["operationalState"]
526 )
527 )
528 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
529 return RO_vim_id
530
531 def get_ro_wim_id_for_wim_account(self, wim_account):
532 if isinstance(wim_account, str):
533 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
534 if db_wim["_admin"]["operationalState"] != "ENABLED":
535 raise LcmException(
536 "WIM={} is not available. operationalState={}".format(
537 wim_account, db_wim["_admin"]["operationalState"]
538 )
539 )
540 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
541 return RO_wim_id
542 else:
543 return wim_account
544
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale-out/scale-in operation to the vnfr's vdur list and persist it.

        New vdur entries are cloned from the newest existing vdur of the same
        vdu (or from the saved "vdur-template" when scaling from 0). When
        scaling to 0 instances, the last vdur is stored as "vdur-template" so a
        later scale-out can recreate it.

        :param db_vnfr: vnfr content as read from the database; modified in
            place (its "vdur" list is refreshed from the database at the end)
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: when True, removed vdur entries are only marked
            DELETING in the database instead of being pulled out
        :return: None
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # newest existing vdur of this vdu is the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # clone the source vdur and reset its instance-specific fields
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed-ip/fixed-mac interfaces get a deterministic
                        # increment; others get fresh addresses from the VIM
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the newest vdu_count entries of this vdu as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
656
657 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
658 """
659 Updates database nsr with the RO info for the created vld
660 :param ns_update_nsr: dictionary to be filled with the updated info
661 :param db_nsr: content of db_nsr. This is also modified
662 :param nsr_desc_RO: nsr descriptor from RO
663 :return: Nothing, LcmException is raised on errors
664 """
665
666 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
667 for net_RO in get_iterable(nsr_desc_RO, "nets"):
668 if vld["id"] != net_RO.get("ns_net_osm_id"):
669 continue
670 vld["vim-id"] = net_RO.get("vim_net_id")
671 vld["name"] = net_RO.get("vim_name")
672 vld["status"] = net_RO.get("status")
673 vld["status-detailed"] = net_RO.get("error_msg")
674 ns_update_nsr["vld.{}".format(vld_index)] = vld
675 break
676 else:
677 raise LcmException(
678 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
679 )
680
    def set_vnfr_at_error(self, db_vnfrs, error_text):
        """Mark every vnfr (and its vdur entries without a status) as ERROR.

        Updates both the in-memory dictionaries and the "vnfrs" collection.
        Best-effort: database errors are logged and swallowed.

        :param db_vnfrs: dict member-vnf-index -> vnfr content (modified in place)
        :param error_text: detail text stored in the in-memory vdur entries
        :return: None
        """
        try:
            for db_vnfr in db_vnfrs.values():
                vnfr_update = {"status": "ERROR"}
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # only vdur entries that never got a status are touched
                    if "status" not in vdur:
                        vdur["status"] = "ERROR"
                        vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                        if error_text:
                            vdur["status-detailed"] = str(error_text)
                            # NOTE(review): the database gets the literal "ERROR"
                            # while the in-memory dict gets error_text — looks
                            # like this should be str(error_text) too; confirm
                            vnfr_update[
                                "vdur.{}.status-detailed".format(vdu_index)
                            ] = "ERROR"
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        except DbException as e:
            self.logger.error("Cannot update vnf. {}".format(e))
697
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ';' - keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # match vdur to the RO vm with the same vdu id and replica index
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM; nothing to update
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                # for/else: no RO interface matched this vdur interface
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        # for/else: no RO vm matched this vdur (id + count-index)
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        # for/else: no RO net matched this internal vld
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                # for/else: RO returned no vnf for this member index
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
794
795 def _get_ns_config_info(self, nsr_id):
796 """
797 Generates a mapping between vnf,vdu elements and the N2VC id
798 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
799 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
800 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
801 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
802 """
803 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
804 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
805 mapping = {}
806 ns_config_info = {"osm-config-mapping": mapping}
807 for vca in vca_deployed_list:
808 if not vca["member-vnf-index"]:
809 continue
810 if not vca["vdu_id"]:
811 mapping[vca["member-vnf-index"]] = vca["application"]
812 else:
813 mapping[
814 "{}.{}.{}".format(
815 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
816 )
817 ] = vca["application"]
818 return ns_config_info
819
820 async def _instantiate_ng_ro(
821 self,
822 logging_text,
823 nsr_id,
824 nsd,
825 db_nsr,
826 db_nslcmop,
827 db_vnfrs,
828 db_vnfds,
829 n2vc_key_list,
830 stage,
831 start_deploy,
832 timeout_ns_deploy,
833 ):
834
835 db_vims = {}
836
837 def get_vim_account(vim_account_id):
838 nonlocal db_vims
839 if vim_account_id in db_vims:
840 return db_vims[vim_account_id]
841 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
842 db_vims[vim_account_id] = db_vim
843 return db_vim
844
845 # modify target_vld info with instantiation parameters
846 def parse_vld_instantiation_params(
847 target_vim, target_vld, vld_params, target_sdn
848 ):
849 if vld_params.get("ip-profile"):
850 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
851 "ip-profile"
852 ]
853 if vld_params.get("provider-network"):
854 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
855 "provider-network"
856 ]
857 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
858 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
859 "provider-network"
860 ]["sdn-ports"]
861 if vld_params.get("wimAccountId"):
862 target_wim = "wim:{}".format(vld_params["wimAccountId"])
863 target_vld["vim_info"][target_wim] = {}
864 for param in ("vim-network-name", "vim-network-id"):
865 if vld_params.get(param):
866 if isinstance(vld_params[param], dict):
867 for vim, vim_net in vld_params[param].items():
868 other_target_vim = "vim:" + vim
869 populate_dict(
870 target_vld["vim_info"],
871 (other_target_vim, param.replace("-", "_")),
872 vim_net,
873 )
874 else: # isinstance str
875 target_vld["vim_info"][target_vim][
876 param.replace("-", "_")
877 ] = vld_params[param]
878 if vld_params.get("common_id"):
879 target_vld["common_id"] = vld_params.get("common_id")
880
881 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
882 def update_ns_vld_target(target, ns_params):
883 for vnf_params in ns_params.get("vnf", ()):
884 if vnf_params.get("vimAccountId"):
885 target_vnf = next(
886 (
887 vnfr
888 for vnfr in db_vnfrs.values()
889 if vnf_params["member-vnf-index"]
890 == vnfr["member-vnf-index-ref"]
891 ),
892 None,
893 )
894 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
895 for a_index, a_vld in enumerate(target["ns"]["vld"]):
896 target_vld = find_in_list(
897 get_iterable(vdur, "interfaces"),
898 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
899 )
900 if target_vld:
901 if vnf_params.get("vimAccountId") not in a_vld.get(
902 "vim_info", {}
903 ):
904 target["ns"]["vld"][a_index].get("vim_info").update(
905 {
906 "vim:{}".format(vnf_params["vimAccountId"]): {
907 "vim_network_name": ""
908 }
909 }
910 )
911
912 nslcmop_id = db_nslcmop["_id"]
913 target = {
914 "name": db_nsr["name"],
915 "ns": {"vld": []},
916 "vnf": [],
917 "image": deepcopy(db_nsr["image"]),
918 "flavor": deepcopy(db_nsr["flavor"]),
919 "action_id": nslcmop_id,
920 "cloud_init_content": {},
921 }
922 for image in target["image"]:
923 image["vim_info"] = {}
924 for flavor in target["flavor"]:
925 flavor["vim_info"] = {}
926 if db_nsr.get("affinity-or-anti-affinity-group"):
927 target["affinity-or-anti-affinity-group"] = deepcopy(
928 db_nsr["affinity-or-anti-affinity-group"]
929 )
930 for affinity_or_anti_affinity_group in target[
931 "affinity-or-anti-affinity-group"
932 ]:
933 affinity_or_anti_affinity_group["vim_info"] = {}
934
935 if db_nslcmop.get("lcmOperationType") != "instantiate":
936 # get parameters of instantiation:
937 db_nslcmop_instantiate = self.db.get_list(
938 "nslcmops",
939 {
940 "nsInstanceId": db_nslcmop["nsInstanceId"],
941 "lcmOperationType": "instantiate",
942 },
943 )[-1]
944 ns_params = db_nslcmop_instantiate.get("operationParams")
945 else:
946 ns_params = db_nslcmop.get("operationParams")
947 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
948 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
949
950 cp2target = {}
951 for vld_index, vld in enumerate(db_nsr.get("vld")):
952 target_vim = "vim:{}".format(ns_params["vimAccountId"])
953 target_vld = {
954 "id": vld["id"],
955 "name": vld["name"],
956 "mgmt-network": vld.get("mgmt-network", False),
957 "type": vld.get("type"),
958 "vim_info": {
959 target_vim: {
960 "vim_network_name": vld.get("vim-network-name"),
961 "vim_account_id": ns_params["vimAccountId"],
962 }
963 },
964 }
965 # check if this network needs SDN assist
966 if vld.get("pci-interfaces"):
967 db_vim = get_vim_account(ns_params["vimAccountId"])
968 sdnc_id = db_vim["config"].get("sdn-controller")
969 if sdnc_id:
970 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
971 target_sdn = "sdn:{}".format(sdnc_id)
972 target_vld["vim_info"][target_sdn] = {
973 "sdn": True,
974 "target_vim": target_vim,
975 "vlds": [sdn_vld],
976 "type": vld.get("type"),
977 }
978
979 nsd_vnf_profiles = get_vnf_profiles(nsd)
980 for nsd_vnf_profile in nsd_vnf_profiles:
981 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
982 if cp["virtual-link-profile-id"] == vld["id"]:
983 cp2target[
984 "member_vnf:{}.{}".format(
985 cp["constituent-cpd-id"][0][
986 "constituent-base-element-id"
987 ],
988 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
989 )
990 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
991
992 # check at nsd descriptor, if there is an ip-profile
993 vld_params = {}
994 nsd_vlp = find_in_list(
995 get_virtual_link_profiles(nsd),
996 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
997 == vld["id"],
998 )
999 if (
1000 nsd_vlp
1001 and nsd_vlp.get("virtual-link-protocol-data")
1002 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1003 ):
1004 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1005 "l3-protocol-data"
1006 ]
1007 ip_profile_dest_data = {}
1008 if "ip-version" in ip_profile_source_data:
1009 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1010 "ip-version"
1011 ]
1012 if "cidr" in ip_profile_source_data:
1013 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1014 "cidr"
1015 ]
1016 if "gateway-ip" in ip_profile_source_data:
1017 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1018 "gateway-ip"
1019 ]
1020 if "dhcp-enabled" in ip_profile_source_data:
1021 ip_profile_dest_data["dhcp-params"] = {
1022 "enabled": ip_profile_source_data["dhcp-enabled"]
1023 }
1024 vld_params["ip-profile"] = ip_profile_dest_data
1025
1026 # update vld_params with instantiation params
1027 vld_instantiation_params = find_in_list(
1028 get_iterable(ns_params, "vld"),
1029 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1030 )
1031 if vld_instantiation_params:
1032 vld_params.update(vld_instantiation_params)
1033 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1034 target["ns"]["vld"].append(target_vld)
1035 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1036 update_ns_vld_target(target, ns_params)
1037
1038 for vnfr in db_vnfrs.values():
1039 vnfd = find_in_list(
1040 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1041 )
1042 vnf_params = find_in_list(
1043 get_iterable(ns_params, "vnf"),
1044 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1045 )
1046 target_vnf = deepcopy(vnfr)
1047 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1048 for vld in target_vnf.get("vld", ()):
1049 # check if connected to a ns.vld, to fill target'
1050 vnf_cp = find_in_list(
1051 vnfd.get("int-virtual-link-desc", ()),
1052 lambda cpd: cpd.get("id") == vld["id"],
1053 )
1054 if vnf_cp:
1055 ns_cp = "member_vnf:{}.{}".format(
1056 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1057 )
1058 if cp2target.get(ns_cp):
1059 vld["target"] = cp2target[ns_cp]
1060
1061 vld["vim_info"] = {
1062 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1063 }
1064 # check if this network needs SDN assist
1065 target_sdn = None
1066 if vld.get("pci-interfaces"):
1067 db_vim = get_vim_account(vnfr["vim-account-id"])
1068 sdnc_id = db_vim["config"].get("sdn-controller")
1069 if sdnc_id:
1070 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1071 target_sdn = "sdn:{}".format(sdnc_id)
1072 vld["vim_info"][target_sdn] = {
1073 "sdn": True,
1074 "target_vim": target_vim,
1075 "vlds": [sdn_vld],
1076 "type": vld.get("type"),
1077 }
1078
1079 # check at vnfd descriptor, if there is an ip-profile
1080 vld_params = {}
1081 vnfd_vlp = find_in_list(
1082 get_virtual_link_profiles(vnfd),
1083 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1084 )
1085 if (
1086 vnfd_vlp
1087 and vnfd_vlp.get("virtual-link-protocol-data")
1088 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1089 ):
1090 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1091 "l3-protocol-data"
1092 ]
1093 ip_profile_dest_data = {}
1094 if "ip-version" in ip_profile_source_data:
1095 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1096 "ip-version"
1097 ]
1098 if "cidr" in ip_profile_source_data:
1099 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1100 "cidr"
1101 ]
1102 if "gateway-ip" in ip_profile_source_data:
1103 ip_profile_dest_data[
1104 "gateway-address"
1105 ] = ip_profile_source_data["gateway-ip"]
1106 if "dhcp-enabled" in ip_profile_source_data:
1107 ip_profile_dest_data["dhcp-params"] = {
1108 "enabled": ip_profile_source_data["dhcp-enabled"]
1109 }
1110
1111 vld_params["ip-profile"] = ip_profile_dest_data
1112 # update vld_params with instantiation params
1113 if vnf_params:
1114 vld_instantiation_params = find_in_list(
1115 get_iterable(vnf_params, "internal-vld"),
1116 lambda i_vld: i_vld["name"] == vld["id"],
1117 )
1118 if vld_instantiation_params:
1119 vld_params.update(vld_instantiation_params)
1120 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1121
1122 vdur_list = []
1123 for vdur in target_vnf.get("vdur", ()):
1124 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1125 continue # This vdu must not be created
1126 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1127
1128 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1129
1130 if ssh_keys_all:
1131 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1132 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1133 if (
1134 vdu_configuration
1135 and vdu_configuration.get("config-access")
1136 and vdu_configuration.get("config-access").get("ssh-access")
1137 ):
1138 vdur["ssh-keys"] = ssh_keys_all
1139 vdur["ssh-access-required"] = vdu_configuration[
1140 "config-access"
1141 ]["ssh-access"]["required"]
1142 elif (
1143 vnf_configuration
1144 and vnf_configuration.get("config-access")
1145 and vnf_configuration.get("config-access").get("ssh-access")
1146 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1147 ):
1148 vdur["ssh-keys"] = ssh_keys_all
1149 vdur["ssh-access-required"] = vnf_configuration[
1150 "config-access"
1151 ]["ssh-access"]["required"]
1152 elif ssh_keys_instantiation and find_in_list(
1153 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1154 ):
1155 vdur["ssh-keys"] = ssh_keys_instantiation
1156
1157 self.logger.debug("NS > vdur > {}".format(vdur))
1158
1159 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1160 # cloud-init
1161 if vdud.get("cloud-init-file"):
1162 vdur["cloud-init"] = "{}:file:{}".format(
1163 vnfd["_id"], vdud.get("cloud-init-file")
1164 )
1165 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1166 if vdur["cloud-init"] not in target["cloud_init_content"]:
1167 base_folder = vnfd["_admin"]["storage"]
1168 if base_folder["pkg-dir"]:
1169 cloud_init_file = "{}/{}/cloud_init/{}".format(
1170 base_folder["folder"],
1171 base_folder["pkg-dir"],
1172 vdud.get("cloud-init-file"),
1173 )
1174 else:
1175 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1176 base_folder["folder"],
1177 vdud.get("cloud-init-file"),
1178 )
1179 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1180 target["cloud_init_content"][
1181 vdur["cloud-init"]
1182 ] = ci_file.read()
1183 elif vdud.get("cloud-init"):
1184 vdur["cloud-init"] = "{}:vdu:{}".format(
1185 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1186 )
1187 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1188 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1189 "cloud-init"
1190 ]
1191 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1192 deploy_params_vdu = self._format_additional_params(
1193 vdur.get("additionalParams") or {}
1194 )
1195 deploy_params_vdu["OSM"] = get_osm_params(
1196 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1197 )
1198 vdur["additionalParams"] = deploy_params_vdu
1199
1200 # flavor
1201 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1202 if target_vim not in ns_flavor["vim_info"]:
1203 ns_flavor["vim_info"][target_vim] = {}
1204
1205 # deal with images
1206 # in case alternative images are provided we must check if they should be applied
1207 # for the vim_type, modify the vim_type taking into account
1208 ns_image_id = int(vdur["ns-image-id"])
1209 if vdur.get("alt-image-ids"):
1210 db_vim = get_vim_account(vnfr["vim-account-id"])
1211 vim_type = db_vim["vim_type"]
1212 for alt_image_id in vdur.get("alt-image-ids"):
1213 ns_alt_image = target["image"][int(alt_image_id)]
1214 if vim_type == ns_alt_image.get("vim-type"):
1215 # must use alternative image
1216 self.logger.debug(
1217 "use alternative image id: {}".format(alt_image_id)
1218 )
1219 ns_image_id = alt_image_id
1220 vdur["ns-image-id"] = ns_image_id
1221 break
1222 ns_image = target["image"][int(ns_image_id)]
1223 if target_vim not in ns_image["vim_info"]:
1224 ns_image["vim_info"][target_vim] = {}
1225
1226 # Affinity groups
1227 if vdur.get("affinity-or-anti-affinity-group-id"):
1228 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1229 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1230 if target_vim not in ns_ags["vim_info"]:
1231 ns_ags["vim_info"][target_vim] = {}
1232
1233 vdur["vim_info"] = {target_vim: {}}
1234 # instantiation parameters
1235 if vnf_params:
1236 vdu_instantiation_params = find_in_list(
1237 get_iterable(vnf_params, "vdu"),
1238 lambda i_vdu: i_vdu["id"] == vdud["id"],
1239 )
1240 if vdu_instantiation_params:
1241 # Parse the vdu_volumes from the instantiation params
1242 vdu_volumes = get_volumes_from_instantiation_params(
1243 vdu_instantiation_params, vdud
1244 )
1245 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1246 vdur_list.append(vdur)
1247 target_vnf["vdur"] = vdur_list
1248 target["vnf"].append(target_vnf)
1249
1250 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1251 desc = await self.RO.deploy(nsr_id, target)
1252 self.logger.debug("RO return > {}".format(desc))
1253 action_id = desc["action_id"]
1254 await self._wait_ng_ro(
1255 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
1256 operation="instantiation"
1257 )
1258
1259 # Updating NSR
1260 db_nsr_update = {
1261 "_admin.deployed.RO.operational-status": "running",
1262 "detailed-status": " ".join(stage),
1263 }
1264 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1265 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1266 self._write_op_status(nslcmop_id, stage)
1267 self.logger.debug(
1268 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1269 )
1270 return
1271
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        """
        Poll NG-RO until the given action reaches a final state or the timeout expires.

        :param nsr_id: NS record id the action belongs to
        :param action_id: NG-RO action id to poll
        :param nslcmop_id: if provided (together with stage), progress is persisted to the nslcmop
        :param start_time: epoch seconds when the operation started; defaults to now
        :param timeout: maximum seconds to wait before raising NgRoException
        :param stage: 3-item progress list; stage[2] is overwritten with VIM detail
        :param operation: key into self.op_status_map selecting the status-query coroutine
        :raises NgRoException: if NG-RO reports FAILED, or on timeout
        :return: None (returns when status is DONE)
        """
        # Remember the last detailed-status written so we only hit the DB on change
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            # op_status_map maps the operation name to the proper NG-RO status query
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                # still in progress: surface VIM details in the stage text
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                # any other status is a contract violation from NG-RO
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # persist progress only when it changed since the last write
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            # NOTE(review): asyncio.sleep's "loop" kwarg was removed in Python 3.10 —
            # confirm target interpreter version before upgrading
            await asyncio.sleep(15, loop=self.loop)
        else:  # while loop exhausted without break -> timeout_ns_deploy
            raise NgRoException("Timeout waiting ns to deploy")
1309
1310 async def _terminate_ng_ro(
1311 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1312 ):
1313 db_nsr_update = {}
1314 failed_detail = []
1315 action_id = None
1316 start_deploy = time()
1317 try:
1318 target = {
1319 "ns": {"vld": []},
1320 "vnf": [],
1321 "image": [],
1322 "flavor": [],
1323 "action_id": nslcmop_id,
1324 }
1325 desc = await self.RO.deploy(nsr_id, target)
1326 action_id = desc["action_id"]
1327 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1328 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1329 self.logger.debug(
1330 logging_text
1331 + "ns terminate action at RO. action_id={}".format(action_id)
1332 )
1333
1334 # wait until done
1335 delete_timeout = 20 * 60 # 20 minutes
1336 await self._wait_ng_ro(
1337 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1338 operation="termination"
1339 )
1340
1341 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1342 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1343 # delete all nsr
1344 await self.RO.delete(nsr_id)
1345 except Exception as e:
1346 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1347 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1348 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1349 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1350 self.logger.debug(
1351 logging_text + "RO_action_id={} already deleted".format(action_id)
1352 )
1353 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1354 failed_detail.append("delete conflict: {}".format(e))
1355 self.logger.debug(
1356 logging_text
1357 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1358 )
1359 else:
1360 failed_detail.append("delete error: {}".format(e))
1361 self.logger.error(
1362 logging_text
1363 + "RO_action_id={} delete error: {}".format(action_id, e)
1364 )
1365
1366 if failed_detail:
1367 stage[2] = "Error deleting from VIM"
1368 else:
1369 stage[2] = "Deleted from VIM"
1370 db_nsr_update["detailed-status"] = " ".join(stage)
1371 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1372 self._write_op_status(nslcmop_id, stage)
1373
1374 if failed_detail:
1375 raise LcmException("; ".join(failed_detail))
1376 return
1377
1378 async def instantiate_RO(
1379 self,
1380 logging_text,
1381 nsr_id,
1382 nsd,
1383 db_nsr,
1384 db_nslcmop,
1385 db_vnfrs,
1386 db_vnfds,
1387 n2vc_key_list,
1388 stage,
1389 ):
1390 """
1391 Instantiate at RO
1392 :param logging_text: preffix text to use at logging
1393 :param nsr_id: nsr identity
1394 :param nsd: database content of ns descriptor
1395 :param db_nsr: database content of ns record
1396 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1397 :param db_vnfrs:
1398 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1399 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1400 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1401 :return: None or exception
1402 """
1403 try:
1404 start_deploy = time()
1405 ns_params = db_nslcmop.get("operationParams")
1406 if ns_params and ns_params.get("timeout_ns_deploy"):
1407 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1408 else:
1409 timeout_ns_deploy = self.timeout.get(
1410 "ns_deploy", self.timeout_ns_deploy
1411 )
1412
1413 # Check for and optionally request placement optimization. Database will be updated if placement activated
1414 stage[2] = "Waiting for Placement."
1415 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1416 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1417 for vnfr in db_vnfrs.values():
1418 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1419 break
1420 else:
1421 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1422
1423 return await self._instantiate_ng_ro(
1424 logging_text,
1425 nsr_id,
1426 nsd,
1427 db_nsr,
1428 db_nslcmop,
1429 db_vnfrs,
1430 db_vnfds,
1431 n2vc_key_list,
1432 stage,
1433 start_deploy,
1434 timeout_ns_deploy,
1435 )
1436 except Exception as e:
1437 stage[2] = "ERROR deploying at VIM"
1438 self.set_vnfr_at_error(db_vnfrs, str(e))
1439 self.logger.error(
1440 "Error deploying at VIM {}".format(e),
1441 exc_info=not isinstance(
1442 e,
1443 (
1444 ROclient.ROClientException,
1445 LcmException,
1446 DbException,
1447 NgRoException,
1448 ),
1449 ),
1450 )
1451 raise
1452
1453 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1454 """
1455 Wait for kdu to be up, get ip address
1456 :param logging_text: prefix use for logging
1457 :param nsr_id:
1458 :param vnfr_id:
1459 :param kdu_name:
1460 :return: IP address, K8s services
1461 """
1462
1463 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1464 nb_tries = 0
1465
1466 while nb_tries < 360:
1467 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1468 kdur = next(
1469 (
1470 x
1471 for x in get_iterable(db_vnfr, "kdur")
1472 if x.get("kdu-name") == kdu_name
1473 ),
1474 None,
1475 )
1476 if not kdur:
1477 raise LcmException(
1478 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1479 )
1480 if kdur.get("status"):
1481 if kdur["status"] in ("READY", "ENABLED"):
1482 return kdur.get("ip-address"), kdur.get("services")
1483 else:
1484 raise LcmException(
1485 "target KDU={} is in error state".format(kdu_name)
1486 )
1487
1488 await asyncio.sleep(10, loop=self.loop)
1489 nb_tries += 1
1490 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1491
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target VDU id; None means the VNF's management VDU
        :param vdu_index: count-index of the target VDU (used only when vdu_id is set)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :raises LcmException: on VM/VNF error state, retry exhaustion, or lookup failure
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        # nb_tries counts key-injection attempts; ro_retries counts polling iterations
        nb_tries = 0
        target_vdu_id = None
        ro_retries = 0

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour (10 s per iteration)
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            # NOTE(review): asyncio.sleep's "loop" kwarg was removed in Python 3.10
            await asyncio.sleep(10, loop=self.loop)

            # get ip address (phase 1: resolve the target vdur and its IP)
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur holding the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered ready by definition; otherwise require ACTIVE
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine (phase 2; skipped when pub_key/user not given)
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # cannot inject into physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # new-generation RO: deploy an inject_ssh_key action and wait
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
                        break
                    else:
                        # legacy RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                            if not ro_nsr_id:
                                continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        # succeed if any VM reports vim_result == 200
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # legacy RO errors are retried up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the IP address is all that was needed
                break

        return ip_address
1668
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id whose configurationStatus list is polled
        :param vca_deployed_list: deployed VCA list; only vca_index entry is used
        :param vca_index: index of the VCA whose dependencies must become READY
        :raises LcmException: if a dependency is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): timeout is decremented by 1 per 10 s sleep, so this allows
        # up to ~3000 s (50 min), not 300 s — confirm whether that is intended
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            # scan all other VCAs of the same scope; break means "still waiting"
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # dependency applies when I am NS-level (no member-vnf-index) or
                # the other VCA belongs to my same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        break
            else:
                # for-loop completed without break: no pending dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1706
1707 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1708 vca_id = None
1709 if db_vnfr:
1710 vca_id = deep_get(db_vnfr, ("vca-id",))
1711 elif db_nsr:
1712 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1713 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1714 return vca_id
1715
1716 async def instantiate_N2VC(
1717 self,
1718 logging_text,
1719 vca_index,
1720 nsi_id,
1721 db_nsr,
1722 db_vnfr,
1723 vdu_id,
1724 kdu_name,
1725 vdu_index,
1726 config_descriptor,
1727 deploy_params,
1728 base_folder,
1729 nslcmop_id,
1730 stage,
1731 vca_type,
1732 vca_name,
1733 ee_config_descriptor,
1734 ):
1735 nsr_id = db_nsr["_id"]
1736 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1737 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1738 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1739 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1740 db_dict = {
1741 "collection": "nsrs",
1742 "filter": {"_id": nsr_id},
1743 "path": db_update_entry,
1744 }
1745 step = ""
1746 try:
1747
1748 element_type = "NS"
1749 element_under_configuration = nsr_id
1750
1751 vnfr_id = None
1752 if db_vnfr:
1753 vnfr_id = db_vnfr["_id"]
1754 osm_config["osm"]["vnf_id"] = vnfr_id
1755
1756 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1757
1758 if vca_type == "native_charm":
1759 index_number = 0
1760 else:
1761 index_number = vdu_index or 0
1762
1763 if vnfr_id:
1764 element_type = "VNF"
1765 element_under_configuration = vnfr_id
1766 namespace += ".{}-{}".format(vnfr_id, index_number)
1767 if vdu_id:
1768 namespace += ".{}-{}".format(vdu_id, index_number)
1769 element_type = "VDU"
1770 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1771 osm_config["osm"]["vdu_id"] = vdu_id
1772 elif kdu_name:
1773 namespace += ".{}".format(kdu_name)
1774 element_type = "KDU"
1775 element_under_configuration = kdu_name
1776 osm_config["osm"]["kdu_name"] = kdu_name
1777
1778 # Get artifact path
1779 if base_folder["pkg-dir"]:
1780 artifact_path = "{}/{}/{}/{}".format(
1781 base_folder["folder"],
1782 base_folder["pkg-dir"],
1783 "charms"
1784 if vca_type
1785 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1786 else "helm-charts",
1787 vca_name,
1788 )
1789 else:
1790 artifact_path = "{}/Scripts/{}/{}/".format(
1791 base_folder["folder"],
1792 "charms"
1793 if vca_type
1794 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1795 else "helm-charts",
1796 vca_name,
1797 )
1798
1799 self.logger.debug("Artifact path > {}".format(artifact_path))
1800
1801 # get initial_config_primitive_list that applies to this element
1802 initial_config_primitive_list = config_descriptor.get(
1803 "initial-config-primitive"
1804 )
1805
1806 self.logger.debug(
1807 "Initial config primitive list > {}".format(
1808 initial_config_primitive_list
1809 )
1810 )
1811
1812 # add config if not present for NS charm
1813 ee_descriptor_id = ee_config_descriptor.get("id")
1814 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1815 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1816 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1817 )
1818
1819 self.logger.debug(
1820 "Initial config primitive list #2 > {}".format(
1821 initial_config_primitive_list
1822 )
1823 )
1824 # n2vc_redesign STEP 3.1
1825 # find old ee_id if exists
1826 ee_id = vca_deployed.get("ee_id")
1827
1828 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1829 # create or register execution environment in VCA
1830 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1831
1832 self._write_configuration_status(
1833 nsr_id=nsr_id,
1834 vca_index=vca_index,
1835 status="CREATING",
1836 element_under_configuration=element_under_configuration,
1837 element_type=element_type,
1838 )
1839
1840 step = "create execution environment"
1841 self.logger.debug(logging_text + step)
1842
1843 ee_id = None
1844 credentials = None
1845 if vca_type == "k8s_proxy_charm":
1846 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1847 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1848 namespace=namespace,
1849 artifact_path=artifact_path,
1850 db_dict=db_dict,
1851 vca_id=vca_id,
1852 )
1853 elif vca_type == "helm" or vca_type == "helm-v3":
1854 ee_id, credentials = await self.vca_map[
1855 vca_type
1856 ].create_execution_environment(
1857 namespace=namespace,
1858 reuse_ee_id=ee_id,
1859 db_dict=db_dict,
1860 config=osm_config,
1861 artifact_path=artifact_path,
1862 vca_type=vca_type,
1863 )
1864 else:
1865 ee_id, credentials = await self.vca_map[
1866 vca_type
1867 ].create_execution_environment(
1868 namespace=namespace,
1869 reuse_ee_id=ee_id,
1870 db_dict=db_dict,
1871 vca_id=vca_id,
1872 )
1873
1874 elif vca_type == "native_charm":
1875 step = "Waiting to VM being up and getting IP address"
1876 self.logger.debug(logging_text + step)
1877 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1878 logging_text,
1879 nsr_id,
1880 vnfr_id,
1881 vdu_id,
1882 vdu_index,
1883 user=None,
1884 pub_key=None,
1885 )
1886 credentials = {"hostname": rw_mgmt_ip}
1887 # get username
1888 username = deep_get(
1889 config_descriptor, ("config-access", "ssh-access", "default-user")
1890 )
1891 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1892 # merged. Meanwhile let's get username from initial-config-primitive
1893 if not username and initial_config_primitive_list:
1894 for config_primitive in initial_config_primitive_list:
1895 for param in config_primitive.get("parameter", ()):
1896 if param["name"] == "ssh-username":
1897 username = param["value"]
1898 break
1899 if not username:
1900 raise LcmException(
1901 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1902 "'config-access.ssh-access.default-user'"
1903 )
1904 credentials["username"] = username
1905 # n2vc_redesign STEP 3.2
1906
1907 self._write_configuration_status(
1908 nsr_id=nsr_id,
1909 vca_index=vca_index,
1910 status="REGISTERING",
1911 element_under_configuration=element_under_configuration,
1912 element_type=element_type,
1913 )
1914
1915 step = "register execution environment {}".format(credentials)
1916 self.logger.debug(logging_text + step)
1917 ee_id = await self.vca_map[vca_type].register_execution_environment(
1918 credentials=credentials,
1919 namespace=namespace,
1920 db_dict=db_dict,
1921 vca_id=vca_id,
1922 )
1923
1924 # for compatibility with MON/POL modules, the need model and application name at database
1925 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1926 ee_id_parts = ee_id.split(".")
1927 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1928 if len(ee_id_parts) >= 2:
1929 model_name = ee_id_parts[0]
1930 application_name = ee_id_parts[1]
1931 db_nsr_update[db_update_entry + "model"] = model_name
1932 db_nsr_update[db_update_entry + "application"] = application_name
1933
1934 # n2vc_redesign STEP 3.3
1935 step = "Install configuration Software"
1936
1937 self._write_configuration_status(
1938 nsr_id=nsr_id,
1939 vca_index=vca_index,
1940 status="INSTALLING SW",
1941 element_under_configuration=element_under_configuration,
1942 element_type=element_type,
1943 other_update=db_nsr_update,
1944 )
1945
1946 # TODO check if already done
1947 self.logger.debug(logging_text + step)
1948 config = None
1949 if vca_type == "native_charm":
1950 config_primitive = next(
1951 (p for p in initial_config_primitive_list if p["name"] == "config"),
1952 None,
1953 )
1954 if config_primitive:
1955 config = self._map_primitive_params(
1956 config_primitive, {}, deploy_params
1957 )
1958 num_units = 1
1959 if vca_type == "lxc_proxy_charm":
1960 if element_type == "NS":
1961 num_units = db_nsr.get("config-units") or 1
1962 elif element_type == "VNF":
1963 num_units = db_vnfr.get("config-units") or 1
1964 elif element_type == "VDU":
1965 for v in db_vnfr["vdur"]:
1966 if vdu_id == v["vdu-id-ref"]:
1967 num_units = v.get("config-units") or 1
1968 break
1969 if vca_type != "k8s_proxy_charm":
1970 await self.vca_map[vca_type].install_configuration_sw(
1971 ee_id=ee_id,
1972 artifact_path=artifact_path,
1973 db_dict=db_dict,
1974 config=config,
1975 num_units=num_units,
1976 vca_id=vca_id,
1977 vca_type=vca_type,
1978 )
1979
1980 # write in db flag of configuration_sw already installed
1981 self.update_db_2(
1982 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1983 )
1984
1985 # add relations for this VCA (wait for other peers related with this VCA)
1986 await self._add_vca_relations(
1987 logging_text=logging_text,
1988 nsr_id=nsr_id,
1989 vca_type=vca_type,
1990 vca_index=vca_index,
1991 )
1992
1993 # if SSH access is required, then get execution environment SSH public
1994 # if native charm we have waited already to VM be UP
1995 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1996 pub_key = None
1997 user = None
1998 # self.logger.debug("get ssh key block")
1999 if deep_get(
2000 config_descriptor, ("config-access", "ssh-access", "required")
2001 ):
2002 # self.logger.debug("ssh key needed")
2003 # Needed to inject a ssh key
2004 user = deep_get(
2005 config_descriptor,
2006 ("config-access", "ssh-access", "default-user"),
2007 )
2008 step = "Install configuration Software, getting public ssh key"
2009 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2010 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2011 )
2012
2013 step = "Insert public key into VM user={} ssh_key={}".format(
2014 user, pub_key
2015 )
2016 else:
2017 # self.logger.debug("no need to get ssh key")
2018 step = "Waiting to VM being up and getting IP address"
2019 self.logger.debug(logging_text + step)
2020
2021 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2022 rw_mgmt_ip = None
2023
2024 # n2vc_redesign STEP 5.1
2025 # wait for RO (ip-address) Insert pub_key into VM
2026 if vnfr_id:
2027 if kdu_name:
2028 rw_mgmt_ip, services = await self.wait_kdu_up(
2029 logging_text, nsr_id, vnfr_id, kdu_name
2030 )
2031 vnfd = self.db.get_one(
2032 "vnfds_revisions",
2033 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2034 )
2035 kdu = get_kdu(vnfd, kdu_name)
2036 kdu_services = [
2037 service["name"] for service in get_kdu_services(kdu)
2038 ]
2039 exposed_services = []
2040 for service in services:
2041 if any(s in service["name"] for s in kdu_services):
2042 exposed_services.append(service)
2043 await self.vca_map[vca_type].exec_primitive(
2044 ee_id=ee_id,
2045 primitive_name="config",
2046 params_dict={
2047 "osm-config": json.dumps(
2048 OsmConfigBuilder(
2049 k8s={"services": exposed_services}
2050 ).build()
2051 )
2052 },
2053 vca_id=vca_id,
2054 )
2055
2056 # This verification is needed in order to avoid trying to add a public key
2057 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2058 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2059 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2060 # or it is a KNF)
2061 elif db_vnfr.get('vdur'):
2062 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2063 logging_text,
2064 nsr_id,
2065 vnfr_id,
2066 vdu_id,
2067 vdu_index,
2068 user=user,
2069 pub_key=pub_key,
2070 )
2071
2072 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2073
2074 # store rw_mgmt_ip in deploy params for later replacement
2075 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2076
2077 # n2vc_redesign STEP 6 Execute initial config primitive
2078 step = "execute initial config primitive"
2079
2080 # wait for dependent primitives execution (NS -> VNF -> VDU)
2081 if initial_config_primitive_list:
2082 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2083
2084 # stage, in function of element type: vdu, kdu, vnf or ns
2085 my_vca = vca_deployed_list[vca_index]
2086 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2087 # VDU or KDU
2088 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2089 elif my_vca.get("member-vnf-index"):
2090 # VNF
2091 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2092 else:
2093 # NS
2094 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2095
2096 self._write_configuration_status(
2097 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2098 )
2099
2100 self._write_op_status(op_id=nslcmop_id, stage=stage)
2101
2102 check_if_terminated_needed = True
2103 for initial_config_primitive in initial_config_primitive_list:
2104 # adding information on the vca_deployed if it is a NS execution environment
2105 if not vca_deployed["member-vnf-index"]:
2106 deploy_params["ns_config_info"] = json.dumps(
2107 self._get_ns_config_info(nsr_id)
2108 )
2109 # TODO check if already done
2110 primitive_params_ = self._map_primitive_params(
2111 initial_config_primitive, {}, deploy_params
2112 )
2113
2114 step = "execute primitive '{}' params '{}'".format(
2115 initial_config_primitive["name"], primitive_params_
2116 )
2117 self.logger.debug(logging_text + step)
2118 await self.vca_map[vca_type].exec_primitive(
2119 ee_id=ee_id,
2120 primitive_name=initial_config_primitive["name"],
2121 params_dict=primitive_params_,
2122 db_dict=db_dict,
2123 vca_id=vca_id,
2124 vca_type=vca_type,
2125 )
2126 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2127 if check_if_terminated_needed:
2128 if config_descriptor.get("terminate-config-primitive"):
2129 self.update_db_2(
2130 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2131 )
2132 check_if_terminated_needed = False
2133
2134 # TODO register in database that primitive is done
2135
2136 # STEP 7 Configure metrics
2137 if vca_type == "helm" or vca_type == "helm-v3":
2138 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2139 ee_id=ee_id,
2140 artifact_path=artifact_path,
2141 ee_config_descriptor=ee_config_descriptor,
2142 vnfr_id=vnfr_id,
2143 nsr_id=nsr_id,
2144 target_ip=rw_mgmt_ip,
2145 )
2146 if prometheus_jobs:
2147 self.update_db_2(
2148 "nsrs",
2149 nsr_id,
2150 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2151 )
2152
2153 for job in prometheus_jobs:
2154 self.db.set_one(
2155 "prometheus_jobs",
2156 {"job_name": job["job_name"]},
2157 job,
2158 upsert=True,
2159 fail_on_empty=False,
2160 )
2161
2162 step = "instantiated at VCA"
2163 self.logger.debug(logging_text + step)
2164
2165 self._write_configuration_status(
2166 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2167 )
2168
2169 except Exception as e: # TODO not use Exception but N2VC exception
2170 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2171 if not isinstance(
2172 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2173 ):
2174 self.logger.error(
2175 "Exception while {} : {}".format(step, e), exc_info=True
2176 )
2177 self._write_configuration_status(
2178 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2179 )
2180 raise LcmException("{} {}".format(step, e)) from e
2181
2182 def _write_ns_status(
2183 self,
2184 nsr_id: str,
2185 ns_state: str,
2186 current_operation: str,
2187 current_operation_id: str,
2188 error_description: str = None,
2189 error_detail: str = None,
2190 other_update: dict = None,
2191 ):
2192 """
2193 Update db_nsr fields.
2194 :param nsr_id:
2195 :param ns_state:
2196 :param current_operation:
2197 :param current_operation_id:
2198 :param error_description:
2199 :param error_detail:
2200 :param other_update: Other required changes at database if provided, will be cleared
2201 :return:
2202 """
2203 try:
2204 db_dict = other_update or {}
2205 db_dict[
2206 "_admin.nslcmop"
2207 ] = current_operation_id # for backward compatibility
2208 db_dict["_admin.current-operation"] = current_operation_id
2209 db_dict["_admin.operation-type"] = (
2210 current_operation if current_operation != "IDLE" else None
2211 )
2212 db_dict["currentOperation"] = current_operation
2213 db_dict["currentOperationID"] = current_operation_id
2214 db_dict["errorDescription"] = error_description
2215 db_dict["errorDetail"] = error_detail
2216
2217 if ns_state:
2218 db_dict["nsState"] = ns_state
2219 self.update_db_2("nsrs", nsr_id, db_dict)
2220 except DbException as e:
2221 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2222
2223 def _write_op_status(
2224 self,
2225 op_id: str,
2226 stage: list = None,
2227 error_message: str = None,
2228 queuePosition: int = 0,
2229 operation_state: str = None,
2230 other_update: dict = None,
2231 ):
2232 try:
2233 db_dict = other_update or {}
2234 db_dict["queuePosition"] = queuePosition
2235 if isinstance(stage, list):
2236 db_dict["stage"] = stage[0]
2237 db_dict["detailed-status"] = " ".join(stage)
2238 elif stage is not None:
2239 db_dict["stage"] = str(stage)
2240
2241 if error_message is not None:
2242 db_dict["errorMessage"] = error_message
2243 if operation_state is not None:
2244 db_dict["operationState"] = operation_state
2245 db_dict["statusEnteredTime"] = time()
2246 self.update_db_2("nslcmops", op_id, db_dict)
2247 except DbException as e:
2248 self.logger.warn(
2249 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2250 )
2251
2252 def _write_all_config_status(self, db_nsr: dict, status: str):
2253 try:
2254 nsr_id = db_nsr["_id"]
2255 # configurationStatus
2256 config_status = db_nsr.get("configurationStatus")
2257 if config_status:
2258 db_nsr_update = {
2259 "configurationStatus.{}.status".format(index): status
2260 for index, v in enumerate(config_status)
2261 if v
2262 }
2263 # update status
2264 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2265
2266 except DbException as e:
2267 self.logger.warn(
2268 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2269 )
2270
2271 def _write_configuration_status(
2272 self,
2273 nsr_id: str,
2274 vca_index: int,
2275 status: str = None,
2276 element_under_configuration: str = None,
2277 element_type: str = None,
2278 other_update: dict = None,
2279 ):
2280
2281 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2282 # .format(vca_index, status))
2283
2284 try:
2285 db_path = "configurationStatus.{}.".format(vca_index)
2286 db_dict = other_update or {}
2287 if status:
2288 db_dict[db_path + "status"] = status
2289 if element_under_configuration:
2290 db_dict[
2291 db_path + "elementUnderConfiguration"
2292 ] = element_under_configuration
2293 if element_type:
2294 db_dict[db_path + "elementType"] = element_type
2295 self.update_db_2("nsrs", nsr_id, db_dict)
2296 except DbException as e:
2297 self.logger.warn(
2298 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2299 status, nsr_id, vca_index, e
2300 )
2301 )
2302
2303 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2304 """
2305 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2306 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2307 Database is used because the result can be obtained from a different LCM worker in case of HA.
2308 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2309 :param db_nslcmop: database content of nslcmop
2310 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2311 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2312 computed 'vim-account-id'
2313 """
2314 modified = False
2315 nslcmop_id = db_nslcmop["_id"]
2316 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2317 if placement_engine == "PLA":
2318 self.logger.debug(
2319 logging_text + "Invoke and wait for placement optimization"
2320 )
2321 await self.msg.aiowrite(
2322 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2323 )
2324 db_poll_interval = 5
2325 wait = db_poll_interval * 10
2326 pla_result = None
2327 while not pla_result and wait >= 0:
2328 await asyncio.sleep(db_poll_interval)
2329 wait -= db_poll_interval
2330 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2331 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2332
2333 if not pla_result:
2334 raise LcmException(
2335 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2336 )
2337
2338 for pla_vnf in pla_result["vnf"]:
2339 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2340 if not pla_vnf.get("vimAccountId") or not vnfr:
2341 continue
2342 modified = True
2343 self.db.set_one(
2344 "vnfrs",
2345 {"_id": vnfr["_id"]},
2346 {"vim-account-id": pla_vnf["vimAccountId"]},
2347 )
2348 # Modifies db_vnfrs
2349 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2350 return modified
2351
2352 def update_nsrs_with_pla_result(self, params):
2353 try:
2354 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2355 self.update_db_2(
2356 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2357 )
2358 except Exception as e:
2359 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2360
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a network service: reads all needed records from the DB,
        deploys KDUs, the VIM scenario (via RO, as a background task) and the
        N2VC execution environments, then waits for all launched tasks and
        writes the final operation result to the DB and to kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Result is persisted in nsrs/nslcmops and notified on the
            "ns" kafka topic.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                # additionalParamsForVnf is stored JSON-encoded; decode it once here
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"] # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            # kdur additionalParams is stored JSON-encoded as well
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts while vnfd_id is a string,
                # so this membership test is always True and the vnfd is re-read for
                # every vnfr sharing the same vnfd — confirm and deduplicate by id.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            # RO deployment runs as a background task; it is awaited in "finally"
            # together with the N2VC tasks registered below.
            stage[1] = "Deploying NS at VIM."
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm (if the vnfd declares configuration for itself)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            # NOTE(review): "exc" here shadows the outer variable and is unbound
            # after this clause; harmless today because "exc" is not read below.
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            if nslcmop_operation_state:
                try:
                    # notify the final state on kafka (NBI and others listen here)
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2846
2847 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2848 if vnfd_id not in cached_vnfds:
2849 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2850 return cached_vnfds[vnfd_id]
2851
2852 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2853 if vnf_profile_id not in cached_vnfrs:
2854 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2855 "vnfrs",
2856 {
2857 "member-vnf-index-ref": vnf_profile_id,
2858 "nsr-id-ref": nsr_id,
2859 },
2860 )
2861 return cached_vnfrs[vnf_profile_id]
2862
2863 def _is_deployed_vca_in_relation(
2864 self, vca: DeployedVCA, relation: Relation
2865 ) -> bool:
2866 found = False
2867 for endpoint in (relation.provider, relation.requirer):
2868 if endpoint["kdu-resource-profile-id"]:
2869 continue
2870 found = (
2871 vca.vnf_profile_id == endpoint.vnf_profile_id
2872 and vca.vdu_profile_id == endpoint.vdu_profile_id
2873 and vca.execution_environment_ref == endpoint.execution_environment_ref
2874 )
2875 if found:
2876 break
2877 return found
2878
2879 def _update_ee_relation_data_with_implicit_data(
2880 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2881 ):
2882 ee_relation_data = safe_get_ee_relation(
2883 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2884 )
2885 ee_relation_level = EELevel.get_level(ee_relation_data)
2886 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2887 "execution-environment-ref"
2888 ]:
2889 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2890 vnfd_id = vnf_profile["vnfd-id"]
2891 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2892 entity_id = (
2893 vnfd_id
2894 if ee_relation_level == EELevel.VNF
2895 else ee_relation_data["vdu-profile-id"]
2896 )
2897 ee = get_juju_ee_ref(db_vnfd, entity_id)
2898 if not ee:
2899 raise Exception(
2900 f"not execution environments found for ee_relation {ee_relation_data}"
2901 )
2902 ee_relation_data["execution-environment-ref"] = ee["id"]
2903 return ee_relation_data
2904
2905 def _get_ns_relations(
2906 self,
2907 nsr_id: str,
2908 nsd: Dict[str, Any],
2909 vca: DeployedVCA,
2910 cached_vnfds: Dict[str, Any],
2911 ) -> List[Relation]:
2912 relations = []
2913 db_ns_relations = get_ns_configuration_relation_list(nsd)
2914 for r in db_ns_relations:
2915 provider_dict = None
2916 requirer_dict = None
2917 if all(key in r for key in ("provider", "requirer")):
2918 provider_dict = r["provider"]
2919 requirer_dict = r["requirer"]
2920 elif "entities" in r:
2921 provider_id = r["entities"][0]["id"]
2922 provider_dict = {
2923 "nsr-id": nsr_id,
2924 "endpoint": r["entities"][0]["endpoint"],
2925 }
2926 if provider_id != nsd["id"]:
2927 provider_dict["vnf-profile-id"] = provider_id
2928 requirer_id = r["entities"][1]["id"]
2929 requirer_dict = {
2930 "nsr-id": nsr_id,
2931 "endpoint": r["entities"][1]["endpoint"],
2932 }
2933 if requirer_id != nsd["id"]:
2934 requirer_dict["vnf-profile-id"] = requirer_id
2935 else:
2936 raise Exception(
2937 "provider/requirer or entities must be included in the relation."
2938 )
2939 relation_provider = self._update_ee_relation_data_with_implicit_data(
2940 nsr_id, nsd, provider_dict, cached_vnfds
2941 )
2942 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2943 nsr_id, nsd, requirer_dict, cached_vnfds
2944 )
2945 provider = EERelation(relation_provider)
2946 requirer = EERelation(relation_requirer)
2947 relation = Relation(r["name"], provider, requirer)
2948 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2949 if vca_in_relation:
2950 relations.append(relation)
2951 return relations
2952
2953 def _get_vnf_relations(
2954 self,
2955 nsr_id: str,
2956 nsd: Dict[str, Any],
2957 vca: DeployedVCA,
2958 cached_vnfds: Dict[str, Any],
2959 ) -> List[Relation]:
2960 relations = []
2961 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2962 vnf_profile_id = vnf_profile["id"]
2963 vnfd_id = vnf_profile["vnfd-id"]
2964 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2965 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2966 for r in db_vnf_relations:
2967 provider_dict = None
2968 requirer_dict = None
2969 if all(key in r for key in ("provider", "requirer")):
2970 provider_dict = r["provider"]
2971 requirer_dict = r["requirer"]
2972 elif "entities" in r:
2973 provider_id = r["entities"][0]["id"]
2974 provider_dict = {
2975 "nsr-id": nsr_id,
2976 "vnf-profile-id": vnf_profile_id,
2977 "endpoint": r["entities"][0]["endpoint"],
2978 }
2979 if provider_id != vnfd_id:
2980 provider_dict["vdu-profile-id"] = provider_id
2981 requirer_id = r["entities"][1]["id"]
2982 requirer_dict = {
2983 "nsr-id": nsr_id,
2984 "vnf-profile-id": vnf_profile_id,
2985 "endpoint": r["entities"][1]["endpoint"],
2986 }
2987 if requirer_id != vnfd_id:
2988 requirer_dict["vdu-profile-id"] = requirer_id
2989 else:
2990 raise Exception(
2991 "provider/requirer or entities must be included in the relation."
2992 )
2993 relation_provider = self._update_ee_relation_data_with_implicit_data(
2994 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2995 )
2996 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2997 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2998 )
2999 provider = EERelation(relation_provider)
3000 requirer = EERelation(relation_requirer)
3001 relation = Relation(r["name"], provider, requirer)
3002 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3003 if vca_in_relation:
3004 relations.append(relation)
3005 return relations
3006
3007 def _get_kdu_resource_data(
3008 self,
3009 ee_relation: EERelation,
3010 db_nsr: Dict[str, Any],
3011 cached_vnfds: Dict[str, Any],
3012 ) -> DeployedK8sResource:
3013 nsd = get_nsd(db_nsr)
3014 vnf_profiles = get_vnf_profiles(nsd)
3015 vnfd_id = find_in_list(
3016 vnf_profiles,
3017 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3018 )["vnfd-id"]
3019 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3020 kdu_resource_profile = get_kdu_resource_profile(
3021 db_vnfd, ee_relation.kdu_resource_profile_id
3022 )
3023 kdu_name = kdu_resource_profile["kdu-name"]
3024 deployed_kdu, _ = get_deployed_kdu(
3025 db_nsr.get("_admin", ()).get("deployed", ()),
3026 kdu_name,
3027 ee_relation.vnf_profile_id,
3028 )
3029 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3030 return deployed_kdu
3031
3032 def _get_deployed_component(
3033 self,
3034 ee_relation: EERelation,
3035 db_nsr: Dict[str, Any],
3036 cached_vnfds: Dict[str, Any],
3037 ) -> DeployedComponent:
3038 nsr_id = db_nsr["_id"]
3039 deployed_component = None
3040 ee_level = EELevel.get_level(ee_relation)
3041 if ee_level == EELevel.NS:
3042 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3043 if vca:
3044 deployed_component = DeployedVCA(nsr_id, vca)
3045 elif ee_level == EELevel.VNF:
3046 vca = get_deployed_vca(
3047 db_nsr,
3048 {
3049 "vdu_id": None,
3050 "member-vnf-index": ee_relation.vnf_profile_id,
3051 "ee_descriptor_id": ee_relation.execution_environment_ref,
3052 },
3053 )
3054 if vca:
3055 deployed_component = DeployedVCA(nsr_id, vca)
3056 elif ee_level == EELevel.VDU:
3057 vca = get_deployed_vca(
3058 db_nsr,
3059 {
3060 "vdu_id": ee_relation.vdu_profile_id,
3061 "member-vnf-index": ee_relation.vnf_profile_id,
3062 "ee_descriptor_id": ee_relation.execution_environment_ref,
3063 },
3064 )
3065 if vca:
3066 deployed_component = DeployedVCA(nsr_id, vca)
3067 elif ee_level == EELevel.KDU:
3068 kdu_resource_data = self._get_kdu_resource_data(
3069 ee_relation, db_nsr, cached_vnfds
3070 )
3071 if kdu_resource_data:
3072 deployed_component = DeployedK8sResource(kdu_resource_data)
3073 return deployed_component
3074
3075 async def _add_relation(
3076 self,
3077 relation: Relation,
3078 vca_type: str,
3079 db_nsr: Dict[str, Any],
3080 cached_vnfds: Dict[str, Any],
3081 cached_vnfrs: Dict[str, Any],
3082 ) -> bool:
3083 deployed_provider = self._get_deployed_component(
3084 relation.provider, db_nsr, cached_vnfds
3085 )
3086 deployed_requirer = self._get_deployed_component(
3087 relation.requirer, db_nsr, cached_vnfds
3088 )
3089 if (
3090 deployed_provider
3091 and deployed_requirer
3092 and deployed_provider.config_sw_installed
3093 and deployed_requirer.config_sw_installed
3094 ):
3095 provider_db_vnfr = (
3096 self._get_vnfr(
3097 relation.provider.nsr_id,
3098 relation.provider.vnf_profile_id,
3099 cached_vnfrs,
3100 )
3101 if relation.provider.vnf_profile_id
3102 else None
3103 )
3104 requirer_db_vnfr = (
3105 self._get_vnfr(
3106 relation.requirer.nsr_id,
3107 relation.requirer.vnf_profile_id,
3108 cached_vnfrs,
3109 )
3110 if relation.requirer.vnf_profile_id
3111 else None
3112 )
3113 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3114 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3115 provider_relation_endpoint = RelationEndpoint(
3116 deployed_provider.ee_id,
3117 provider_vca_id,
3118 relation.provider.endpoint,
3119 )
3120 requirer_relation_endpoint = RelationEndpoint(
3121 deployed_requirer.ee_id,
3122 requirer_vca_id,
3123 relation.requirer.endpoint,
3124 )
3125 await self.vca_map[vca_type].add_relation(
3126 provider=provider_relation_endpoint,
3127 requirer=requirer_relation_endpoint,
3128 )
3129 # remove entry from relations list
3130 return True
3131 return False
3132
3133 async def _add_vca_relations(
3134 self,
3135 logging_text,
3136 nsr_id,
3137 vca_type: str,
3138 vca_index: int,
3139 timeout: int = 3600,
3140 ) -> bool:
3141
3142 # steps:
3143 # 1. find all relations for this VCA
3144 # 2. wait for other peers related
3145 # 3. add relations
3146
3147 try:
3148 # STEP 1: find all relations for this VCA
3149
3150 # read nsr record
3151 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3152 nsd = get_nsd(db_nsr)
3153
3154 # this VCA data
3155 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3156 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3157
3158 cached_vnfds = {}
3159 cached_vnfrs = {}
3160 relations = []
3161 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3162 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3163
3164 # if no relations, terminate
3165 if not relations:
3166 self.logger.debug(logging_text + " No relations")
3167 return True
3168
3169 self.logger.debug(logging_text + " adding relations {}".format(relations))
3170
3171 # add all relations
3172 start = time()
3173 while True:
3174 # check timeout
3175 now = time()
3176 if now - start >= timeout:
3177 self.logger.error(logging_text + " : timeout adding relations")
3178 return False
3179
3180 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3181 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3182
3183 # for each relation, find the VCA's related
3184 for relation in relations.copy():
3185 added = await self._add_relation(
3186 relation,
3187 vca_type,
3188 db_nsr,
3189 cached_vnfds,
3190 cached_vnfrs,
3191 )
3192 if added:
3193 relations.remove(relation)
3194
3195 if not relations:
3196 self.logger.debug("Relations added")
3197 break
3198 await asyncio.sleep(5.0)
3199
3200 return True
3201
3202 except Exception as e:
3203 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3204 return False
3205
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU in its target K8s cluster and record the outcome.

        Installs the kdu model (helm chart or juju bundle), persists the
        kdu-instance name and service/management IP information in the
        nsrs/vnfrs collections, and runs the KDU initial-config-primitives
        when they are not handled by a juju execution environment. On any
        failure the error is written to the DB and the exception re-raised.

        :param nsr_id: NS record id
        :param nsr_db_path: dot-path of this KDU inside the nsr record
            (e.g. "_admin.deployed.K8s.<n>")
        :param vnfr_data: VNF record that owns this KDU
        :param kdu_index: position of this KDU inside vnfr_data["kdur"]
        :param kdud: KDU descriptor taken from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster/model/namespace info for this KDU
        :param k8params: instantiation parameters for the installer
        :param timeout: seconds allowed for the install and for each
            primitive execution, defaults to 600
        :param vca_id: VCA id used for juju-based clusters
        :return: the kdu instance name (generated or user-provided)
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Prefer the deployment name requested by the user; otherwise let
            # the cluster connector generate a unique instance name.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # services flagged as mgmt-service in the descriptor
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # match by name prefix: deployed service names carry
                        # the descriptor name plus a suffix
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                    if find_in_list(
                                        target_ee_list,
                                        lambda ee: ee.get(
                                            "external-connection-point-ref", ""
                                        )
                                        == service_external_cp,
                                    ):
                                        vnfr_update_dict[
                                            "kdur.{}.ip-address".format(kdu_index)
                                        ] = ip
                            break
                    else:
                        # inner for-else: no deployed service matched this
                        # mgmt-service name
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial-config-primitives directly on the cluster only when
            # no juju execution environment is declared for this KDU
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives are executed in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3403
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU declared in the VNF records.

        For each "kdur" entry the kdu model and its K8s cluster are resolved,
        helm repos are synchronized once per cluster, and an _install_kdu task
        is created and registered in lcm_tasks and task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id used to register the tasks
        :param db_vnfrs: VNF records of this NS (values are iterated;
            presumably keyed by member-vnf-index — TODO confirm)
        :param db_vnfds: list of VNF descriptors referenced by the records
        :param task_instantiation_info: dict updated with task -> description
        :raises LcmException: on invalid kdu type or K8s cluster problems
        """
        # Launch kdus if present in the descriptor

        # cache of cluster-id -> cluster uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Return (and cache) the connector uuid of a K8s cluster,
            waiting for any in-flight k8scluster task and initializing
            helm-v3 on legacy clusters if needed."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if the K8s cluster is still being created/updated and wait
            # for any previous related task to finish first
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                # use the local artifact path instead of a repo ref
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per cluster and helm flavour)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3675
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create one instantiate_N2VC task per execution environment in the
        given configuration descriptor.

        For each juju/helm ee-item: find (or create) its entry in
        <nsrs>._admin.deployed.VCA, launch instantiate_N2VC as an asyncio
        task, and register it in lcm_tasks and task_instantiation_info.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the vca_type/vca_name from the ee-item flavour
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an already existing VCA entry for this target
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                # append after the last existing entry
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3828
3829 @staticmethod
3830 def _create_nslcmop(nsr_id, operation, params):
3831 """
3832 Creates a ns-lcm-opp content to be stored at database.
3833 :param nsr_id: internal id of the instance
3834 :param operation: instantiate, terminate, scale, action, ...
3835 :param params: user parameters for the operation
3836 :return: dictionary following SOL005 format
3837 """
3838 # Raise exception if invalid arguments
3839 if not (nsr_id and operation and params):
3840 raise LcmException(
3841 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3842 )
3843 now = time()
3844 _id = str(uuid4())
3845 nslcmop = {
3846 "id": _id,
3847 "_id": _id,
3848 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3849 "operationState": "PROCESSING",
3850 "statusEnteredTime": now,
3851 "nsInstanceId": nsr_id,
3852 "lcmOperationType": operation,
3853 "startTime": now,
3854 "isAutomaticInvocation": False,
3855 "operationParams": params,
3856 "isCancelPending": False,
3857 "links": {
3858 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3859 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3860 },
3861 }
3862 return nslcmop
3863
3864 def _format_additional_params(self, params):
3865 params = params or {}
3866 for key, value in params.items():
3867 if str(value).startswith("!!yaml "):
3868 params[key] = yaml.safe_load(value[7:])
3869 return params
3870
3871 def _get_terminate_primitive_params(self, seq, vnf_index):
3872 primitive = seq.get("name")
3873 primitive_params = {}
3874 params = {
3875 "member_vnf_index": vnf_index,
3876 "primitive": primitive,
3877 "primitive_params": primitive_params,
3878 }
3879 desc_params = {}
3880 return self._map_primitive_params(seq, params, desc_params)
3881
3882 # sub-operations
3883
3884 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3885 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3886 if op.get("operationState") == "COMPLETED":
3887 # b. Skip sub-operation
3888 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3889 return self.SUBOPERATION_STATUS_SKIP
3890 else:
3891 # c. retry executing sub-operation
3892 # The sub-operation exists, and operationState != 'COMPLETED'
3893 # Update operationState = 'PROCESSING' to indicate a retry.
3894 operationState = "PROCESSING"
3895 detailed_status = "In progress"
3896 self._update_suboperation_status(
3897 db_nslcmop, op_index, operationState, detailed_status
3898 )
3899 # Return the sub-operation index
3900 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3901 # with arguments extracted from the sub-operation
3902 return op_index
3903
3904 # Find a sub-operation where all keys in a matching dictionary must match
3905 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3906 def _find_suboperation(self, db_nslcmop, match):
3907 if db_nslcmop and match:
3908 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3909 for i, op in enumerate(op_list):
3910 if all(op.get(k) == match[k] for k in match):
3911 return i
3912 return self.SUBOPERATION_STATUS_NOT_FOUND
3913
3914 # Update status for a sub-operation given its index
3915 def _update_suboperation_status(
3916 self, db_nslcmop, op_index, operationState, detailed_status
3917 ):
3918 # Update DB for HA tasks
3919 q_filter = {"_id": db_nslcmop["_id"]}
3920 update_dict = {
3921 "_admin.operations.{}.operationState".format(op_index): operationState,
3922 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3923 }
3924 self.db.set_one(
3925 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3926 )
3927
3928 # Add sub-operation, return the index of the added sub-operation
3929 # Optionally, set operationState, detailed-status, and operationType
3930 # Status and type are currently set for 'scale' sub-operations:
3931 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3932 # 'detailed-status' : status message
3933 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3934 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3935 def _add_suboperation(
3936 self,
3937 db_nslcmop,
3938 vnf_index,
3939 vdu_id,
3940 vdu_count_index,
3941 vdu_name,
3942 primitive,
3943 mapped_primitive_params,
3944 operationState=None,
3945 detailed_status=None,
3946 operationType=None,
3947 RO_nsr_id=None,
3948 RO_scaling_info=None,
3949 ):
3950 if not db_nslcmop:
3951 return self.SUBOPERATION_STATUS_NOT_FOUND
3952 # Get the "_admin.operations" list, if it exists
3953 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3954 op_list = db_nslcmop_admin.get("operations")
3955 # Create or append to the "_admin.operations" list
3956 new_op = {
3957 "member_vnf_index": vnf_index,
3958 "vdu_id": vdu_id,
3959 "vdu_count_index": vdu_count_index,
3960 "primitive": primitive,
3961 "primitive_params": mapped_primitive_params,
3962 }
3963 if operationState:
3964 new_op["operationState"] = operationState
3965 if detailed_status:
3966 new_op["detailed-status"] = detailed_status
3967 if operationType:
3968 new_op["lcmOperationType"] = operationType
3969 if RO_nsr_id:
3970 new_op["RO_nsr_id"] = RO_nsr_id
3971 if RO_scaling_info:
3972 new_op["RO_scaling_info"] = RO_scaling_info
3973 if not op_list:
3974 # No existing operations, create key 'operations' with current operation as first list element
3975 db_nslcmop_admin.update({"operations": [new_op]})
3976 op_list = db_nslcmop_admin.get("operations")
3977 else:
3978 # Existing operations, append operation to list
3979 op_list.append(new_op)
3980
3981 db_nslcmop_update = {"_admin.operations": op_list}
3982 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3983 op_index = len(op_list) - 1
3984 return op_index
3985
3986 # Helper methods for scale() sub-operations
3987
3988 # pre-scale/post-scale:
3989 # Check for 3 different cases:
3990 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3991 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3992 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3993 def _check_or_add_scale_suboperation(
3994 self,
3995 db_nslcmop,
3996 vnf_index,
3997 vnf_config_primitive,
3998 primitive_params,
3999 operationType,
4000 RO_nsr_id=None,
4001 RO_scaling_info=None,
4002 ):
4003 # Find this sub-operation
4004 if RO_nsr_id and RO_scaling_info:
4005 operationType = "SCALE-RO"
4006 match = {
4007 "member_vnf_index": vnf_index,
4008 "RO_nsr_id": RO_nsr_id,
4009 "RO_scaling_info": RO_scaling_info,
4010 }
4011 else:
4012 match = {
4013 "member_vnf_index": vnf_index,
4014 "primitive": vnf_config_primitive,
4015 "primitive_params": primitive_params,
4016 "lcmOperationType": operationType,
4017 }
4018 op_index = self._find_suboperation(db_nslcmop, match)
4019 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4020 # a. New sub-operation
4021 # The sub-operation does not exist, add it.
4022 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4023 # The following parameters are set to None for all kind of scaling:
4024 vdu_id = None
4025 vdu_count_index = None
4026 vdu_name = None
4027 if RO_nsr_id and RO_scaling_info:
4028 vnf_config_primitive = None
4029 primitive_params = None
4030 else:
4031 RO_nsr_id = None
4032 RO_scaling_info = None
4033 # Initial status for sub-operation
4034 operationState = "PROCESSING"
4035 detailed_status = "In progress"
4036 # Add sub-operation for pre/post-scaling (zero or more operations)
4037 self._add_suboperation(
4038 db_nslcmop,
4039 vnf_index,
4040 vdu_id,
4041 vdu_count_index,
4042 vdu_name,
4043 vnf_config_primitive,
4044 primitive_params,
4045 operationState,
4046 detailed_status,
4047 operationType,
4048 RO_nsr_id,
4049 RO_scaling_info,
4050 )
4051 return self.SUBOPERATION_STATUS_NEW
4052 else:
4053 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4054 # or op_index (operationState != 'COMPLETED')
4055 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4056
4057 # Function to return execution_environment id
4058
4059 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4060 # TODO vdu_index_count
4061 for vca in vca_deployed_list:
4062 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4063 return vca["ee_id"]
4064
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives of a VCA and optionally destroy its execution environment
        :param logging_text: prefix for every log message
        :param db_nslcmop: database content of the current nslcmop operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment here, because all of them
            will be destroyed at once afterwards
        :param exec_primitives: False to not execute terminate primitives, because the config is not
            completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: id of the dedicated VCA, or None for the default one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value for backward compatibility - proxy charm
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so the primitive execution is tracked in db_nslcmop
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4170
    async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
        """Delete the whole juju namespace (model) of this NS instance at once.

        Writes TERMINATING to every VCA config status before the deletion and
        DELETED afterwards. An already-deleted namespace is not an error.
        :param db_nsr: nsr database record; its _id names the namespace to delete
        :param vca_id: id of the dedicated VCA, or None for the default one
        """
        self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
        namespace = "." + db_nsr["_id"]
        try:
            await self.n2vc.delete_namespace(
                namespace=namespace,
                total_timeout=self.timeout_charm_delete,
                vca_id=vca_id,
            )
        except N2VCNotFound:  # already deleted. Skip
            pass
        self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4183
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (classic, non-NG resource orchestrator)
        :param logging_text: prefix for every log message
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id:
        :param nslcmop_id:
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None. Raises LcmException when any of the deletions failed
        """
        db_nsr_update = {}
        # failed_detail accumulates non-fatal errors; they are raised together at the end
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                # Request RO to delete the ns instance from the VIM
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM, polling the RO action status
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # still deleting; report progress
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at VIM
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write status to db only when it changed, to avoid useless updates
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd from RO (only when the ns deletion itself did not fail)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete each vnfd deployed at RO (only when previous deletions succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        # Write the final VIM status and raise when anything failed
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4383
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a NS instance.

        Runs in three stages: (1) prepare the task and read db records,
        (2) execute terminate config primitives and destroy the individual
        execution environments that need it, (3) delete all remaining
        execution environments, KDU instances and the RO/VIM deployment.
        Progress and final status are written to the "nsrs" and "nslcmops"
        collections, and a "terminated" message is published on kafka.
        :param nsr_id: id of the NS instance record ("nsrs" collection)
        :param nslcmop_id: id of the operation record ("nslcmops" collection)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human description, consumed by _wait_for_tasks
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so the finally-block updates are explicit db writes
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each VNFD so it is read from db only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # Pick the configuration descriptor matching the VCA level:
                # ns-level, vdu-level, kdu-level or vnf-level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (NG-RO when available, classic RO otherwise)
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of stuff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        # propagate the final state to all related VNFRs
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify the result so NBI can autoremove the nsr when requested
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4712
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, collecting errors and reporting progress.

        :param logging_text: prefix for every log message
        :param created_tasks_info: dict mapping each task to a human description of it
        :param timeout: overall timeout in seconds for the whole set of tasks
        :param stage: 3-element list [stage, step, VIM-status]; index 1 is updated
            with "done/total" progress (and error details when any task fails)
        :param nslcmop_id: operation id where progress is written
        :param nsr_id: when provided, errors are also written to this nsr record
        :return: list of error-detail strings (empty when all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled exceptions are logged as one line;
                    # anything else gets a full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4789
4790 @staticmethod
4791 def _map_primitive_params(primitive_desc, params, instantiation_params):
4792 """
4793 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4794 The default-value is used. If it is between < > it look for a value at instantiation_params
4795 :param primitive_desc: portion of VNFD/NSD that describes primitive
4796 :param params: Params provided by user
4797 :param instantiation_params: Instantiation params provided by user
4798 :return: a dictionary with the calculated params
4799 """
4800 calculated_params = {}
4801 for parameter in primitive_desc.get("parameter", ()):
4802 param_name = parameter["name"]
4803 if param_name in params:
4804 calculated_params[param_name] = params[param_name]
4805 elif "default-value" in parameter or "value" in parameter:
4806 if "value" in parameter:
4807 calculated_params[param_name] = parameter["value"]
4808 else:
4809 calculated_params[param_name] = parameter["default-value"]
4810 if (
4811 isinstance(calculated_params[param_name], str)
4812 and calculated_params[param_name].startswith("<")
4813 and calculated_params[param_name].endswith(">")
4814 ):
4815 if calculated_params[param_name][1:-1] in instantiation_params:
4816 calculated_params[param_name] = instantiation_params[
4817 calculated_params[param_name][1:-1]
4818 ]
4819 else:
4820 raise LcmException(
4821 "Parameter {} needed to execute primitive {} not provided".format(
4822 calculated_params[param_name], primitive_desc["name"]
4823 )
4824 )
4825 else:
4826 raise LcmException(
4827 "Parameter {} needed to execute primitive {} not provided".format(
4828 param_name, primitive_desc["name"]
4829 )
4830 )
4831
4832 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4833 calculated_params[param_name] = yaml.safe_dump(
4834 calculated_params[param_name], default_flow_style=True, width=256
4835 )
4836 elif isinstance(calculated_params[param_name], str) and calculated_params[
4837 param_name
4838 ].startswith("!!yaml "):
4839 calculated_params[param_name] = calculated_params[param_name][7:]
4840 if parameter.get("data-type") == "INTEGER":
4841 try:
4842 calculated_params[param_name] = int(calculated_params[param_name])
4843 except ValueError: # error converting string to int
4844 raise LcmException(
4845 "Parameter {} of primitive {} must be integer".format(
4846 param_name, primitive_desc["name"]
4847 )
4848 )
4849 elif parameter.get("data-type") == "BOOLEAN":
4850 calculated_params[param_name] = not (
4851 (str(calculated_params[param_name])).lower() == "false"
4852 )
4853
4854 # add always ns_config_info if primitive name is config
4855 if primitive_desc["name"] == "config":
4856 if "ns_config_info" in instantiation_params:
4857 calculated_params["ns_config_info"] = instantiation_params[
4858 "ns_config_info"
4859 ]
4860 return calculated_params
4861
4862 def _look_for_deployed_vca(
4863 self,
4864 deployed_vca,
4865 member_vnf_index,
4866 vdu_id,
4867 vdu_count_index,
4868 kdu_name=None,
4869 ee_descriptor_id=None,
4870 ):
4871 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4872 for vca in deployed_vca:
4873 if not vca:
4874 continue
4875 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4876 continue
4877 if (
4878 vdu_count_index is not None
4879 and vdu_count_index != vca["vdu_count_index"]
4880 ):
4881 continue
4882 if kdu_name and kdu_name != vca["kdu_name"]:
4883 continue
4884 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4885 continue
4886 break
4887 else:
4888 # vca_deployed not found
4889 raise LcmException(
4890 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4891 " is not deployed".format(
4892 member_vnf_index,
4893 vdu_id,
4894 vdu_count_index,
4895 kdu_name,
4896 ee_descriptor_id,
4897 )
4898 )
4899 # get ee_id
4900 ee_id = vca.get("ee_id")
4901 vca_type = vca.get(
4902 "type", "lxc_proxy_charm"
4903 ) # default value for backward compatibility - proxy charm
4904 if not ee_id:
4905 raise LcmException(
4906 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4907 "execution environment".format(
4908 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4909 )
4910 )
4911 return ee_id, vca_type
4912
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive in an execution environment, retrying on failure.

        :param ee_id: id of the execution environment where to run the primitive
        :param primitive: primitive name; "config" params are wrapped as {"params": ...}
        :param primitive_params: dict of parameters for the primitive
        :param retries: number of additional attempts on error (0 = single attempt)
        :param retries_interval: seconds to wait between attempts
        :param timeout: overall timeout per attempt; defaults to self.timeout_primitive
        :param vca_type: key of self.vca_map; defaults to "lxc_proxy_charm"
        :param db_dict: db info where the primitive status is written while running
        :param vca_id: id of the dedicated VCA, or None for the default one
        :return: tuple (status, detail): ("COMPLETED", output) on success,
            ("FAILED", error) after exhausting retries, or ("FAIL", message)
            on unexpected errors; LcmException and cancellation are re-raised
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            # retry loop: first attempt plus `retries` extra attempts
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout_progress_primitive,
                            total_timeout=self.timeout_primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout_primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:  # asyncio.TimeoutError
                    if isinstance(e, asyncio.TimeoutError):
                        e = "Timeout"
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        return "FAILED", str(e)

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4971
4972 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4973 """
4974 Updating the vca_status with latest juju information in nsrs record
4975 :param: nsr_id: Id of the nsr
4976 :param: nslcmop_id: Id of the nslcmop
4977 :return: None
4978 """
4979
4980 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4981 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4982 vca_id = self.get_vca_id({}, db_nsr)
4983 if db_nsr["_admin"]["deployed"]["K8s"]:
4984 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4985 cluster_uuid, kdu_instance, cluster_type = (
4986 k8s["k8scluster-uuid"],
4987 k8s["kdu-instance"],
4988 k8s["k8scluster-type"],
4989 )
4990 await self._on_update_k8s_db(
4991 cluster_uuid=cluster_uuid,
4992 kdu_instance=kdu_instance,
4993 filter={"_id": nsr_id},
4994 vca_id=vca_id,
4995 cluster_type=cluster_type,
4996 )
4997 else:
4998 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4999 table, filter = "nsrs", {"_id": nsr_id}
5000 path = "_admin.deployed.VCA.{}.".format(vca_index)
5001 await self._on_update_n2vc_db(table, filter, path, {})
5002
5003 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5004 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5005
    async def action(self, nsr_id, nslcmop_id):
        """Run a primitive (action) over a NS, VNF, VDU or KDU.

        Reads the operation parameters from the "nslcmops" record, resolves the
        target descriptor configuration, then either executes the primitive on a
        k8s cluster (KDU upgrade/rollback/status or a helm/juju action) or on
        the deployed VCA execution environment. Writes progress, final status
        and a kafka "actioned" notification in the finally block.

        :param nsr_id: NS instance id ("nsrs" record _id)
        :param nslcmop_id: operation id ("nslcmops" record _id)
        :return: (nslcmop_operation_state, detailed_status) after an error path;
                 the success path returns None and persists results via finally
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored JSON-encoded; decode each entry
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound when vnf_index is set; an
            # NS-level action would hit an unbound name here — confirm callers
            # always provide member_vnf_index or that this is handled upstream
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU management primitives are allowed without a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve the instantiation-time additional params for the target level
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # helm charts run declared actions through exec_primitive instead
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound when the block above ran;
            # relies on short-circuit of the `or` when kdu has no configuration
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        # strip a ":version" suffix from the stored model reference
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # declared (non-management) KDU action
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # VCA path: execute the primitive on the deployed execution environment
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the outcome whether the action succeeded or failed
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    # notify subscribers (e.g. NBI) about the finished action
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5369
5370 async def terminate_vdus(
5371 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5372 ):
5373 """This method terminates VDUs
5374
5375 Args:
5376 db_vnfr: VNF instance record
5377 member_vnf_index: VNF index to identify the VDUs to be removed
5378 db_nsr: NS instance record
5379 update_db_nslcmops: Nslcmop update record
5380 """
5381 vca_scaling_info = []
5382 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5383 scaling_info["scaling_direction"] = "IN"
5384 scaling_info["vdu-delete"] = {}
5385 scaling_info["kdu-delete"] = {}
5386 db_vdur = db_vnfr.get("vdur")
5387 vdur_list = copy(db_vdur)
5388 count_index = 0
5389 for index, vdu in enumerate(vdur_list):
5390 vca_scaling_info.append(
5391 {
5392 "osm_vdu_id": vdu["vdu-id-ref"],
5393 "member-vnf-index": member_vnf_index,
5394 "type": "delete",
5395 "vdu_index": count_index,
5396 })
5397 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5398 scaling_info["vdu"].append(
5399 {
5400 "name": vdu.get("name") or vdu.get("vdu-name"),
5401 "vdu_id": vdu["vdu-id-ref"],
5402 "interface": [],
5403 })
5404 for interface in vdu["interfaces"]:
5405 scaling_info["vdu"][index]["interface"].append(
5406 {
5407 "name": interface["name"],
5408 "ip_address": interface["ip-address"],
5409 "mac_address": interface.get("mac-address"),
5410 })
5411 self.logger.info("NS update scaling info{}".format(scaling_info))
5412 stage[2] = "Terminating VDUs"
5413 if scaling_info.get("vdu-delete"):
5414 # scale_process = "RO"
5415 if self.ro_config.get("ng"):
5416 await self._scale_ng_ro(
5417 logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
5418 )
5419
5420 async def remove_vnf(
5421 self, nsr_id, nslcmop_id, vnf_instance_id
5422 ):
5423 """This method is to Remove VNF instances from NS.
5424
5425 Args:
5426 nsr_id: NS instance id
5427 nslcmop_id: nslcmop id of update
5428 vnf_instance_id: id of the VNF instance to be removed
5429
5430 Returns:
5431 result: (str, str) COMPLETED/FAILED, details
5432 """
5433 try:
5434 db_nsr_update = {}
5435 logging_text = "Task ns={} update ".format(nsr_id)
5436 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5437 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5438 if check_vnfr_count > 1:
5439 stage = ["", "", ""]
5440 step = "Getting nslcmop from database"
5441 self.logger.debug(step + " after having waited for previous tasks to be completed")
5442 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5443 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5444 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5445 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5446 """ db_vnfr = self.db.get_one(
5447 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5448
5449 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5450 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5451
5452 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5453 constituent_vnfr.remove(db_vnfr.get("_id"))
5454 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
5455 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5456 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5457 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5458 return "COMPLETED", "Done"
5459 else:
5460 step = "Terminate VNF Failed with"
5461 raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
5462 vnf_instance_id))
5463 except (LcmException, asyncio.CancelledError):
5464 raise
5465 except Exception as e:
5466 self.logger.debug("Error removing VNF {}".format(e))
5467 return "FAILED", "Error removing VNF {}".format(e)
5468
    async def _ns_redeploy_vnf(
        self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the existing VDUs of the VNF, rewrites the vnfr with the
        latest descriptor revision and a new vdur list (taken from the
        operation's "newVdur" parameter), then asks NG-RO to create the new
        resources as a scale-OUT.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the connection-point list from the descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(
                    vdud, db_vnfd
                )
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is rebuilt per VDU and never
                # consumed inside this method — confirm whether it is still needed
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info))
                await self._scale_ng_ro(
                    logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5572
5573 async def _ns_charm_upgrade(
5574 self,
5575 ee_id,
5576 charm_id,
5577 charm_type,
5578 path,
5579 timeout: float = None,
5580 ) -> (str, str):
5581 """This method upgrade charms in VNF instances
5582
5583 Args:
5584 ee_id: Execution environment id
5585 path: Local path to the charm
5586 charm_id: charm-id
5587 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5588 timeout: (Float) Timeout for the ns update operation
5589
5590 Returns:
5591 result: (str, str) COMPLETED/FAILED, details
5592 """
5593 try:
5594 charm_type = charm_type or "lxc_proxy_charm"
5595 output = await self.vca_map[charm_type].upgrade_charm(
5596 ee_id=ee_id,
5597 path=path,
5598 charm_id=charm_id,
5599 charm_type=charm_type,
5600 timeout=timeout or self.timeout_ns_update,
5601 )
5602
5603 if output:
5604 return "COMPLETED", output
5605
5606 except (LcmException, asyncio.CancelledError):
5607 raise
5608
5609 except Exception as e:
5610
5611 self.logger.debug("Error upgrading charm {}".format(path))
5612
5613 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5614
5615 async def update(self, nsr_id, nslcmop_id):
5616 """Update NS according to different update types
5617
5618 This method performs upgrade of VNF instances then updates the revision
5619 number in VNF record
5620
5621 Args:
5622 nsr_id: Network service will be updated
5623 nslcmop_id: ns lcm operation id
5624
5625 Returns:
5626 It may raise DbException, LcmException, N2VCException, K8sException
5627
5628 """
5629 # Try to lock HA task here
5630 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5631 if not task_is_locked_by_me:
5632 return
5633
5634 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5635 self.logger.debug(logging_text + "Enter")
5636
5637 # Set the required variables to be filled up later
5638 db_nsr = None
5639 db_nslcmop_update = {}
5640 vnfr_update = {}
5641 nslcmop_operation_state = None
5642 db_nsr_update = {}
5643 error_description_nslcmop = ""
5644 exc = None
5645 change_type = "updated"
5646 detailed_status = ""
5647
5648 try:
5649 # wait for any previous tasks in process
5650 step = "Waiting for previous operations to terminate"
5651 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5652 self._write_ns_status(
5653 nsr_id=nsr_id,
5654 ns_state=None,
5655 current_operation="UPDATING",
5656 current_operation_id=nslcmop_id,
5657 )
5658
5659 step = "Getting nslcmop from database"
5660 db_nslcmop = self.db.get_one(
5661 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5662 )
5663 update_type = db_nslcmop["operationParams"]["updateType"]
5664
5665 step = "Getting nsr from database"
5666 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5667 old_operational_status = db_nsr["operational-status"]
5668 db_nsr_update["operational-status"] = "updating"
5669 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5670 nsr_deployed = db_nsr["_admin"].get("deployed")
5671
5672 if update_type == "CHANGE_VNFPKG":
5673
5674 # Get the input parameters given through update request
5675 vnf_instance_id = db_nslcmop["operationParams"][
5676 "changeVnfPackageData"
5677 ].get("vnfInstanceId")
5678
5679 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5680 "vnfdId"
5681 )
5682 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5683
5684 step = "Getting vnfr from database"
5685 db_vnfr = self.db.get_one(
5686 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5687 )
5688
5689 step = "Getting vnfds from database"
5690 # Latest VNFD
5691 latest_vnfd = self.db.get_one(
5692 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5693 )
5694 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5695
5696 # Current VNFD
5697 current_vnf_revision = db_vnfr.get("revision", 1)
5698 current_vnfd = self.db.get_one(
5699 "vnfds_revisions",
5700 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5701 fail_on_empty=False,
5702 )
5703 # Charm artifact paths will be filled up later
5704 (
5705 current_charm_artifact_path,
5706 target_charm_artifact_path,
5707 charm_artifact_paths,
5708 ) = ([], [], [])
5709
5710 step = "Checking if revision has changed in VNFD"
5711 if current_vnf_revision != latest_vnfd_revision:
5712
5713 change_type = "policy_updated"
5714
5715 # There is new revision of VNFD, update operation is required
5716 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5717 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5718
5719 step = "Removing the VNFD packages if they exist in the local path"
5720 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5721 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5722
5723 step = "Get the VNFD packages from FSMongo"
5724 self.fs.sync(from_path=latest_vnfd_path)
5725 self.fs.sync(from_path=current_vnfd_path)
5726
5727 step = (
5728 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5729 )
5730 base_folder = latest_vnfd["_admin"]["storage"]
5731
5732 for charm_index, charm_deployed in enumerate(
5733 get_iterable(nsr_deployed, "VCA")
5734 ):
5735 vnf_index = db_vnfr.get("member-vnf-index-ref")
5736
5737 # Getting charm-id and charm-type
5738 if charm_deployed.get("member-vnf-index") == vnf_index:
5739 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5740 charm_type = charm_deployed.get("type")
5741
5742 # Getting ee-id
5743 ee_id = charm_deployed.get("ee_id")
5744
5745 step = "Getting descriptor config"
5746 descriptor_config = get_configuration(
5747 current_vnfd, current_vnfd["id"]
5748 )
5749
5750 if "execution-environment-list" in descriptor_config:
5751 ee_list = descriptor_config.get(
5752 "execution-environment-list", []
5753 )
5754 else:
5755 ee_list = []
5756
5757 # There could be several charm used in the same VNF
5758 for ee_item in ee_list:
5759 if ee_item.get("juju"):
5760
5761 step = "Getting charm name"
5762 charm_name = ee_item["juju"].get("charm")
5763
5764 step = "Setting Charm artifact paths"
5765 current_charm_artifact_path.append(
5766 get_charm_artifact_path(
5767 base_folder,
5768 charm_name,
5769 charm_type,
5770 current_vnf_revision,
5771 )
5772 )
5773 target_charm_artifact_path.append(
5774 get_charm_artifact_path(
5775 base_folder,
5776 charm_name,
5777 charm_type,
5778 latest_vnfd_revision,
5779 )
5780 )
5781
5782 charm_artifact_paths = zip(
5783 current_charm_artifact_path, target_charm_artifact_path
5784 )
5785
5786 step = "Checking if software version has changed in VNFD"
5787 if find_software_version(current_vnfd) != find_software_version(
5788 latest_vnfd
5789 ):
5790
5791 step = "Checking if existing VNF has charm"
5792 for current_charm_path, target_charm_path in list(
5793 charm_artifact_paths
5794 ):
5795 if current_charm_path:
5796 raise LcmException(
5797 "Software version change is not supported as VNF instance {} has charm.".format(
5798 vnf_instance_id
5799 )
5800 )
5801
5802 # There is no change in the charm package, then redeploy the VNF
5803 # based on new descriptor
5804 step = "Redeploying VNF"
5805 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5806 (
5807 result,
5808 detailed_status
5809 ) = await self._ns_redeploy_vnf(
5810 nsr_id,
5811 nslcmop_id,
5812 latest_vnfd,
5813 db_vnfr,
5814 db_nsr
5815 )
5816 if result == "FAILED":
5817 nslcmop_operation_state = result
5818 error_description_nslcmop = detailed_status
5819 db_nslcmop_update["detailed-status"] = detailed_status
5820 self.logger.debug(
5821 logging_text
5822 + " step {} Done with result {} {}".format(
5823 step, nslcmop_operation_state, detailed_status
5824 )
5825 )
5826
5827 else:
5828 step = "Checking if any charm package has changed or not"
5829 for current_charm_path, target_charm_path in list(
5830 charm_artifact_paths
5831 ):
5832 if (
5833 current_charm_path
5834 and target_charm_path
5835 and self.check_charm_hash_changed(
5836 current_charm_path, target_charm_path
5837 )
5838 ):
5839
5840 step = "Checking whether VNF uses juju bundle"
5841 if check_juju_bundle_existence(current_vnfd):
5842
5843 raise LcmException(
5844 "Charm upgrade is not supported for the instance which"
5845 " uses juju-bundle: {}".format(
5846 check_juju_bundle_existence(current_vnfd)
5847 )
5848 )
5849
5850 step = "Upgrading Charm"
5851 (
5852 result,
5853 detailed_status,
5854 ) = await self._ns_charm_upgrade(
5855 ee_id=ee_id,
5856 charm_id=charm_id,
5857 charm_type=charm_type,
5858 path=self.fs.path + target_charm_path,
5859 timeout=timeout_seconds,
5860 )
5861
5862 if result == "FAILED":
5863 nslcmop_operation_state = result
5864 error_description_nslcmop = detailed_status
5865
5866 db_nslcmop_update["detailed-status"] = detailed_status
5867 self.logger.debug(
5868 logging_text
5869 + " step {} Done with result {} {}".format(
5870 step, nslcmop_operation_state, detailed_status
5871 )
5872 )
5873
5874 step = "Updating policies"
5875 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5876 result = "COMPLETED"
5877 detailed_status = "Done"
5878 db_nslcmop_update["detailed-status"] = "Done"
5879
5880 # If nslcmop_operation_state is None, so any operation is not failed.
5881 if not nslcmop_operation_state:
5882 nslcmop_operation_state = "COMPLETED"
5883
5884 # If update CHANGE_VNFPKG nslcmop_operation is successful
5885 # vnf revision need to be updated
5886 vnfr_update["revision"] = latest_vnfd_revision
5887 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5888
5889 self.logger.debug(
5890 logging_text
5891 + " task Done with result {} {}".format(
5892 nslcmop_operation_state, detailed_status
5893 )
5894 )
5895 elif update_type == "REMOVE_VNF":
5896 # This part is included in https://osm.etsi.org/gerrit/11876
5897 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5898 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5899 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5900 step = "Removing VNF"
5901 (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
5902 if result == "FAILED":
5903 nslcmop_operation_state = result
5904 error_description_nslcmop = detailed_status
5905 db_nslcmop_update["detailed-status"] = detailed_status
5906 change_type = "vnf_terminated"
5907 if not nslcmop_operation_state:
5908 nslcmop_operation_state = "COMPLETED"
5909 self.logger.debug(
5910 logging_text
5911 + " task Done with result {} {}".format(
5912 nslcmop_operation_state, detailed_status
5913 )
5914 )
5915
5916 elif update_type == "OPERATE_VNF":
5917 vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
5918 operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
5919 additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
5920 (result, detailed_status) = await self.rebuild_start_stop(
5921 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5922 )
5923 if result == "FAILED":
5924 nslcmop_operation_state = result
5925 error_description_nslcmop = detailed_status
5926 db_nslcmop_update["detailed-status"] = detailed_status
5927 if not nslcmop_operation_state:
5928 nslcmop_operation_state = "COMPLETED"
5929 self.logger.debug(
5930 logging_text
5931 + " task Done with result {} {}".format(
5932 nslcmop_operation_state, detailed_status
5933 )
5934 )
5935
5936 # If nslcmop_operation_state is None, so any operation is not failed.
5937 # All operations are executed in overall.
5938 if not nslcmop_operation_state:
5939 nslcmop_operation_state = "COMPLETED"
5940 db_nsr_update["operational-status"] = old_operational_status
5941
5942 except (DbException, LcmException, N2VCException, K8sException) as e:
5943 self.logger.error(logging_text + "Exit Exception {}".format(e))
5944 exc = e
5945 except asyncio.CancelledError:
5946 self.logger.error(
5947 logging_text + "Cancelled Exception while '{}'".format(step)
5948 )
5949 exc = "Operation was cancelled"
5950 except asyncio.TimeoutError:
5951 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5952 exc = "Timeout"
5953 except Exception as e:
5954 exc = traceback.format_exc()
5955 self.logger.critical(
5956 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5957 exc_info=True,
5958 )
5959 finally:
5960 if exc:
5961 db_nslcmop_update[
5962 "detailed-status"
5963 ] = (
5964 detailed_status
5965 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5966 nslcmop_operation_state = "FAILED"
5967 db_nsr_update["operational-status"] = old_operational_status
5968 if db_nsr:
5969 self._write_ns_status(
5970 nsr_id=nsr_id,
5971 ns_state=db_nsr["nsState"],
5972 current_operation="IDLE",
5973 current_operation_id=None,
5974 other_update=db_nsr_update,
5975 )
5976
5977 self._write_op_status(
5978 op_id=nslcmop_id,
5979 stage="",
5980 error_message=error_description_nslcmop,
5981 operation_state=nslcmop_operation_state,
5982 other_update=db_nslcmop_update,
5983 )
5984
5985 if nslcmop_operation_state:
5986 try:
5987 msg = {
5988 "nsr_id": nsr_id,
5989 "nslcmop_id": nslcmop_id,
5990 "operationState": nslcmop_operation_state,
5991 }
5992 if change_type in ("vnf_terminated", "policy_updated"):
5993 msg.update({"vnf_member_index": member_vnf_index})
5994 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
5995 except Exception as e:
5996 self.logger.error(
5997 logging_text + "kafka_write notification Exception {}".format(e)
5998 )
5999 self.logger.debug(logging_text + "Exit")
6000 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6001 return nslcmop_operation_state, detailed_status
6002
6003 async def scale(self, nsr_id, nslcmop_id):
6004 # Try to lock HA task here
6005 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6006 if not task_is_locked_by_me:
6007 return
6008
6009 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6010 stage = ["", "", ""]
6011 tasks_dict_info = {}
6012 # ^ stage, step, VIM progress
6013 self.logger.debug(logging_text + "Enter")
6014 # get all needed from database
6015 db_nsr = None
6016 db_nslcmop_update = {}
6017 db_nsr_update = {}
6018 exc = None
6019 # in case of error, indicates what part of scale was failed to put nsr at error status
6020 scale_process = None
6021 old_operational_status = ""
6022 old_config_status = ""
6023 nsi_id = None
6024 try:
6025 # wait for any previous tasks in process
6026 step = "Waiting for previous operations to terminate"
6027 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6028 self._write_ns_status(
6029 nsr_id=nsr_id,
6030 ns_state=None,
6031 current_operation="SCALING",
6032 current_operation_id=nslcmop_id,
6033 )
6034
6035 step = "Getting nslcmop from database"
6036 self.logger.debug(
6037 step + " after having waited for previous tasks to be completed"
6038 )
6039 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6040
6041 step = "Getting nsr from database"
6042 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6043 old_operational_status = db_nsr["operational-status"]
6044 old_config_status = db_nsr["config-status"]
6045
6046 step = "Parsing scaling parameters"
6047 db_nsr_update["operational-status"] = "scaling"
6048 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6049 nsr_deployed = db_nsr["_admin"].get("deployed")
6050
6051 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6052 "scaleByStepData"
6053 ]["member-vnf-index"]
6054 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6055 "scaleByStepData"
6056 ]["scaling-group-descriptor"]
6057 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6058 # for backward compatibility
6059 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6060 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6061 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6062 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6063
6064 step = "Getting vnfr from database"
6065 db_vnfr = self.db.get_one(
6066 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6067 )
6068
6069 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6070
6071 step = "Getting vnfd from database"
6072 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6073
6074 base_folder = db_vnfd["_admin"]["storage"]
6075
6076 step = "Getting scaling-group-descriptor"
6077 scaling_descriptor = find_in_list(
6078 get_scaling_aspect(db_vnfd),
6079 lambda scale_desc: scale_desc["name"] == scaling_group,
6080 )
6081 if not scaling_descriptor:
6082 raise LcmException(
6083 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6084 "at vnfd:scaling-group-descriptor".format(scaling_group)
6085 )
6086
6087 step = "Sending scale order to VIM"
6088 # TODO check if ns is in a proper status
6089 nb_scale_op = 0
6090 if not db_nsr["_admin"].get("scaling-group"):
6091 self.update_db_2(
6092 "nsrs",
6093 nsr_id,
6094 {
6095 "_admin.scaling-group": [
6096 {"name": scaling_group, "nb-scale-op": 0}
6097 ]
6098 },
6099 )
6100 admin_scale_index = 0
6101 else:
6102 for admin_scale_index, admin_scale_info in enumerate(
6103 db_nsr["_admin"]["scaling-group"]
6104 ):
6105 if admin_scale_info["name"] == scaling_group:
6106 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6107 break
6108 else: # not found, set index one plus last element and add new entry with the name
6109 admin_scale_index += 1
6110 db_nsr_update[
6111 "_admin.scaling-group.{}.name".format(admin_scale_index)
6112 ] = scaling_group
6113
6114 vca_scaling_info = []
6115 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6116 if scaling_type == "SCALE_OUT":
6117 if "aspect-delta-details" not in scaling_descriptor:
6118 raise LcmException(
6119 "Aspect delta details not fount in scaling descriptor {}".format(
6120 scaling_descriptor["name"]
6121 )
6122 )
6123 # count if max-instance-count is reached
6124 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6125
6126 scaling_info["scaling_direction"] = "OUT"
6127 scaling_info["vdu-create"] = {}
6128 scaling_info["kdu-create"] = {}
6129 for delta in deltas:
6130 for vdu_delta in delta.get("vdu-delta", {}):
6131 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6132 # vdu_index also provides the number of instance of the targeted vdu
6133 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6134 cloud_init_text = self._get_vdu_cloud_init_content(
6135 vdud, db_vnfd
6136 )
6137 if cloud_init_text:
6138 additional_params = (
6139 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6140 or {}
6141 )
6142 cloud_init_list = []
6143
6144 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6145 max_instance_count = 10
6146 if vdu_profile and "max-number-of-instances" in vdu_profile:
6147 max_instance_count = vdu_profile.get(
6148 "max-number-of-instances", 10
6149 )
6150
6151 default_instance_num = get_number_of_instances(
6152 db_vnfd, vdud["id"]
6153 )
6154 instances_number = vdu_delta.get("number-of-instances", 1)
6155 nb_scale_op += instances_number
6156
6157 new_instance_count = nb_scale_op + default_instance_num
6158 # Control if new count is over max and vdu count is less than max.
6159 # Then assign new instance count
6160 if new_instance_count > max_instance_count > vdu_count:
6161 instances_number = new_instance_count - max_instance_count
6162 else:
6163 instances_number = instances_number
6164
6165 if new_instance_count > max_instance_count:
6166 raise LcmException(
6167 "reached the limit of {} (max-instance-count) "
6168 "scaling-out operations for the "
6169 "scaling-group-descriptor '{}'".format(
6170 nb_scale_op, scaling_group
6171 )
6172 )
6173 for x in range(vdu_delta.get("number-of-instances", 1)):
6174 if cloud_init_text:
6175 # TODO Information of its own ip is not available because db_vnfr is not updated.
6176 additional_params["OSM"] = get_osm_params(
6177 db_vnfr, vdu_delta["id"], vdu_index + x
6178 )
6179 cloud_init_list.append(
6180 self._parse_cloud_init(
6181 cloud_init_text,
6182 additional_params,
6183 db_vnfd["id"],
6184 vdud["id"],
6185 )
6186 )
6187 vca_scaling_info.append(
6188 {
6189 "osm_vdu_id": vdu_delta["id"],
6190 "member-vnf-index": vnf_index,
6191 "type": "create",
6192 "vdu_index": vdu_index + x,
6193 }
6194 )
6195 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6196 for kdu_delta in delta.get("kdu-resource-delta", {}):
6197 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6198 kdu_name = kdu_profile["kdu-name"]
6199 resource_name = kdu_profile.get("resource-name", "")
6200
6201 # Might have different kdus in the same delta
6202 # Should have list for each kdu
6203 if not scaling_info["kdu-create"].get(kdu_name, None):
6204 scaling_info["kdu-create"][kdu_name] = []
6205
6206 kdur = get_kdur(db_vnfr, kdu_name)
6207 if kdur.get("helm-chart"):
6208 k8s_cluster_type = "helm-chart-v3"
6209 self.logger.debug("kdur: {}".format(kdur))
6210 if (
6211 kdur.get("helm-version")
6212 and kdur.get("helm-version") == "v2"
6213 ):
6214 k8s_cluster_type = "helm-chart"
6215 elif kdur.get("juju-bundle"):
6216 k8s_cluster_type = "juju-bundle"
6217 else:
6218 raise LcmException(
6219 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6220 "juju-bundle. Maybe an old NBI version is running".format(
6221 db_vnfr["member-vnf-index-ref"], kdu_name
6222 )
6223 )
6224
6225 max_instance_count = 10
6226 if kdu_profile and "max-number-of-instances" in kdu_profile:
6227 max_instance_count = kdu_profile.get(
6228 "max-number-of-instances", 10
6229 )
6230
6231 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6232 deployed_kdu, _ = get_deployed_kdu(
6233 nsr_deployed, kdu_name, vnf_index
6234 )
6235 if deployed_kdu is None:
6236 raise LcmException(
6237 "KDU '{}' for vnf '{}' not deployed".format(
6238 kdu_name, vnf_index
6239 )
6240 )
6241 kdu_instance = deployed_kdu.get("kdu-instance")
6242 instance_num = await self.k8scluster_map[
6243 k8s_cluster_type
6244 ].get_scale_count(
6245 resource_name,
6246 kdu_instance,
6247 vca_id=vca_id,
6248 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6249 kdu_model=deployed_kdu.get("kdu-model"),
6250 )
6251 kdu_replica_count = instance_num + kdu_delta.get(
6252 "number-of-instances", 1
6253 )
6254
6255 # Control if new count is over max and instance_num is less than max.
6256 # Then assign max instance number to kdu replica count
6257 if kdu_replica_count > max_instance_count > instance_num:
6258 kdu_replica_count = max_instance_count
6259 if kdu_replica_count > max_instance_count:
6260 raise LcmException(
6261 "reached the limit of {} (max-instance-count) "
6262 "scaling-out operations for the "
6263 "scaling-group-descriptor '{}'".format(
6264 instance_num, scaling_group
6265 )
6266 )
6267
6268 for x in range(kdu_delta.get("number-of-instances", 1)):
6269 vca_scaling_info.append(
6270 {
6271 "osm_kdu_id": kdu_name,
6272 "member-vnf-index": vnf_index,
6273 "type": "create",
6274 "kdu_index": instance_num + x - 1,
6275 }
6276 )
6277 scaling_info["kdu-create"][kdu_name].append(
6278 {
6279 "member-vnf-index": vnf_index,
6280 "type": "create",
6281 "k8s-cluster-type": k8s_cluster_type,
6282 "resource-name": resource_name,
6283 "scale": kdu_replica_count,
6284 }
6285 )
6286 elif scaling_type == "SCALE_IN":
6287 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6288
6289 scaling_info["scaling_direction"] = "IN"
6290 scaling_info["vdu-delete"] = {}
6291 scaling_info["kdu-delete"] = {}
6292
6293 for delta in deltas:
6294 for vdu_delta in delta.get("vdu-delta", {}):
6295 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6296 min_instance_count = 0
6297 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6298 if vdu_profile and "min-number-of-instances" in vdu_profile:
6299 min_instance_count = vdu_profile["min-number-of-instances"]
6300
6301 default_instance_num = get_number_of_instances(
6302 db_vnfd, vdu_delta["id"]
6303 )
6304 instance_num = vdu_delta.get("number-of-instances", 1)
6305 nb_scale_op -= instance_num
6306
6307 new_instance_count = nb_scale_op + default_instance_num
6308
6309 if new_instance_count < min_instance_count < vdu_count:
6310 instances_number = min_instance_count - new_instance_count
6311 else:
6312 instances_number = instance_num
6313
6314 if new_instance_count < min_instance_count:
6315 raise LcmException(
6316 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6317 "scaling-group-descriptor '{}'".format(
6318 nb_scale_op, scaling_group
6319 )
6320 )
6321 for x in range(vdu_delta.get("number-of-instances", 1)):
6322 vca_scaling_info.append(
6323 {
6324 "osm_vdu_id": vdu_delta["id"],
6325 "member-vnf-index": vnf_index,
6326 "type": "delete",
6327 "vdu_index": vdu_index - 1 - x,
6328 }
6329 )
6330 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6331 for kdu_delta in delta.get("kdu-resource-delta", {}):
6332 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6333 kdu_name = kdu_profile["kdu-name"]
6334 resource_name = kdu_profile.get("resource-name", "")
6335
6336 if not scaling_info["kdu-delete"].get(kdu_name, None):
6337 scaling_info["kdu-delete"][kdu_name] = []
6338
6339 kdur = get_kdur(db_vnfr, kdu_name)
6340 if kdur.get("helm-chart"):
6341 k8s_cluster_type = "helm-chart-v3"
6342 self.logger.debug("kdur: {}".format(kdur))
6343 if (
6344 kdur.get("helm-version")
6345 and kdur.get("helm-version") == "v2"
6346 ):
6347 k8s_cluster_type = "helm-chart"
6348 elif kdur.get("juju-bundle"):
6349 k8s_cluster_type = "juju-bundle"
6350 else:
6351 raise LcmException(
6352 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6353 "juju-bundle. Maybe an old NBI version is running".format(
6354 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6355 )
6356 )
6357
6358 min_instance_count = 0
6359 if kdu_profile and "min-number-of-instances" in kdu_profile:
6360 min_instance_count = kdu_profile["min-number-of-instances"]
6361
6362 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6363 deployed_kdu, _ = get_deployed_kdu(
6364 nsr_deployed, kdu_name, vnf_index
6365 )
6366 if deployed_kdu is None:
6367 raise LcmException(
6368 "KDU '{}' for vnf '{}' not deployed".format(
6369 kdu_name, vnf_index
6370 )
6371 )
6372 kdu_instance = deployed_kdu.get("kdu-instance")
6373 instance_num = await self.k8scluster_map[
6374 k8s_cluster_type
6375 ].get_scale_count(
6376 resource_name,
6377 kdu_instance,
6378 vca_id=vca_id,
6379 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6380 kdu_model=deployed_kdu.get("kdu-model"),
6381 )
6382 kdu_replica_count = instance_num - kdu_delta.get(
6383 "number-of-instances", 1
6384 )
6385
6386 if kdu_replica_count < min_instance_count < instance_num:
6387 kdu_replica_count = min_instance_count
6388 if kdu_replica_count < min_instance_count:
6389 raise LcmException(
6390 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6391 "scaling-group-descriptor '{}'".format(
6392 instance_num, scaling_group
6393 )
6394 )
6395
6396 for x in range(kdu_delta.get("number-of-instances", 1)):
6397 vca_scaling_info.append(
6398 {
6399 "osm_kdu_id": kdu_name,
6400 "member-vnf-index": vnf_index,
6401 "type": "delete",
6402 "kdu_index": instance_num - x - 1,
6403 }
6404 )
6405 scaling_info["kdu-delete"][kdu_name].append(
6406 {
6407 "member-vnf-index": vnf_index,
6408 "type": "delete",
6409 "k8s-cluster-type": k8s_cluster_type,
6410 "resource-name": resource_name,
6411 "scale": kdu_replica_count,
6412 }
6413 )
6414
6415 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6416 vdu_delete = copy(scaling_info.get("vdu-delete"))
6417 if scaling_info["scaling_direction"] == "IN":
6418 for vdur in reversed(db_vnfr["vdur"]):
6419 if vdu_delete.get(vdur["vdu-id-ref"]):
6420 vdu_delete[vdur["vdu-id-ref"]] -= 1
6421 scaling_info["vdu"].append(
6422 {
6423 "name": vdur.get("name") or vdur.get("vdu-name"),
6424 "vdu_id": vdur["vdu-id-ref"],
6425 "interface": [],
6426 }
6427 )
6428 for interface in vdur["interfaces"]:
6429 scaling_info["vdu"][-1]["interface"].append(
6430 {
6431 "name": interface["name"],
6432 "ip_address": interface["ip-address"],
6433 "mac_address": interface.get("mac-address"),
6434 }
6435 )
6436 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6437
6438 # PRE-SCALE BEGIN
6439 step = "Executing pre-scale vnf-config-primitive"
6440 if scaling_descriptor.get("scaling-config-action"):
6441 for scaling_config_action in scaling_descriptor[
6442 "scaling-config-action"
6443 ]:
6444 if (
6445 scaling_config_action.get("trigger") == "pre-scale-in"
6446 and scaling_type == "SCALE_IN"
6447 ) or (
6448 scaling_config_action.get("trigger") == "pre-scale-out"
6449 and scaling_type == "SCALE_OUT"
6450 ):
6451 vnf_config_primitive = scaling_config_action[
6452 "vnf-config-primitive-name-ref"
6453 ]
6454 step = db_nslcmop_update[
6455 "detailed-status"
6456 ] = "executing pre-scale scaling-config-action '{}'".format(
6457 vnf_config_primitive
6458 )
6459
6460 # look for primitive
6461 for config_primitive in (
6462 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6463 ).get("config-primitive", ()):
6464 if config_primitive["name"] == vnf_config_primitive:
6465 break
6466 else:
6467 raise LcmException(
6468 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6469 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6470 "primitive".format(scaling_group, vnf_config_primitive)
6471 )
6472
6473 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6474 if db_vnfr.get("additionalParamsForVnf"):
6475 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6476
6477 scale_process = "VCA"
6478 db_nsr_update["config-status"] = "configuring pre-scaling"
6479 primitive_params = self._map_primitive_params(
6480 config_primitive, {}, vnfr_params
6481 )
6482
6483 # Pre-scale retry check: Check if this sub-operation has been executed before
6484 op_index = self._check_or_add_scale_suboperation(
6485 db_nslcmop,
6486 vnf_index,
6487 vnf_config_primitive,
6488 primitive_params,
6489 "PRE-SCALE",
6490 )
6491 if op_index == self.SUBOPERATION_STATUS_SKIP:
6492 # Skip sub-operation
6493 result = "COMPLETED"
6494 result_detail = "Done"
6495 self.logger.debug(
6496 logging_text
6497 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6498 vnf_config_primitive, result, result_detail
6499 )
6500 )
6501 else:
6502 if op_index == self.SUBOPERATION_STATUS_NEW:
6503 # New sub-operation: Get index of this sub-operation
6504 op_index = (
6505 len(db_nslcmop.get("_admin", {}).get("operations"))
6506 - 1
6507 )
6508 self.logger.debug(
6509 logging_text
6510 + "vnf_config_primitive={} New sub-operation".format(
6511 vnf_config_primitive
6512 )
6513 )
6514 else:
6515 # retry: Get registered params for this existing sub-operation
6516 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6517 op_index
6518 ]
6519 vnf_index = op.get("member_vnf_index")
6520 vnf_config_primitive = op.get("primitive")
6521 primitive_params = op.get("primitive_params")
6522 self.logger.debug(
6523 logging_text
6524 + "vnf_config_primitive={} Sub-operation retry".format(
6525 vnf_config_primitive
6526 )
6527 )
6528 # Execute the primitive, either with new (first-time) or registered (reintent) args
6529 ee_descriptor_id = config_primitive.get(
6530 "execution-environment-ref"
6531 )
6532 primitive_name = config_primitive.get(
6533 "execution-environment-primitive", vnf_config_primitive
6534 )
6535 ee_id, vca_type = self._look_for_deployed_vca(
6536 nsr_deployed["VCA"],
6537 member_vnf_index=vnf_index,
6538 vdu_id=None,
6539 vdu_count_index=None,
6540 ee_descriptor_id=ee_descriptor_id,
6541 )
6542 result, result_detail = await self._ns_execute_primitive(
6543 ee_id,
6544 primitive_name,
6545 primitive_params,
6546 vca_type=vca_type,
6547 vca_id=vca_id,
6548 )
6549 self.logger.debug(
6550 logging_text
6551 + "vnf_config_primitive={} Done with result {} {}".format(
6552 vnf_config_primitive, result, result_detail
6553 )
6554 )
6555 # Update operationState = COMPLETED | FAILED
6556 self._update_suboperation_status(
6557 db_nslcmop, op_index, result, result_detail
6558 )
6559
6560 if result == "FAILED":
6561 raise LcmException(result_detail)
6562 db_nsr_update["config-status"] = old_config_status
6563 scale_process = None
6564 # PRE-SCALE END
6565
6566 db_nsr_update[
6567 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6568 ] = nb_scale_op
6569 db_nsr_update[
6570 "_admin.scaling-group.{}.time".format(admin_scale_index)
6571 ] = time()
6572
6573 # SCALE-IN VCA - BEGIN
6574 if vca_scaling_info:
6575 step = db_nslcmop_update[
6576 "detailed-status"
6577 ] = "Deleting the execution environments"
6578 scale_process = "VCA"
6579 for vca_info in vca_scaling_info:
6580 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6581 member_vnf_index = str(vca_info["member-vnf-index"])
6582 self.logger.debug(
6583 logging_text + "vdu info: {}".format(vca_info)
6584 )
6585 if vca_info.get("osm_vdu_id"):
6586 vdu_id = vca_info["osm_vdu_id"]
6587 vdu_index = int(vca_info["vdu_index"])
6588 stage[
6589 1
6590 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6591 member_vnf_index, vdu_id, vdu_index
6592 )
6593 stage[2] = step = "Scaling in VCA"
6594 self._write_op_status(op_id=nslcmop_id, stage=stage)
6595 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6596 config_update = db_nsr["configurationStatus"]
6597 for vca_index, vca in enumerate(vca_update):
6598 if (
6599 (vca or vca.get("ee_id"))
6600 and vca["member-vnf-index"] == member_vnf_index
6601 and vca["vdu_count_index"] == vdu_index
6602 ):
6603 if vca.get("vdu_id"):
6604 config_descriptor = get_configuration(
6605 db_vnfd, vca.get("vdu_id")
6606 )
6607 elif vca.get("kdu_name"):
6608 config_descriptor = get_configuration(
6609 db_vnfd, vca.get("kdu_name")
6610 )
6611 else:
6612 config_descriptor = get_configuration(
6613 db_vnfd, db_vnfd["id"]
6614 )
6615 operation_params = (
6616 db_nslcmop.get("operationParams") or {}
6617 )
6618 exec_terminate_primitives = not operation_params.get(
6619 "skip_terminate_primitives"
6620 ) and vca.get("needed_terminate")
6621 task = asyncio.ensure_future(
6622 asyncio.wait_for(
6623 self.destroy_N2VC(
6624 logging_text,
6625 db_nslcmop,
6626 vca,
6627 config_descriptor,
6628 vca_index,
6629 destroy_ee=True,
6630 exec_primitives=exec_terminate_primitives,
6631 scaling_in=True,
6632 vca_id=vca_id,
6633 ),
6634 timeout=self.timeout_charm_delete,
6635 )
6636 )
6637 tasks_dict_info[task] = "Terminating VCA {}".format(
6638 vca.get("ee_id")
6639 )
6640 del vca_update[vca_index]
6641 del config_update[vca_index]
6642 # wait for pending tasks of terminate primitives
6643 if tasks_dict_info:
6644 self.logger.debug(
6645 logging_text
6646 + "Waiting for tasks {}".format(
6647 list(tasks_dict_info.keys())
6648 )
6649 )
6650 error_list = await self._wait_for_tasks(
6651 logging_text,
6652 tasks_dict_info,
6653 min(
6654 self.timeout_charm_delete, self.timeout_ns_terminate
6655 ),
6656 stage,
6657 nslcmop_id,
6658 )
6659 tasks_dict_info.clear()
6660 if error_list:
6661 raise LcmException("; ".join(error_list))
6662
6663 db_vca_and_config_update = {
6664 "_admin.deployed.VCA": vca_update,
6665 "configurationStatus": config_update,
6666 }
6667 self.update_db_2(
6668 "nsrs", db_nsr["_id"], db_vca_and_config_update
6669 )
6670 scale_process = None
6671 # SCALE-IN VCA - END
6672
6673 # SCALE RO - BEGIN
6674 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6675 scale_process = "RO"
6676 if self.ro_config.get("ng"):
6677 await self._scale_ng_ro(
6678 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6679 )
6680 scaling_info.pop("vdu-create", None)
6681 scaling_info.pop("vdu-delete", None)
6682
6683 scale_process = None
6684 # SCALE RO - END
6685
6686 # SCALE KDU - BEGIN
6687 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6688 scale_process = "KDU"
6689 await self._scale_kdu(
6690 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6691 )
6692 scaling_info.pop("kdu-create", None)
6693 scaling_info.pop("kdu-delete", None)
6694
6695 scale_process = None
6696 # SCALE KDU - END
6697
6698 if db_nsr_update:
6699 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6700
6701 # SCALE-UP VCA - BEGIN
6702 if vca_scaling_info:
6703 step = db_nslcmop_update[
6704 "detailed-status"
6705 ] = "Creating new execution environments"
6706 scale_process = "VCA"
6707 for vca_info in vca_scaling_info:
6708 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6709 member_vnf_index = str(vca_info["member-vnf-index"])
6710 self.logger.debug(
6711 logging_text + "vdu info: {}".format(vca_info)
6712 )
6713 vnfd_id = db_vnfr["vnfd-ref"]
6714 if vca_info.get("osm_vdu_id"):
6715 vdu_index = int(vca_info["vdu_index"])
6716 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6717 if db_vnfr.get("additionalParamsForVnf"):
6718 deploy_params.update(
6719 parse_yaml_strings(
6720 db_vnfr["additionalParamsForVnf"].copy()
6721 )
6722 )
6723 descriptor_config = get_configuration(
6724 db_vnfd, db_vnfd["id"]
6725 )
6726 if descriptor_config:
6727 vdu_id = None
6728 vdu_name = None
6729 kdu_name = None
6730 self._deploy_n2vc(
6731 logging_text=logging_text
6732 + "member_vnf_index={} ".format(member_vnf_index),
6733 db_nsr=db_nsr,
6734 db_vnfr=db_vnfr,
6735 nslcmop_id=nslcmop_id,
6736 nsr_id=nsr_id,
6737 nsi_id=nsi_id,
6738 vnfd_id=vnfd_id,
6739 vdu_id=vdu_id,
6740 kdu_name=kdu_name,
6741 member_vnf_index=member_vnf_index,
6742 vdu_index=vdu_index,
6743 vdu_name=vdu_name,
6744 deploy_params=deploy_params,
6745 descriptor_config=descriptor_config,
6746 base_folder=base_folder,
6747 task_instantiation_info=tasks_dict_info,
6748 stage=stage,
6749 )
6750 vdu_id = vca_info["osm_vdu_id"]
6751 vdur = find_in_list(
6752 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6753 )
6754 descriptor_config = get_configuration(db_vnfd, vdu_id)
6755 if vdur.get("additionalParams"):
6756 deploy_params_vdu = parse_yaml_strings(
6757 vdur["additionalParams"]
6758 )
6759 else:
6760 deploy_params_vdu = deploy_params
6761 deploy_params_vdu["OSM"] = get_osm_params(
6762 db_vnfr, vdu_id, vdu_count_index=vdu_index
6763 )
6764 if descriptor_config:
6765 vdu_name = None
6766 kdu_name = None
6767 stage[
6768 1
6769 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6770 member_vnf_index, vdu_id, vdu_index
6771 )
6772 stage[2] = step = "Scaling out VCA"
6773 self._write_op_status(op_id=nslcmop_id, stage=stage)
6774 self._deploy_n2vc(
6775 logging_text=logging_text
6776 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6777 member_vnf_index, vdu_id, vdu_index
6778 ),
6779 db_nsr=db_nsr,
6780 db_vnfr=db_vnfr,
6781 nslcmop_id=nslcmop_id,
6782 nsr_id=nsr_id,
6783 nsi_id=nsi_id,
6784 vnfd_id=vnfd_id,
6785 vdu_id=vdu_id,
6786 kdu_name=kdu_name,
6787 member_vnf_index=member_vnf_index,
6788 vdu_index=vdu_index,
6789 vdu_name=vdu_name,
6790 deploy_params=deploy_params_vdu,
6791 descriptor_config=descriptor_config,
6792 base_folder=base_folder,
6793 task_instantiation_info=tasks_dict_info,
6794 stage=stage,
6795 )
6796 # SCALE-UP VCA - END
6797 scale_process = None
6798
6799 # POST-SCALE BEGIN
6800 # execute primitive service POST-SCALING
6801 step = "Executing post-scale vnf-config-primitive"
6802 if scaling_descriptor.get("scaling-config-action"):
6803 for scaling_config_action in scaling_descriptor[
6804 "scaling-config-action"
6805 ]:
6806 if (
6807 scaling_config_action.get("trigger") == "post-scale-in"
6808 and scaling_type == "SCALE_IN"
6809 ) or (
6810 scaling_config_action.get("trigger") == "post-scale-out"
6811 and scaling_type == "SCALE_OUT"
6812 ):
6813 vnf_config_primitive = scaling_config_action[
6814 "vnf-config-primitive-name-ref"
6815 ]
6816 step = db_nslcmop_update[
6817 "detailed-status"
6818 ] = "executing post-scale scaling-config-action '{}'".format(
6819 vnf_config_primitive
6820 )
6821
6822 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6823 if db_vnfr.get("additionalParamsForVnf"):
6824 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6825
6826 # look for primitive
6827 for config_primitive in (
6828 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6829 ).get("config-primitive", ()):
6830 if config_primitive["name"] == vnf_config_primitive:
6831 break
6832 else:
6833 raise LcmException(
6834 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6835 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6836 "config-primitive".format(
6837 scaling_group, vnf_config_primitive
6838 )
6839 )
6840 scale_process = "VCA"
6841 db_nsr_update["config-status"] = "configuring post-scaling"
6842 primitive_params = self._map_primitive_params(
6843 config_primitive, {}, vnfr_params
6844 )
6845
6846 # Post-scale retry check: Check if this sub-operation has been executed before
6847 op_index = self._check_or_add_scale_suboperation(
6848 db_nslcmop,
6849 vnf_index,
6850 vnf_config_primitive,
6851 primitive_params,
6852 "POST-SCALE",
6853 )
6854 if op_index == self.SUBOPERATION_STATUS_SKIP:
6855 # Skip sub-operation
6856 result = "COMPLETED"
6857 result_detail = "Done"
6858 self.logger.debug(
6859 logging_text
6860 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6861 vnf_config_primitive, result, result_detail
6862 )
6863 )
6864 else:
6865 if op_index == self.SUBOPERATION_STATUS_NEW:
6866 # New sub-operation: Get index of this sub-operation
6867 op_index = (
6868 len(db_nslcmop.get("_admin", {}).get("operations"))
6869 - 1
6870 )
6871 self.logger.debug(
6872 logging_text
6873 + "vnf_config_primitive={} New sub-operation".format(
6874 vnf_config_primitive
6875 )
6876 )
6877 else:
6878 # retry: Get registered params for this existing sub-operation
6879 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6880 op_index
6881 ]
6882 vnf_index = op.get("member_vnf_index")
6883 vnf_config_primitive = op.get("primitive")
6884 primitive_params = op.get("primitive_params")
6885 self.logger.debug(
6886 logging_text
6887 + "vnf_config_primitive={} Sub-operation retry".format(
6888 vnf_config_primitive
6889 )
6890 )
6891 # Execute the primitive, either with new (first-time) or registered (reintent) args
6892 ee_descriptor_id = config_primitive.get(
6893 "execution-environment-ref"
6894 )
6895 primitive_name = config_primitive.get(
6896 "execution-environment-primitive", vnf_config_primitive
6897 )
6898 ee_id, vca_type = self._look_for_deployed_vca(
6899 nsr_deployed["VCA"],
6900 member_vnf_index=vnf_index,
6901 vdu_id=None,
6902 vdu_count_index=None,
6903 ee_descriptor_id=ee_descriptor_id,
6904 )
6905 result, result_detail = await self._ns_execute_primitive(
6906 ee_id,
6907 primitive_name,
6908 primitive_params,
6909 vca_type=vca_type,
6910 vca_id=vca_id,
6911 )
6912 self.logger.debug(
6913 logging_text
6914 + "vnf_config_primitive={} Done with result {} {}".format(
6915 vnf_config_primitive, result, result_detail
6916 )
6917 )
6918 # Update operationState = COMPLETED | FAILED
6919 self._update_suboperation_status(
6920 db_nslcmop, op_index, result, result_detail
6921 )
6922
6923 if result == "FAILED":
6924 raise LcmException(result_detail)
6925 db_nsr_update["config-status"] = old_config_status
6926 scale_process = None
6927 # POST-SCALE END
6928
6929 db_nsr_update[
6930 "detailed-status"
6931 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6932 db_nsr_update["operational-status"] = (
6933 "running"
6934 if old_operational_status == "failed"
6935 else old_operational_status
6936 )
6937 db_nsr_update["config-status"] = old_config_status
6938 return
6939 except (
6940 ROclient.ROClientException,
6941 DbException,
6942 LcmException,
6943 NgRoException,
6944 ) as e:
6945 self.logger.error(logging_text + "Exit Exception {}".format(e))
6946 exc = e
6947 except asyncio.CancelledError:
6948 self.logger.error(
6949 logging_text + "Cancelled Exception while '{}'".format(step)
6950 )
6951 exc = "Operation was cancelled"
6952 except Exception as e:
6953 exc = traceback.format_exc()
6954 self.logger.critical(
6955 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6956 exc_info=True,
6957 )
6958 finally:
6959 self._write_ns_status(
6960 nsr_id=nsr_id,
6961 ns_state=None,
6962 current_operation="IDLE",
6963 current_operation_id=None,
6964 )
6965 if tasks_dict_info:
6966 stage[1] = "Waiting for instantiate pending tasks."
6967 self.logger.debug(logging_text + stage[1])
6968 exc = await self._wait_for_tasks(
6969 logging_text,
6970 tasks_dict_info,
6971 self.timeout_ns_deploy,
6972 stage,
6973 nslcmop_id,
6974 nsr_id=nsr_id,
6975 )
6976 if exc:
6977 db_nslcmop_update[
6978 "detailed-status"
6979 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6980 nslcmop_operation_state = "FAILED"
6981 if db_nsr:
6982 db_nsr_update["operational-status"] = old_operational_status
6983 db_nsr_update["config-status"] = old_config_status
6984 db_nsr_update["detailed-status"] = ""
6985 if scale_process:
6986 if "VCA" in scale_process:
6987 db_nsr_update["config-status"] = "failed"
6988 if "RO" in scale_process:
6989 db_nsr_update["operational-status"] = "failed"
6990 db_nsr_update[
6991 "detailed-status"
6992 ] = "FAILED scaling nslcmop={} {}: {}".format(
6993 nslcmop_id, step, exc
6994 )
6995 else:
6996 error_description_nslcmop = None
6997 nslcmop_operation_state = "COMPLETED"
6998 db_nslcmop_update["detailed-status"] = "Done"
6999
7000 self._write_op_status(
7001 op_id=nslcmop_id,
7002 stage="",
7003 error_message=error_description_nslcmop,
7004 operation_state=nslcmop_operation_state,
7005 other_update=db_nslcmop_update,
7006 )
7007 if db_nsr:
7008 self._write_ns_status(
7009 nsr_id=nsr_id,
7010 ns_state=None,
7011 current_operation="IDLE",
7012 current_operation_id=None,
7013 other_update=db_nsr_update,
7014 )
7015
7016 if nslcmop_operation_state:
7017 try:
7018 msg = {
7019 "nsr_id": nsr_id,
7020 "nslcmop_id": nslcmop_id,
7021 "operationState": nslcmop_operation_state,
7022 }
7023 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7024 except Exception as e:
7025 self.logger.error(
7026 logging_text + "kafka_write notification Exception {}".format(e)
7027 )
7028 self.logger.debug(logging_text + "Exit")
7029 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7030
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale the KDU applications (helm chart / juju bundle) of a NS.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id, used to address _admin.deployed.K8s in db
        :param nsr_deployed: content of the nsr "_admin.deployed" section
        :param db_vnfd: vnfd of the VNF that owns the KDUs
        :param vca_id: VCA id forwarded to the k8s connector calls
        :param scaling_info: dict holding "kdu-create" and/or "kdu-delete" maps
            of kdu_name -> list of per-instance scaling data
        """
        # NOTE(review): when both "kdu-create" and "kdu-delete" are present only
        # the "kdu-create" entries are processed — confirm callers never mix them
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU entry and its index inside _admin.deployed.K8s
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db location where the k8s connector reports progress
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # scale-in: run terminate config primitives first, but only when
                # they are not managed by a juju execution environment
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives are executed in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # 600s hard timeout per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # actual scale operation on the k8s cluster (atomic: rolled back on error)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # scale-out: run initial config primitives after scaling, same
                # juju-EE exclusion as above
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7136
7137 async def _scale_ng_ro(
7138 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7139 ):
7140 nsr_id = db_nslcmop["nsInstanceId"]
7141 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7142 db_vnfrs = {}
7143
7144 # read from db: vnfd's for every vnf
7145 db_vnfds = []
7146
7147 # for each vnf in ns, read vnfd
7148 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7149 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7150 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7151 # if we haven't this vnfd, read it from db
7152 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7153 # read from db
7154 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7155 db_vnfds.append(vnfd)
7156 n2vc_key = self.n2vc.get_public_key()
7157 n2vc_key_list = [n2vc_key]
7158 self.scale_vnfr(
7159 db_vnfr,
7160 vdu_scaling_info.get("vdu-create"),
7161 vdu_scaling_info.get("vdu-delete"),
7162 mark_delete=True,
7163 )
7164 # db_vnfr has been updated, update db_vnfrs to use it
7165 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7166 await self._instantiate_ng_ro(
7167 logging_text,
7168 nsr_id,
7169 db_nsd,
7170 db_nsr,
7171 db_nslcmop,
7172 db_vnfrs,
7173 db_vnfds,
7174 n2vc_key_list,
7175 stage=stage,
7176 start_deploy=time(),
7177 timeout_ns_deploy=self.timeout_ns_deploy,
7178 )
7179 if vdu_scaling_info.get("vdu-delete"):
7180 self.scale_vnfr(
7181 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7182 )
7183
7184 async def extract_prometheus_scrape_jobs(
7185 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7186 ):
7187 # look if exist a file called 'prometheus*.j2' and
7188 artifact_content = self.fs.dir_ls(artifact_path)
7189 job_file = next(
7190 (
7191 f
7192 for f in artifact_content
7193 if f.startswith("prometheus") and f.endswith(".j2")
7194 ),
7195 None,
7196 )
7197 if not job_file:
7198 return
7199 with self.fs.file_open((artifact_path, job_file), "r") as f:
7200 job_data = f.read()
7201
7202 # TODO get_service
7203 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7204 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7205 host_port = "80"
7206 vnfr_id = vnfr_id.replace("-", "")
7207 variables = {
7208 "JOB_NAME": vnfr_id,
7209 "TARGET_IP": target_ip,
7210 "EXPORTER_POD_IP": host_name,
7211 "EXPORTER_POD_PORT": host_port,
7212 }
7213 job_list = parse_job(job_data, variables)
7214 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7215 for job in job_list:
7216 if (
7217 not isinstance(job.get("job_name"), str)
7218 or vnfr_id not in job["job_name"]
7219 ):
7220 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7221 job["nsr_id"] = nsr_id
7222 job["vnfr_id"] = vnfr_id
7223 return job_list
7224
7225 async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
7226 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7227 self.logger.info(logging_text + "Enter")
7228 stage = ["Preparing the environment", ""]
7229 # database nsrs record
7230 db_nsr_update = {}
7231 vdu_vim_name = None
7232 vim_vm_id = None
7233 # in case of error, indicates what part of scale was failed to put nsr at error status
7234 start_deploy = time()
7235 try:
7236 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7237 vim_account_id = db_vnfr.get("vim-account-id")
7238 vim_info_key = "vim:" + vim_account_id
7239 vdur = find_in_list(
7240 db_vnfr["vdur"], lambda vdu: vdu["count-index"] == additional_param["count-index"]
7241 )
7242 if vdur:
7243 vdu_vim_name = vdur["name"]
7244 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7245 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7246 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7247 # wait for any previous tasks in process
7248 stage[1] = "Waiting for previous operations to terminate"
7249 self.logger.info(stage[1])
7250 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
7251
7252 stage[1] = "Reading from database."
7253 self.logger.info(stage[1])
7254 self._write_ns_status(
7255 nsr_id=nsr_id,
7256 ns_state=None,
7257 current_operation=operation_type.upper(),
7258 current_operation_id=nslcmop_id
7259 )
7260 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7261
7262 # read from db: ns
7263 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7264 db_nsr_update["operational-status"] = operation_type
7265 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7266 # Payload for RO
7267 desc = {
7268 operation_type: {
7269 "vim_vm_id": vim_vm_id,
7270 "vnf_id": vnf_id,
7271 "vdu_index": additional_param["count-index"],
7272 "vdu_id": vdur["id"],
7273 "target_vim": target_vim,
7274 "vim_account_id": vim_account_id
7275 }
7276 }
7277 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7278 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7279 self.logger.info("ro nsr id: {}".format(nsr_id))
7280 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7281 self.logger.info("response from RO: {}".format(result_dict))
7282 action_id = result_dict["action_id"]
7283 await self._wait_ng_ro(
7284 nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_operate
7285 )
7286 return "COMPLETED", "Done"
7287 except (ROclient.ROClientException, DbException, LcmException) as e:
7288 self.logger.error("Exit Exception {}".format(e))
7289 exc = e
7290 except asyncio.CancelledError:
7291 self.logger.error("Cancelled Exception while '{}'".format(stage))
7292 exc = "Operation was cancelled"
7293 except Exception as e:
7294 exc = traceback.format_exc()
7295 self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
7296 return "FAILED", "Error in operate VNF {}".format(exc)
7297
7298 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7299 """
7300 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7301
7302 :param: vim_account_id: VIM Account ID
7303
7304 :return: (cloud_name, cloud_credential)
7305 """
7306 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7307 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7308
7309 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7310 """
7311 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7312
7313 :param: vim_account_id: VIM Account ID
7314
7315 :return: (cloud_name, cloud_credential)
7316 """
7317 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7318 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7319
7320 async def migrate(self, nsr_id, nslcmop_id):
7321 """
7322 Migrate VNFs and VDUs instances in a NS
7323
7324 :param: nsr_id: NS Instance ID
7325 :param: nslcmop_id: nslcmop ID of migrate
7326
7327 """
7328 # Try to lock HA task here
7329 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7330 if not task_is_locked_by_me:
7331 return
7332 logging_text = "Task ns={} migrate ".format(nsr_id)
7333 self.logger.debug(logging_text + "Enter")
7334 # get all needed from database
7335 db_nslcmop = None
7336 db_nslcmop_update = {}
7337 nslcmop_operation_state = None
7338 db_nsr_update = {}
7339 target = {}
7340 exc = None
7341 # in case of error, indicates what part of scale was failed to put nsr at error status
7342 start_deploy = time()
7343
7344 try:
7345 # wait for any previous tasks in process
7346 step = "Waiting for previous operations to terminate"
7347 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7348
7349 self._write_ns_status(
7350 nsr_id=nsr_id,
7351 ns_state=None,
7352 current_operation="MIGRATING",
7353 current_operation_id=nslcmop_id,
7354 )
7355 step = "Getting nslcmop from database"
7356 self.logger.debug(
7357 step + " after having waited for previous tasks to be completed"
7358 )
7359 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7360 migrate_params = db_nslcmop.get("operationParams")
7361
7362 target = {}
7363 target.update(migrate_params)
7364 desc = await self.RO.migrate(nsr_id, target)
7365 self.logger.debug("RO return > {}".format(desc))
7366 action_id = desc["action_id"]
7367 await self._wait_ng_ro(
7368 nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
7369 operation="migrate"
7370 )
7371 except (ROclient.ROClientException, DbException, LcmException) as e:
7372 self.logger.error("Exit Exception {}".format(e))
7373 exc = e
7374 except asyncio.CancelledError:
7375 self.logger.error("Cancelled Exception while '{}'".format(step))
7376 exc = "Operation was cancelled"
7377 except Exception as e:
7378 exc = traceback.format_exc()
7379 self.logger.critical(
7380 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7381 )
7382 finally:
7383 self._write_ns_status(
7384 nsr_id=nsr_id,
7385 ns_state=None,
7386 current_operation="IDLE",
7387 current_operation_id=None,
7388 )
7389 if exc:
7390 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7391 nslcmop_operation_state = "FAILED"
7392 else:
7393 nslcmop_operation_state = "COMPLETED"
7394 db_nslcmop_update["detailed-status"] = "Done"
7395 db_nsr_update["detailed-status"] = "Done"
7396
7397 self._write_op_status(
7398 op_id=nslcmop_id,
7399 stage="",
7400 error_message="",
7401 operation_state=nslcmop_operation_state,
7402 other_update=db_nslcmop_update,
7403 )
7404 if nslcmop_operation_state:
7405 try:
7406 msg = {
7407 "nsr_id": nsr_id,
7408 "nslcmop_id": nslcmop_id,
7409 "operationState": nslcmop_operation_state,
7410 }
7411 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7412 except Exception as e:
7413 self.logger.error(
7414 logging_text + "kafka_write notification Exception {}".format(e)
7415 )
7416 self.logger.debug(logging_text + "Exit")
7417 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7418
7419
7420 async def heal(self, nsr_id, nslcmop_id):
7421 """
7422 Heal NS
7423
7424 :param nsr_id: ns instance to heal
7425 :param nslcmop_id: operation to run
7426 :return:
7427 """
7428
7429 # Try to lock HA task here
7430 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7431 if not task_is_locked_by_me:
7432 return
7433
7434 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7435 stage = ["", "", ""]
7436 tasks_dict_info = {}
7437 # ^ stage, step, VIM progress
7438 self.logger.debug(logging_text + "Enter")
7439 # get all needed from database
7440 db_nsr = None
7441 db_nslcmop_update = {}
7442 db_nsr_update = {}
7443 db_vnfrs = {} # vnf's info indexed by _id
7444 exc = None
7445 old_operational_status = ""
7446 old_config_status = ""
7447 nsi_id = None
7448 try:
7449 # wait for any previous tasks in process
7450 step = "Waiting for previous operations to terminate"
7451 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7452 self._write_ns_status(
7453 nsr_id=nsr_id,
7454 ns_state=None,
7455 current_operation="HEALING",
7456 current_operation_id=nslcmop_id,
7457 )
7458
7459 step = "Getting nslcmop from database"
7460 self.logger.debug(
7461 step + " after having waited for previous tasks to be completed"
7462 )
7463 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7464
7465 step = "Getting nsr from database"
7466 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7467 old_operational_status = db_nsr["operational-status"]
7468 old_config_status = db_nsr["config-status"]
7469
7470 db_nsr_update = {
7471 "_admin.deployed.RO.operational-status": "healing",
7472 }
7473 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7474
7475 step = "Sending heal order to VIM"
7476 task_ro = asyncio.ensure_future(
7477 self.heal_RO(
7478 logging_text=logging_text,
7479 nsr_id=nsr_id,
7480 db_nslcmop=db_nslcmop,
7481 stage=stage,
7482 )
7483 )
7484 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
7485 tasks_dict_info[task_ro] = "Healing at VIM"
7486
7487 # VCA tasks
7488 # read from db: nsd
7489 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7490 self.logger.debug(logging_text + stage[1])
7491 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7492 self.fs.sync(db_nsr["nsd-id"])
7493 db_nsr["nsd"] = nsd
7494 # read from db: vnfr's of this ns
7495 step = "Getting vnfrs from db"
7496 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7497 for vnfr in db_vnfrs_list:
7498 db_vnfrs[vnfr["_id"]] = vnfr
7499 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7500
7501 # Check for each target VNF
7502 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7503 for target_vnf in target_list:
7504 # Find this VNF in the list from DB
7505 vnfr_id = target_vnf.get("vnfInstanceId", None)
7506 if vnfr_id:
7507 db_vnfr = db_vnfrs[vnfr_id]
7508 vnfd_id = db_vnfr.get("vnfd-id")
7509 vnfd_ref = db_vnfr.get("vnfd-ref")
7510 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7511 base_folder = vnfd["_admin"]["storage"]
7512 vdu_id = None
7513 vdu_index = 0
7514 vdu_name = None
7515 kdu_name = None
7516 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7517 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7518
7519 # Check each target VDU and deploy N2VC
7520 for target_vdu in target_vnf["additionalParams"].get("vdu", None):
7521 deploy_params_vdu = target_vdu
7522 # Set run-day1 vnf level value if not vdu level value exists
7523 if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
7524 deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
7525 vdu_name = target_vdu.get("vdu-id", None)
7526 # TODO: Get vdu_id from vdud.
7527 vdu_id = vdu_name
7528 # For multi instance VDU count-index is mandatory
7529 # For single session VDU count-indes is 0
7530 vdu_index = target_vdu.get("count-index",0)
7531
7532 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7533 stage[1] = "Deploying Execution Environments."
7534 self.logger.debug(logging_text + stage[1])
7535
7536 # VNF Level charm. Normal case when proxy charms.
7537 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7538 descriptor_config = get_configuration(vnfd, vnfd_ref)
7539 if descriptor_config:
7540 # Continue if healed machine is management machine
7541 vnf_ip_address = db_vnfr.get("ip-address")
7542 target_instance = None
7543 for instance in db_vnfr.get("vdur", None):
7544 if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
7545 target_instance = instance
7546 break
7547 if vnf_ip_address == target_instance.get("ip-address"):
7548 self._heal_n2vc(
7549 logging_text=logging_text
7550 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7551 member_vnf_index, vdu_name, vdu_index
7552 ),
7553 db_nsr=db_nsr,
7554 db_vnfr=db_vnfr,
7555 nslcmop_id=nslcmop_id,
7556 nsr_id=nsr_id,
7557 nsi_id=nsi_id,
7558 vnfd_id=vnfd_ref,
7559 vdu_id=None,
7560 kdu_name=None,
7561 member_vnf_index=member_vnf_index,
7562 vdu_index=0,
7563 vdu_name=None,
7564 deploy_params=deploy_params_vdu,
7565 descriptor_config=descriptor_config,
7566 base_folder=base_folder,
7567 task_instantiation_info=tasks_dict_info,
7568 stage=stage,
7569 )
7570
7571 # VDU Level charm. Normal case with native charms.
7572 descriptor_config = get_configuration(vnfd, vdu_name)
7573 if descriptor_config:
7574 self._heal_n2vc(
7575 logging_text=logging_text
7576 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7577 member_vnf_index, vdu_name, vdu_index
7578 ),
7579 db_nsr=db_nsr,
7580 db_vnfr=db_vnfr,
7581 nslcmop_id=nslcmop_id,
7582 nsr_id=nsr_id,
7583 nsi_id=nsi_id,
7584 vnfd_id=vnfd_ref,
7585 vdu_id=vdu_id,
7586 kdu_name=kdu_name,
7587 member_vnf_index=member_vnf_index,
7588 vdu_index=vdu_index,
7589 vdu_name=vdu_name,
7590 deploy_params=deploy_params_vdu,
7591 descriptor_config=descriptor_config,
7592 base_folder=base_folder,
7593 task_instantiation_info=tasks_dict_info,
7594 stage=stage,
7595 )
7596
7597 except (
7598 ROclient.ROClientException,
7599 DbException,
7600 LcmException,
7601 NgRoException,
7602 ) as e:
7603 self.logger.error(logging_text + "Exit Exception {}".format(e))
7604 exc = e
7605 except asyncio.CancelledError:
7606 self.logger.error(
7607 logging_text + "Cancelled Exception while '{}'".format(step)
7608 )
7609 exc = "Operation was cancelled"
7610 except Exception as e:
7611 exc = traceback.format_exc()
7612 self.logger.critical(
7613 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7614 exc_info=True,
7615 )
7616 finally:
7617 if tasks_dict_info:
7618 stage[1] = "Waiting for healing pending tasks."
7619 self.logger.debug(logging_text + stage[1])
7620 exc = await self._wait_for_tasks(
7621 logging_text,
7622 tasks_dict_info,
7623 self.timeout_ns_deploy,
7624 stage,
7625 nslcmop_id,
7626 nsr_id=nsr_id,
7627 )
7628 if exc:
7629 db_nslcmop_update[
7630 "detailed-status"
7631 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7632 nslcmop_operation_state = "FAILED"
7633 if db_nsr:
7634 db_nsr_update["operational-status"] = old_operational_status
7635 db_nsr_update["config-status"] = old_config_status
7636 db_nsr_update[
7637 "detailed-status"
7638 ] = "FAILED healing nslcmop={} {}: {}".format(
7639 nslcmop_id, step, exc
7640 )
7641 for task, task_name in tasks_dict_info.items():
7642 if not task.done() or task.cancelled() or task.exception():
7643 if task_name.startswith(self.task_name_deploy_vca):
7644 # A N2VC task is pending
7645 db_nsr_update["config-status"] = "failed"
7646 else:
7647 # RO task is pending
7648 db_nsr_update["operational-status"] = "failed"
7649 else:
7650 error_description_nslcmop = None
7651 nslcmop_operation_state = "COMPLETED"
7652 db_nslcmop_update["detailed-status"] = "Done"
7653 db_nsr_update["detailed-status"] = "Done"
7654 db_nsr_update["operational-status"] = "running"
7655 db_nsr_update["config-status"] = "configured"
7656
7657 self._write_op_status(
7658 op_id=nslcmop_id,
7659 stage="",
7660 error_message=error_description_nslcmop,
7661 operation_state=nslcmop_operation_state,
7662 other_update=db_nslcmop_update,
7663 )
7664 if db_nsr:
7665 self._write_ns_status(
7666 nsr_id=nsr_id,
7667 ns_state=None,
7668 current_operation="IDLE",
7669 current_operation_id=None,
7670 other_update=db_nsr_update,
7671 )
7672
7673 if nslcmop_operation_state:
7674 try:
7675 msg = {
7676 "nsr_id": nsr_id,
7677 "nslcmop_id": nslcmop_id,
7678 "operationState": nslcmop_operation_state,
7679 }
7680 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
7681 except Exception as e:
7682 self.logger.error(
7683 logging_text + "kafka_write notification Exception {}".format(e)
7684 )
7685 self.logger.debug(logging_text + "Exit")
7686 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7687
7688 async def heal_RO(
7689 self,
7690 logging_text,
7691 nsr_id,
7692 db_nslcmop,
7693 stage,
7694 ):
7695 """
7696 Heal at RO
7697 :param logging_text: preffix text to use at logging
7698 :param nsr_id: nsr identity
7699 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7700 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7701 :return: None or exception
7702 """
7703 def get_vim_account(vim_account_id):
7704 nonlocal db_vims
7705 if vim_account_id in db_vims:
7706 return db_vims[vim_account_id]
7707 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7708 db_vims[vim_account_id] = db_vim
7709 return db_vim
7710
7711 try:
7712 start_heal = time()
7713 ns_params = db_nslcmop.get("operationParams")
7714 if ns_params and ns_params.get("timeout_ns_heal"):
7715 timeout_ns_heal = ns_params["timeout_ns_heal"]
7716 else:
7717 timeout_ns_heal = self.timeout.get(
7718 "ns_heal", self.timeout_ns_heal
7719 )
7720
7721 db_vims = {}
7722
7723 nslcmop_id = db_nslcmop["_id"]
7724 target = {
7725 "action_id": nslcmop_id,
7726 }
7727 self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
7728 target.update(db_nslcmop.get("operationParams", {}))
7729
7730 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7731 desc = await self.RO.recreate(nsr_id, target)
7732 self.logger.debug("RO return > {}".format(desc))
7733 action_id = desc["action_id"]
7734 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7735 await self._wait_ng_ro(
7736 nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
7737 operation="healing"
7738 )
7739
7740 # Updating NSR
7741 db_nsr_update = {
7742 "_admin.deployed.RO.operational-status": "running",
7743 "detailed-status": " ".join(stage),
7744 }
7745 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7746 self._write_op_status(nslcmop_id, stage)
7747 self.logger.debug(
7748 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7749 )
7750
7751 except Exception as e:
7752 stage[2] = "ERROR healing at VIM"
7753 #self.set_vnfr_at_error(db_vnfrs, str(e))
7754 self.logger.error(
7755 "Error healing at VIM {}".format(e),
7756 exc_info=not isinstance(
7757 e,
7758 (
7759 ROclient.ROClientException,
7760 LcmException,
7761 DbException,
7762 NgRoException,
7763 ),
7764 ),
7765 )
7766 raise
7767
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """
        For each execution environment declared in descriptor_config, launch a
        heal_N2VC asyncio task, registering it in task_instantiation_info.

        The VCA record for the charm is looked up in
        db_nsr._admin.deployed.VCA; when not found, a new entry is created both
        in the db and in the in-memory db_nsr before the task is launched.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        # normalize the descriptor into a list of execution environments
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # derive vca_type/vca_name from the EE flavour (juju charm vs helm chart)
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # find the existing VCA record matching this element + EE descriptor
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                # append after the last existing entry (the for/else left
                # vca_index at the last enumerated position, or -1 when empty)
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
7920
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal the VCA (execution environment) of one NS/VNF/VDU/KDU element.

        For native charms: waits for the healed VM to be up, registers a new
        execution environment over its management IP and re-installs the charm
        software. For proxy charms and helm EEs: waits for RO to complete the
        healing operation and, if SSH access is required, re-injects the EE
        public key into the VM. If the "run-day1" deploy parameter is set, the
        initial (Day-1) config primitives are executed again.

        :param logging_text: prefix used in every log message
        :param vca_index: index of this VCA in db_nsr["_admin"]["deployed"]["VCA"]
        :param nsi_id: network slice instance id or None; used to build the
            VCA namespace
        :param db_nsr: nsrs database record (updated through update_db_2)
        :param db_vnfr: vnfrs database record, or None for an NS-level VCA
        :param vdu_id: VDU id, or None
        :param kdu_name: KDU name, or None
        :param vdu_index: VDU count index (None is treated as 0)
        :param config_descriptor: descriptor section holding config-access and
            initial/terminate config primitives
        :param deploy_params: primitive parameters; "rw_mgmt_ip" is stored here
            for later replacement and "run-day1" (default False) controls
            Day-1 execution
        :param base_folder: dict with "folder" and "pkg-dir" used to locate the
            charm/helm artifact
        :param nslcmop_id: id of the nslcmops operation record
        :param stage: list reported to the operation status; stage[0] is
            overwritten here with the Day-1 progress text
        :param vca_type: one of "native_charm", "lxc_proxy_charm",
            "k8s_proxy_charm", "helm", "helm-v3"
        :param vca_name: charm or helm-chart name (last artifact path component)
        :param ee_config_descriptor: execution-environment item of the descriptor
        :raises LcmException: wrapping any failure, after writing the
            configurationStatus as BROKEN
        """
        nsr_id = db_nsr["_id"]
        # dotted DB path prefix for all updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # db_dict tells the VCA connector where to write status updates
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            # native charms are registered with index 0; others use the VDU index
            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/id and namespace depending on the target element
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path: charms vs helm-charts subfolder depends on vca_type
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists (reused unless a new one is registered below)
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injection here (user/pub_key None): only wait for the mgmt IP
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id in db (the old ee_id is replaced by the new registration)
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,  # disabled together with the MON/POL block above
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # pass the "config" initial primitive (if any) as install-time config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # only log unexpected exception types; known ones carry their own context
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8324
8325 async def _wait_heal_ro(
8326 self,
8327 nsr_id,
8328 timeout=600,
8329 ):
8330 start_time = time()
8331 while time() <= start_time + timeout:
8332 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8333 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8334 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8335 if operational_status_ro != "healing":
8336 break
8337 await asyncio.sleep(15, loop=self.loop)
8338 else: # timeout_ns_deploy
8339 raise NgRoException("Timeout waiting ns to deploy")