Fixing flake and black issues in code, enabling the same in tox
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.nsr import (
38 get_deployed_kdu,
39 get_deployed_vca,
40 get_deployed_vca_list,
41 get_nsd,
42 )
43 from osm_lcm.data_utils.vca import (
44 DeployedComponent,
45 DeployedK8sResource,
46 DeployedVCA,
47 EELevel,
48 Relation,
49 EERelation,
50 safe_get_ee_relation,
51 )
52 from osm_lcm.ng_ro import NgRoClient, NgRoException
53 from osm_lcm.lcm_utils import (
54 LcmException,
55 LcmExceptionNoMgmtIP,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 )
63 from osm_lcm.data_utils.nsd import (
64 get_ns_configuration_relation_list,
65 get_vnf_profile,
66 get_vnf_profiles,
67 )
68 from osm_lcm.data_utils.vnfd import (
69 get_kdu,
70 get_kdu_services,
71 get_relation_list,
72 get_vdu_list,
73 get_vdu_profile,
74 get_ee_sorted_initial_config_primitive_list,
75 get_ee_sorted_terminate_config_primitive_list,
76 get_kdu_list,
77 get_virtual_link_profiles,
78 get_vdu,
79 get_configuration,
80 get_vdu_index,
81 get_scaling_aspect,
82 get_number_of_instances,
83 get_juju_ee_ref,
84 get_kdu_resource_profile,
85 find_software_version,
86 )
87 from osm_lcm.data_utils.list_utils import find_in_list
88 from osm_lcm.data_utils.vnfr import (
89 get_osm_params,
90 get_vdur_index,
91 get_kdur,
92 get_volumes_from_instantiation_params,
93 )
94 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
95 from osm_lcm.data_utils.database.vim_account import VimAccountDB
96 from n2vc.definitions import RelationEndpoint
97 from n2vc.k8s_helm_conn import K8sHelmConnector
98 from n2vc.k8s_helm3_conn import K8sHelm3Connector
99 from n2vc.k8s_juju_conn import K8sJujuConnector
100
101 from osm_common.dbbase import DbException
102 from osm_common.fsbase import FsException
103
104 from osm_lcm.data_utils.database.database import Database
105 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
106
107 from n2vc.n2vc_juju_conn import N2VCJujuConnector
108 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
109
110 from osm_lcm.lcm_helm_conn import LCMHelmConn
111 from osm_lcm.osm_config import OsmConfigBuilder
112 from osm_lcm.prometheus import parse_job
113
114 from copy import copy, deepcopy
115 from time import time
116 from uuid import uuid4
117
118 from random import randint
119
120 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
121
122
class NsLcm(LcmBase):
    """NS lifecycle manager: drives instantiation, scaling, healing, update
    and termination of network services through the RO, N2VC and K8s
    connectors."""

    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate (start/stop/rebuild)
    timeout_verticalscale = 1800  # default global timeout for Vertical Scaling
    # sentinel results for sub-operation lookups
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
143
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus client used to publish/consume LCM events
        :param lcm_tasks: registry of running LCM asyncio tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all LCM components
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so that later local modifications do not leak into the shared config
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 client for KDU deployments
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 client for KDU deployments
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju bundle client for KDU deployments
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # maps KDU deployment type -> k8s connector instance
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # maps VCA (charm) type -> connector instance
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # maps LCM operation type -> RO coroutine used to poll its status
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
232
233 @staticmethod
234 def increment_ip_mac(ip_mac, vm_index=1):
235 if not isinstance(ip_mac, str):
236 return ip_mac
237 try:
238 # try with ipv4 look for last dot
239 i = ip_mac.rfind(".")
240 if i > 0:
241 i += 1
242 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
243 # try with ipv6 or mac look for last colon. Operate in hex
244 i = ip_mac.rfind(":")
245 if i > 0:
246 i += 1
247 # format in hex, len can be 2 for mac or 4 for ipv6
248 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
249 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
250 )
251 except Exception:
252 pass
253 return None
254
255 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
256
257 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
258
259 try:
260 # TODO filter RO descriptor fields...
261
262 # write to database
263 db_dict = dict()
264 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
265 db_dict["deploymentStatus"] = ro_descriptor
266 self.update_db_2("nsrs", nsrs_id, db_dict)
267
268 except Exception as e:
269 self.logger.warn(
270 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
271 )
272
273 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
274
275 # remove last dot from path (if exists)
276 if path.endswith("."):
277 path = path[:-1]
278
279 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
280 # .format(table, filter, path, updated_data))
281 try:
282
283 nsr_id = filter.get("_id")
284
285 # read ns record from database
286 nsr = self.db.get_one(table="nsrs", q_filter=filter)
287 current_ns_status = nsr.get("nsState")
288
289 # get vca status for NS
290 status_dict = await self.n2vc.get_status(
291 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
292 )
293
294 # vcaStatus
295 db_dict = dict()
296 db_dict["vcaStatus"] = status_dict
297
298 # update configurationStatus for this VCA
299 try:
300 vca_index = int(path[path.rfind(".") + 1 :])
301
302 vca_list = deep_get(
303 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
304 )
305 vca_status = vca_list[vca_index].get("status")
306
307 configuration_status_list = nsr.get("configurationStatus")
308 config_status = configuration_status_list[vca_index].get("status")
309
310 if config_status == "BROKEN" and vca_status != "failed":
311 db_dict["configurationStatus"][vca_index] = "READY"
312 elif config_status != "BROKEN" and vca_status == "failed":
313 db_dict["configurationStatus"][vca_index] = "BROKEN"
314 except Exception as e:
315 # not update configurationStatus
316 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
317
318 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
319 # if nsState = 'DEGRADED' check if all is OK
320 is_degraded = False
321 if current_ns_status in ("READY", "DEGRADED"):
322 error_description = ""
323 # check machines
324 if status_dict.get("machines"):
325 for machine_id in status_dict.get("machines"):
326 machine = status_dict.get("machines").get(machine_id)
327 # check machine agent-status
328 if machine.get("agent-status"):
329 s = machine.get("agent-status").get("status")
330 if s != "started":
331 is_degraded = True
332 error_description += (
333 "machine {} agent-status={} ; ".format(
334 machine_id, s
335 )
336 )
337 # check machine instance status
338 if machine.get("instance-status"):
339 s = machine.get("instance-status").get("status")
340 if s != "running":
341 is_degraded = True
342 error_description += (
343 "machine {} instance-status={} ; ".format(
344 machine_id, s
345 )
346 )
347 # check applications
348 if status_dict.get("applications"):
349 for app_id in status_dict.get("applications"):
350 app = status_dict.get("applications").get(app_id)
351 # check application status
352 if app.get("status"):
353 s = app.get("status").get("status")
354 if s != "active":
355 is_degraded = True
356 error_description += (
357 "application {} status={} ; ".format(app_id, s)
358 )
359
360 if error_description:
361 db_dict["errorDescription"] = error_description
362 if current_ns_status == "READY" and is_degraded:
363 db_dict["nsState"] = "DEGRADED"
364 if current_ns_status == "DEGRADED" and not is_degraded:
365 db_dict["nsState"] = "READY"
366
367 # write to database
368 self.update_db_2("nsrs", nsr_id, db_dict)
369
370 except (asyncio.CancelledError, asyncio.TimeoutError):
371 raise
372 except Exception as e:
373 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
374
375 async def _on_update_k8s_db(
376 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
377 ):
378 """
379 Updating vca status in NSR record
380 :param cluster_uuid: UUID of a k8s cluster
381 :param kdu_instance: The unique name of the KDU instance
382 :param filter: To get nsr_id
383 :cluster_type: The cluster type (juju, k8s)
384 :return: none
385 """
386
387 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
388 # .format(cluster_uuid, kdu_instance, filter))
389
390 nsr_id = filter.get("_id")
391 try:
392 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
393 cluster_uuid=cluster_uuid,
394 kdu_instance=kdu_instance,
395 yaml_format=False,
396 complete_status=True,
397 vca_id=vca_id,
398 )
399
400 # vcaStatus
401 db_dict = dict()
402 db_dict["vcaStatus"] = {nsr_id: vca_status}
403
404 self.logger.debug(
405 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
406 )
407
408 # write to database
409 self.update_db_2("nsrs", nsr_id, db_dict)
410 except (asyncio.CancelledError, asyncio.TimeoutError):
411 raise
412 except Exception as e:
413 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
414
415 @staticmethod
416 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
417 try:
418 env = Environment(
419 undefined=StrictUndefined,
420 autoescape=select_autoescape(default_for_string=True, default=True),
421 )
422 template = env.from_string(cloud_init_text)
423 return template.render(additional_params or {})
424 except UndefinedError as e:
425 raise LcmException(
426 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
427 "file, must be provided in the instantiation parameters inside the "
428 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
429 )
430 except (TemplateError, TemplateNotFound) as e:
431 raise LcmException(
432 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
433 vnfd_id, vdu_id, e
434 )
435 )
436
437 def _get_vdu_cloud_init_content(self, vdu, vnfd):
438 cloud_init_content = cloud_init_file = None
439 try:
440 if vdu.get("cloud-init-file"):
441 base_folder = vnfd["_admin"]["storage"]
442 if base_folder["pkg-dir"]:
443 cloud_init_file = "{}/{}/cloud_init/{}".format(
444 base_folder["folder"],
445 base_folder["pkg-dir"],
446 vdu["cloud-init-file"],
447 )
448 else:
449 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
450 base_folder["folder"],
451 vdu["cloud-init-file"],
452 )
453 with self.fs.file_open(cloud_init_file, "r") as ci_file:
454 cloud_init_content = ci_file.read()
455 elif vdu.get("cloud-init"):
456 cloud_init_content = vdu["cloud-init"]
457
458 return cloud_init_content
459 except FsException as e:
460 raise LcmException(
461 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
462 vnfd["id"], vdu["id"], cloud_init_file, e
463 )
464 )
465
466 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
467 vdur = next(
468 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
469 )
470 additional_params = vdur.get("additionalParams")
471 return parse_yaml_strings(additional_params)
472
473 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
474 """
475 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
476 :param vnfd: input vnfd
477 :param new_id: overrides vnf id if provided
478 :param additionalParams: Instantiation params for VNFs provided
479 :param nsrId: Id of the NSR
480 :return: copy of vnfd
481 """
482 vnfd_RO = deepcopy(vnfd)
483 # remove unused by RO configuration, monitoring, scaling and internal keys
484 vnfd_RO.pop("_id", None)
485 vnfd_RO.pop("_admin", None)
486 vnfd_RO.pop("monitoring-param", None)
487 vnfd_RO.pop("scaling-group-descriptor", None)
488 vnfd_RO.pop("kdu", None)
489 vnfd_RO.pop("k8s-cluster", None)
490 if new_id:
491 vnfd_RO["id"] = new_id
492
493 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
494 for vdu in get_iterable(vnfd_RO, "vdu"):
495 vdu.pop("cloud-init-file", None)
496 vdu.pop("cloud-init", None)
497 return vnfd_RO
498
499 @staticmethod
500 def ip_profile_2_RO(ip_profile):
501 RO_ip_profile = deepcopy(ip_profile)
502 if "dns-server" in RO_ip_profile:
503 if isinstance(RO_ip_profile["dns-server"], list):
504 RO_ip_profile["dns-address"] = []
505 for ds in RO_ip_profile.pop("dns-server"):
506 RO_ip_profile["dns-address"].append(ds["address"])
507 else:
508 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
509 if RO_ip_profile.get("ip-version") == "ipv4":
510 RO_ip_profile["ip-version"] = "IPv4"
511 if RO_ip_profile.get("ip-version") == "ipv6":
512 RO_ip_profile["ip-version"] = "IPv6"
513 if "dhcp-params" in RO_ip_profile:
514 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
515 return RO_ip_profile
516
517 def _get_ro_vim_id_for_vim_account(self, vim_account):
518 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
519 if db_vim["_admin"]["operationalState"] != "ENABLED":
520 raise LcmException(
521 "VIM={} is not available. operationalState={}".format(
522 vim_account, db_vim["_admin"]["operationalState"]
523 )
524 )
525 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
526 return RO_vim_id
527
528 def get_ro_wim_id_for_wim_account(self, wim_account):
529 if isinstance(wim_account, str):
530 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
531 if db_wim["_admin"]["operationalState"] != "ENABLED":
532 raise LcmException(
533 "WIM={} is not available. operationalState={}".format(
534 wim_account, db_wim["_admin"]["operationalState"]
535 )
536 )
537 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
538 return RO_wim_id
539 else:
540 return wim_account
541
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Add or remove vdur entries of a vnfr in the database when scaling.

        :param db_vnfr: vnfr content; its "vdur" list is refreshed from the
            database before returning
        :param vdu_create: dict of vdu-id -> number of instances to add
        :param vdu_delete: dict of vdu-id -> number of instances to remove
        :param mark_delete: when True, vdurs are only marked with status
            DELETING instead of being pulled from the database
        :return: None
        """
        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the last existing vdur of this vdu as template for the new ones
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each new replica gets a fresh id and BUILD status
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica;
                        # dynamic ones are cleared so the VIM assigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be management of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # only mark the last vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
653
654 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
655 """
656 Updates database nsr with the RO info for the created vld
657 :param ns_update_nsr: dictionary to be filled with the updated info
658 :param db_nsr: content of db_nsr. This is also modified
659 :param nsr_desc_RO: nsr descriptor from RO
660 :return: Nothing, LcmException is raised on errors
661 """
662
663 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
664 for net_RO in get_iterable(nsr_desc_RO, "nets"):
665 if vld["id"] != net_RO.get("ns_net_osm_id"):
666 continue
667 vld["vim-id"] = net_RO.get("vim_net_id")
668 vld["name"] = net_RO.get("vim_name")
669 vld["status"] = net_RO.get("status")
670 vld["status-detailed"] = net_RO.get("error_msg")
671 ns_update_nsr["vld.{}".format(vld_index)] = vld
672 break
673 else:
674 raise LcmException(
675 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
676 )
677
678 def set_vnfr_at_error(self, db_vnfrs, error_text):
679 try:
680 for db_vnfr in db_vnfrs.values():
681 vnfr_update = {"status": "ERROR"}
682 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
683 if "status" not in vdur:
684 vdur["status"] = "ERROR"
685 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
686 if error_text:
687 vdur["status-detailed"] = str(error_text)
688 vnfr_update[
689 "vdur.{}.status-detailed".format(vdu_index)
690 ] = "ERROR"
691 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
692 except DbException as e:
693 self.logger.error("Cannot update vnf. {}".format(e))
694
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf matching this member index; the for/else at the
            # bottom raises when RO did not report it
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        continue  # physical DUs are not reported by the VIM
                    # match the RO vm by vdu id and replica count-index;
                    # the for/else raises when no match is found
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            # copy ip/mac per interface, matched by internal name;
                            # the for/else raises when the interface is missing
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    # match internal vnf networks against the RO nets;
                    # the for/else raises when one is missing
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
791
792 def _get_ns_config_info(self, nsr_id):
793 """
794 Generates a mapping between vnf,vdu elements and the N2VC id
795 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
796 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
797 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
798 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
799 """
800 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
801 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
802 mapping = {}
803 ns_config_info = {"osm-config-mapping": mapping}
804 for vca in vca_deployed_list:
805 if not vca["member-vnf-index"]:
806 continue
807 if not vca["vdu_id"]:
808 mapping[vca["member-vnf-index"]] = vca["application"]
809 else:
810 mapping[
811 "{}.{}.{}".format(
812 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
813 )
814 ] = vca["application"]
815 return ns_config_info
816
817 async def _instantiate_ng_ro(
818 self,
819 logging_text,
820 nsr_id,
821 nsd,
822 db_nsr,
823 db_nslcmop,
824 db_vnfrs,
825 db_vnfds,
826 n2vc_key_list,
827 stage,
828 start_deploy,
829 timeout_ns_deploy,
830 ):
831
832 db_vims = {}
833
834 def get_vim_account(vim_account_id):
835 nonlocal db_vims
836 if vim_account_id in db_vims:
837 return db_vims[vim_account_id]
838 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
839 db_vims[vim_account_id] = db_vim
840 return db_vim
841
842 # modify target_vld info with instantiation parameters
843 def parse_vld_instantiation_params(
844 target_vim, target_vld, vld_params, target_sdn
845 ):
846 if vld_params.get("ip-profile"):
847 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
848 "ip-profile"
849 ]
850 if vld_params.get("provider-network"):
851 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
852 "provider-network"
853 ]
854 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
855 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
856 "provider-network"
857 ]["sdn-ports"]
858 if vld_params.get("wimAccountId"):
859 target_wim = "wim:{}".format(vld_params["wimAccountId"])
860 target_vld["vim_info"][target_wim] = {}
861 for param in ("vim-network-name", "vim-network-id"):
862 if vld_params.get(param):
863 if isinstance(vld_params[param], dict):
864 for vim, vim_net in vld_params[param].items():
865 other_target_vim = "vim:" + vim
866 populate_dict(
867 target_vld["vim_info"],
868 (other_target_vim, param.replace("-", "_")),
869 vim_net,
870 )
871 else: # isinstance str
872 target_vld["vim_info"][target_vim][
873 param.replace("-", "_")
874 ] = vld_params[param]
875 if vld_params.get("common_id"):
876 target_vld["common_id"] = vld_params.get("common_id")
877
878 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
879 def update_ns_vld_target(target, ns_params):
880 for vnf_params in ns_params.get("vnf", ()):
881 if vnf_params.get("vimAccountId"):
882 target_vnf = next(
883 (
884 vnfr
885 for vnfr in db_vnfrs.values()
886 if vnf_params["member-vnf-index"]
887 == vnfr["member-vnf-index-ref"]
888 ),
889 None,
890 )
891 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
892 for a_index, a_vld in enumerate(target["ns"]["vld"]):
893 target_vld = find_in_list(
894 get_iterable(vdur, "interfaces"),
895 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
896 )
897
898 vld_params = find_in_list(
899 get_iterable(ns_params, "vld"),
900 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
901 )
902 if target_vld:
903
904 if vnf_params.get("vimAccountId") not in a_vld.get(
905 "vim_info", {}
906 ):
907 target_vim_network_list = [
908 v for _, v in a_vld.get("vim_info").items()
909 ]
910 target_vim_network_name = next(
911 (
912 item.get("vim_network_name", "")
913 for item in target_vim_network_list
914 ),
915 "",
916 )
917
918 target["ns"]["vld"][a_index].get("vim_info").update(
919 {
920 "vim:{}".format(vnf_params["vimAccountId"]): {
921 "vim_network_name": target_vim_network_name,
922 }
923 }
924 )
925
926 if vld_params:
927 for param in ("vim-network-name", "vim-network-id"):
928 if vld_params.get(param) and isinstance(
929 vld_params[param], dict
930 ):
931 for vim, vim_net in vld_params[
932 param
933 ].items():
934 other_target_vim = "vim:" + vim
935 populate_dict(
936 target["ns"]["vld"][a_index].get(
937 "vim_info"
938 ),
939 (
940 other_target_vim,
941 param.replace("-", "_"),
942 ),
943 vim_net,
944 )
945
946 nslcmop_id = db_nslcmop["_id"]
947 target = {
948 "name": db_nsr["name"],
949 "ns": {"vld": []},
950 "vnf": [],
951 "image": deepcopy(db_nsr["image"]),
952 "flavor": deepcopy(db_nsr["flavor"]),
953 "action_id": nslcmop_id,
954 "cloud_init_content": {},
955 }
956 for image in target["image"]:
957 image["vim_info"] = {}
958 for flavor in target["flavor"]:
959 flavor["vim_info"] = {}
960 if db_nsr.get("affinity-or-anti-affinity-group"):
961 target["affinity-or-anti-affinity-group"] = deepcopy(
962 db_nsr["affinity-or-anti-affinity-group"]
963 )
964 for affinity_or_anti_affinity_group in target[
965 "affinity-or-anti-affinity-group"
966 ]:
967 affinity_or_anti_affinity_group["vim_info"] = {}
968
969 if db_nslcmop.get("lcmOperationType") != "instantiate":
970 # get parameters of instantiation:
971 db_nslcmop_instantiate = self.db.get_list(
972 "nslcmops",
973 {
974 "nsInstanceId": db_nslcmop["nsInstanceId"],
975 "lcmOperationType": "instantiate",
976 },
977 )[-1]
978 ns_params = db_nslcmop_instantiate.get("operationParams")
979 else:
980 ns_params = db_nslcmop.get("operationParams")
981 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
982 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
983
984 cp2target = {}
985 for vld_index, vld in enumerate(db_nsr.get("vld")):
986 target_vim = "vim:{}".format(ns_params["vimAccountId"])
987 target_vld = {
988 "id": vld["id"],
989 "name": vld["name"],
990 "mgmt-network": vld.get("mgmt-network", False),
991 "type": vld.get("type"),
992 "vim_info": {
993 target_vim: {
994 "vim_network_name": vld.get("vim-network-name"),
995 "vim_account_id": ns_params["vimAccountId"],
996 }
997 },
998 }
999 # check if this network needs SDN assist
1000 if vld.get("pci-interfaces"):
1001 db_vim = get_vim_account(ns_params["vimAccountId"])
1002 sdnc_id = db_vim["config"].get("sdn-controller")
1003 if sdnc_id:
1004 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1005 target_sdn = "sdn:{}".format(sdnc_id)
1006 target_vld["vim_info"][target_sdn] = {
1007 "sdn": True,
1008 "target_vim": target_vim,
1009 "vlds": [sdn_vld],
1010 "type": vld.get("type"),
1011 }
1012
1013 nsd_vnf_profiles = get_vnf_profiles(nsd)
1014 for nsd_vnf_profile in nsd_vnf_profiles:
1015 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1016 if cp["virtual-link-profile-id"] == vld["id"]:
1017 cp2target[
1018 "member_vnf:{}.{}".format(
1019 cp["constituent-cpd-id"][0][
1020 "constituent-base-element-id"
1021 ],
1022 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1023 )
1024 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1025
1026 # check at nsd descriptor, if there is an ip-profile
1027 vld_params = {}
1028 nsd_vlp = find_in_list(
1029 get_virtual_link_profiles(nsd),
1030 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1031 == vld["id"],
1032 )
1033 if (
1034 nsd_vlp
1035 and nsd_vlp.get("virtual-link-protocol-data")
1036 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1037 ):
1038 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1039 "l3-protocol-data"
1040 ]
1041 ip_profile_dest_data = {}
1042 if "ip-version" in ip_profile_source_data:
1043 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1044 "ip-version"
1045 ]
1046 if "cidr" in ip_profile_source_data:
1047 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1048 "cidr"
1049 ]
1050 if "gateway-ip" in ip_profile_source_data:
1051 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1052 "gateway-ip"
1053 ]
1054 if "dhcp-enabled" in ip_profile_source_data:
1055 ip_profile_dest_data["dhcp-params"] = {
1056 "enabled": ip_profile_source_data["dhcp-enabled"]
1057 }
1058 vld_params["ip-profile"] = ip_profile_dest_data
1059
1060 # update vld_params with instantiation params
1061 vld_instantiation_params = find_in_list(
1062 get_iterable(ns_params, "vld"),
1063 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1064 )
1065 if vld_instantiation_params:
1066 vld_params.update(vld_instantiation_params)
1067 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1068 target["ns"]["vld"].append(target_vld)
1069 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1070 update_ns_vld_target(target, ns_params)
1071
1072 for vnfr in db_vnfrs.values():
1073 vnfd = find_in_list(
1074 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1075 )
1076 vnf_params = find_in_list(
1077 get_iterable(ns_params, "vnf"),
1078 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1079 )
1080 target_vnf = deepcopy(vnfr)
1081 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1082 for vld in target_vnf.get("vld", ()):
1083 # check if connected to a ns.vld, to fill target'
1084 vnf_cp = find_in_list(
1085 vnfd.get("int-virtual-link-desc", ()),
1086 lambda cpd: cpd.get("id") == vld["id"],
1087 )
1088 if vnf_cp:
1089 ns_cp = "member_vnf:{}.{}".format(
1090 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1091 )
1092 if cp2target.get(ns_cp):
1093 vld["target"] = cp2target[ns_cp]
1094
1095 vld["vim_info"] = {
1096 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1097 }
1098 # check if this network needs SDN assist
1099 target_sdn = None
1100 if vld.get("pci-interfaces"):
1101 db_vim = get_vim_account(vnfr["vim-account-id"])
1102 sdnc_id = db_vim["config"].get("sdn-controller")
1103 if sdnc_id:
1104 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1105 target_sdn = "sdn:{}".format(sdnc_id)
1106 vld["vim_info"][target_sdn] = {
1107 "sdn": True,
1108 "target_vim": target_vim,
1109 "vlds": [sdn_vld],
1110 "type": vld.get("type"),
1111 }
1112
1113 # check at vnfd descriptor, if there is an ip-profile
1114 vld_params = {}
1115 vnfd_vlp = find_in_list(
1116 get_virtual_link_profiles(vnfd),
1117 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1118 )
1119 if (
1120 vnfd_vlp
1121 and vnfd_vlp.get("virtual-link-protocol-data")
1122 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1123 ):
1124 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1125 "l3-protocol-data"
1126 ]
1127 ip_profile_dest_data = {}
1128 if "ip-version" in ip_profile_source_data:
1129 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1130 "ip-version"
1131 ]
1132 if "cidr" in ip_profile_source_data:
1133 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1134 "cidr"
1135 ]
1136 if "gateway-ip" in ip_profile_source_data:
1137 ip_profile_dest_data[
1138 "gateway-address"
1139 ] = ip_profile_source_data["gateway-ip"]
1140 if "dhcp-enabled" in ip_profile_source_data:
1141 ip_profile_dest_data["dhcp-params"] = {
1142 "enabled": ip_profile_source_data["dhcp-enabled"]
1143 }
1144
1145 vld_params["ip-profile"] = ip_profile_dest_data
1146 # update vld_params with instantiation params
1147 if vnf_params:
1148 vld_instantiation_params = find_in_list(
1149 get_iterable(vnf_params, "internal-vld"),
1150 lambda i_vld: i_vld["name"] == vld["id"],
1151 )
1152 if vld_instantiation_params:
1153 vld_params.update(vld_instantiation_params)
1154 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1155
1156 vdur_list = []
1157 for vdur in target_vnf.get("vdur", ()):
1158 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1159 continue # This vdu must not be created
1160 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1161
1162 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1163
1164 if ssh_keys_all:
1165 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1166 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1167 if (
1168 vdu_configuration
1169 and vdu_configuration.get("config-access")
1170 and vdu_configuration.get("config-access").get("ssh-access")
1171 ):
1172 vdur["ssh-keys"] = ssh_keys_all
1173 vdur["ssh-access-required"] = vdu_configuration[
1174 "config-access"
1175 ]["ssh-access"]["required"]
1176 elif (
1177 vnf_configuration
1178 and vnf_configuration.get("config-access")
1179 and vnf_configuration.get("config-access").get("ssh-access")
1180 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1181 ):
1182 vdur["ssh-keys"] = ssh_keys_all
1183 vdur["ssh-access-required"] = vnf_configuration[
1184 "config-access"
1185 ]["ssh-access"]["required"]
1186 elif ssh_keys_instantiation and find_in_list(
1187 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1188 ):
1189 vdur["ssh-keys"] = ssh_keys_instantiation
1190
1191 self.logger.debug("NS > vdur > {}".format(vdur))
1192
1193 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1194 # cloud-init
1195 if vdud.get("cloud-init-file"):
1196 vdur["cloud-init"] = "{}:file:{}".format(
1197 vnfd["_id"], vdud.get("cloud-init-file")
1198 )
1199 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1200 if vdur["cloud-init"] not in target["cloud_init_content"]:
1201 base_folder = vnfd["_admin"]["storage"]
1202 if base_folder["pkg-dir"]:
1203 cloud_init_file = "{}/{}/cloud_init/{}".format(
1204 base_folder["folder"],
1205 base_folder["pkg-dir"],
1206 vdud.get("cloud-init-file"),
1207 )
1208 else:
1209 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1210 base_folder["folder"],
1211 vdud.get("cloud-init-file"),
1212 )
1213 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1214 target["cloud_init_content"][
1215 vdur["cloud-init"]
1216 ] = ci_file.read()
1217 elif vdud.get("cloud-init"):
1218 vdur["cloud-init"] = "{}:vdu:{}".format(
1219 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1220 )
1221 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1222 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1223 "cloud-init"
1224 ]
1225 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1226 deploy_params_vdu = self._format_additional_params(
1227 vdur.get("additionalParams") or {}
1228 )
1229 deploy_params_vdu["OSM"] = get_osm_params(
1230 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1231 )
1232 vdur["additionalParams"] = deploy_params_vdu
1233
1234 # flavor
1235 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1236 if target_vim not in ns_flavor["vim_info"]:
1237 ns_flavor["vim_info"][target_vim] = {}
1238
1239 # deal with images
1240 # in case alternative images are provided we must check if they should be applied
1241 # for the vim_type, modify the vim_type taking into account
1242 ns_image_id = int(vdur["ns-image-id"])
1243 if vdur.get("alt-image-ids"):
1244 db_vim = get_vim_account(vnfr["vim-account-id"])
1245 vim_type = db_vim["vim_type"]
1246 for alt_image_id in vdur.get("alt-image-ids"):
1247 ns_alt_image = target["image"][int(alt_image_id)]
1248 if vim_type == ns_alt_image.get("vim-type"):
1249 # must use alternative image
1250 self.logger.debug(
1251 "use alternative image id: {}".format(alt_image_id)
1252 )
1253 ns_image_id = alt_image_id
1254 vdur["ns-image-id"] = ns_image_id
1255 break
1256 ns_image = target["image"][int(ns_image_id)]
1257 if target_vim not in ns_image["vim_info"]:
1258 ns_image["vim_info"][target_vim] = {}
1259
1260 # Affinity groups
1261 if vdur.get("affinity-or-anti-affinity-group-id"):
1262 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1263 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1264 if target_vim not in ns_ags["vim_info"]:
1265 ns_ags["vim_info"][target_vim] = {}
1266
1267 vdur["vim_info"] = {target_vim: {}}
1268 # instantiation parameters
1269 if vnf_params:
1270 vdu_instantiation_params = find_in_list(
1271 get_iterable(vnf_params, "vdu"),
1272 lambda i_vdu: i_vdu["id"] == vdud["id"],
1273 )
1274 if vdu_instantiation_params:
1275 # Parse the vdu_volumes from the instantiation params
1276 vdu_volumes = get_volumes_from_instantiation_params(
1277 vdu_instantiation_params, vdud
1278 )
1279 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1280 vdur_list.append(vdur)
1281 target_vnf["vdur"] = vdur_list
1282 target["vnf"].append(target_vnf)
1283
1284 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1285 desc = await self.RO.deploy(nsr_id, target)
1286 self.logger.debug("RO return > {}".format(desc))
1287 action_id = desc["action_id"]
1288 await self._wait_ng_ro(
1289 nsr_id,
1290 action_id,
1291 nslcmop_id,
1292 start_deploy,
1293 timeout_ns_deploy,
1294 stage,
1295 operation="instantiation",
1296 )
1297
1298 # Updating NSR
1299 db_nsr_update = {
1300 "_admin.deployed.RO.operational-status": "running",
1301 "detailed-status": " ".join(stage),
1302 }
1303 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1304 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1305 self._write_op_status(nslcmop_id, stage)
1306 self.logger.debug(
1307 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1308 )
1309 return
1310
1311 async def _wait_ng_ro(
1312 self,
1313 nsr_id,
1314 action_id,
1315 nslcmop_id=None,
1316 start_time=None,
1317 timeout=600,
1318 stage=None,
1319 operation=None,
1320 ):
1321 detailed_status_old = None
1322 db_nsr_update = {}
1323 start_time = start_time or time()
1324 while time() <= start_time + timeout:
1325 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1326 self.logger.debug("Wait NG RO > {}".format(desc_status))
1327 if desc_status["status"] == "FAILED":
1328 raise NgRoException(desc_status["details"])
1329 elif desc_status["status"] == "BUILD":
1330 if stage:
1331 stage[2] = "VIM: ({})".format(desc_status["details"])
1332 elif desc_status["status"] == "DONE":
1333 if stage:
1334 stage[2] = "Deployed at VIM"
1335 break
1336 else:
1337 assert False, "ROclient.check_ns_status returns unknown {}".format(
1338 desc_status["status"]
1339 )
1340 if stage and nslcmop_id and stage[2] != detailed_status_old:
1341 detailed_status_old = stage[2]
1342 db_nsr_update["detailed-status"] = " ".join(stage)
1343 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1344 self._write_op_status(nslcmop_id, stage)
1345 await asyncio.sleep(15, loop=self.loop)
1346 else: # timeout_ns_deploy
1347 raise NgRoException("Timeout waiting ns to deploy")
1348
1349 async def _terminate_ng_ro(
1350 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1351 ):
1352 db_nsr_update = {}
1353 failed_detail = []
1354 action_id = None
1355 start_deploy = time()
1356 try:
1357 target = {
1358 "ns": {"vld": []},
1359 "vnf": [],
1360 "image": [],
1361 "flavor": [],
1362 "action_id": nslcmop_id,
1363 }
1364 desc = await self.RO.deploy(nsr_id, target)
1365 action_id = desc["action_id"]
1366 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1367 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1368 self.logger.debug(
1369 logging_text
1370 + "ns terminate action at RO. action_id={}".format(action_id)
1371 )
1372
1373 # wait until done
1374 delete_timeout = 20 * 60 # 20 minutes
1375 await self._wait_ng_ro(
1376 nsr_id,
1377 action_id,
1378 nslcmop_id,
1379 start_deploy,
1380 delete_timeout,
1381 stage,
1382 operation="termination",
1383 )
1384
1385 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1386 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1387 # delete all nsr
1388 await self.RO.delete(nsr_id)
1389 except Exception as e:
1390 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1391 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1392 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1393 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1394 self.logger.debug(
1395 logging_text + "RO_action_id={} already deleted".format(action_id)
1396 )
1397 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1398 failed_detail.append("delete conflict: {}".format(e))
1399 self.logger.debug(
1400 logging_text
1401 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1402 )
1403 else:
1404 failed_detail.append("delete error: {}".format(e))
1405 self.logger.error(
1406 logging_text
1407 + "RO_action_id={} delete error: {}".format(action_id, e)
1408 )
1409
1410 if failed_detail:
1411 stage[2] = "Error deleting from VIM"
1412 else:
1413 stage[2] = "Deleted from VIM"
1414 db_nsr_update["detailed-status"] = " ".join(stage)
1415 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1416 self._write_op_status(nslcmop_id, stage)
1417
1418 if failed_detail:
1419 raise LcmException("; ".join(failed_detail))
1420 return
1421
1422 async def instantiate_RO(
1423 self,
1424 logging_text,
1425 nsr_id,
1426 nsd,
1427 db_nsr,
1428 db_nslcmop,
1429 db_vnfrs,
1430 db_vnfds,
1431 n2vc_key_list,
1432 stage,
1433 ):
1434 """
1435 Instantiate at RO
1436 :param logging_text: preffix text to use at logging
1437 :param nsr_id: nsr identity
1438 :param nsd: database content of ns descriptor
1439 :param db_nsr: database content of ns record
1440 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1441 :param db_vnfrs:
1442 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1443 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1444 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1445 :return: None or exception
1446 """
1447 try:
1448 start_deploy = time()
1449 ns_params = db_nslcmop.get("operationParams")
1450 if ns_params and ns_params.get("timeout_ns_deploy"):
1451 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1452 else:
1453 timeout_ns_deploy = self.timeout.get(
1454 "ns_deploy", self.timeout_ns_deploy
1455 )
1456
1457 # Check for and optionally request placement optimization. Database will be updated if placement activated
1458 stage[2] = "Waiting for Placement."
1459 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1460 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1461 for vnfr in db_vnfrs.values():
1462 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1463 break
1464 else:
1465 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1466
1467 return await self._instantiate_ng_ro(
1468 logging_text,
1469 nsr_id,
1470 nsd,
1471 db_nsr,
1472 db_nslcmop,
1473 db_vnfrs,
1474 db_vnfds,
1475 n2vc_key_list,
1476 stage,
1477 start_deploy,
1478 timeout_ns_deploy,
1479 )
1480 except Exception as e:
1481 stage[2] = "ERROR deploying at VIM"
1482 self.set_vnfr_at_error(db_vnfrs, str(e))
1483 self.logger.error(
1484 "Error deploying at VIM {}".format(e),
1485 exc_info=not isinstance(
1486 e,
1487 (
1488 ROclient.ROClientException,
1489 LcmException,
1490 DbException,
1491 NgRoException,
1492 ),
1493 ),
1494 )
1495 raise
1496
1497 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1498 """
1499 Wait for kdu to be up, get ip address
1500 :param logging_text: prefix use for logging
1501 :param nsr_id:
1502 :param vnfr_id:
1503 :param kdu_name:
1504 :return: IP address, K8s services
1505 """
1506
1507 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1508 nb_tries = 0
1509
1510 while nb_tries < 360:
1511 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1512 kdur = next(
1513 (
1514 x
1515 for x in get_iterable(db_vnfr, "kdur")
1516 if x.get("kdu-name") == kdu_name
1517 ),
1518 None,
1519 )
1520 if not kdur:
1521 raise LcmException(
1522 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1523 )
1524 if kdur.get("status"):
1525 if kdur["status"] in ("READY", "ENABLED"):
1526 return kdur.get("ip-address"), kdur.get("services")
1527 else:
1528 raise LcmException(
1529 "target KDU={} is in error state".format(kdu_name)
1530 )
1531
1532 await asyncio.sleep(10, loop=self.loop)
1533 nb_tries += 1
1534 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1535
1536 async def wait_vm_up_insert_key_ro(
1537 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1538 ):
1539 """
1540 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1541 :param logging_text: prefix use for logging
1542 :param nsr_id:
1543 :param vnfr_id:
1544 :param vdu_id:
1545 :param vdu_index:
1546 :param pub_key: public ssh key to inject, None to skip
1547 :param user: user to apply the public ssh key
1548 :return: IP address
1549 """
1550
1551 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1552 ro_nsr_id = None
1553 ip_address = None
1554 nb_tries = 0
1555 target_vdu_id = None
1556 ro_retries = 0
1557
1558 while True:
1559
1560 ro_retries += 1
1561 if ro_retries >= 360: # 1 hour
1562 raise LcmException(
1563 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1564 )
1565
1566 await asyncio.sleep(10, loop=self.loop)
1567
1568 # get ip address
1569 if not target_vdu_id:
1570 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1571
1572 if not vdu_id: # for the VNF case
1573 if db_vnfr.get("status") == "ERROR":
1574 raise LcmException(
1575 "Cannot inject ssh-key because target VNF is in error state"
1576 )
1577 ip_address = db_vnfr.get("ip-address")
1578 if not ip_address:
1579 continue
1580 vdur = next(
1581 (
1582 x
1583 for x in get_iterable(db_vnfr, "vdur")
1584 if x.get("ip-address") == ip_address
1585 ),
1586 None,
1587 )
1588 else: # VDU case
1589 vdur = next(
1590 (
1591 x
1592 for x in get_iterable(db_vnfr, "vdur")
1593 if x.get("vdu-id-ref") == vdu_id
1594 and x.get("count-index") == vdu_index
1595 ),
1596 None,
1597 )
1598
1599 if (
1600 not vdur and len(db_vnfr.get("vdur", ())) == 1
1601 ): # If only one, this should be the target vdu
1602 vdur = db_vnfr["vdur"][0]
1603 if not vdur:
1604 raise LcmException(
1605 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1606 vnfr_id, vdu_id, vdu_index
1607 )
1608 )
1609 # New generation RO stores information at "vim_info"
1610 ng_ro_status = None
1611 target_vim = None
1612 if vdur.get("vim_info"):
1613 target_vim = next(
1614 t for t in vdur["vim_info"]
1615 ) # there should be only one key
1616 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1617 if (
1618 vdur.get("pdu-type")
1619 or vdur.get("status") == "ACTIVE"
1620 or ng_ro_status == "ACTIVE"
1621 ):
1622 ip_address = vdur.get("ip-address")
1623 if not ip_address:
1624 continue
1625 target_vdu_id = vdur["vdu-id-ref"]
1626 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1627 raise LcmException(
1628 "Cannot inject ssh-key because target VM is in error state"
1629 )
1630
1631 if not target_vdu_id:
1632 continue
1633
1634 # inject public key into machine
1635 if pub_key and user:
1636 self.logger.debug(logging_text + "Inserting RO key")
1637 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1638 if vdur.get("pdu-type"):
1639 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1640 return ip_address
1641 try:
1642 ro_vm_id = "{}-{}".format(
1643 db_vnfr["member-vnf-index-ref"], target_vdu_id
1644 ) # TODO add vdu_index
1645 if self.ng_ro:
1646 target = {
1647 "action": {
1648 "action": "inject_ssh_key",
1649 "key": pub_key,
1650 "user": user,
1651 },
1652 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1653 }
1654 desc = await self.RO.deploy(nsr_id, target)
1655 action_id = desc["action_id"]
1656 await self._wait_ng_ro(
1657 nsr_id, action_id, timeout=600, operation="instantiation"
1658 )
1659 break
1660 else:
1661 # wait until NS is deployed at RO
1662 if not ro_nsr_id:
1663 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1664 ro_nsr_id = deep_get(
1665 db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
1666 )
1667 if not ro_nsr_id:
1668 continue
1669 result_dict = await self.RO.create_action(
1670 item="ns",
1671 item_id_name=ro_nsr_id,
1672 descriptor={
1673 "add_public_key": pub_key,
1674 "vms": [ro_vm_id],
1675 "user": user,
1676 },
1677 )
1678 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1679 if not result_dict or not isinstance(result_dict, dict):
1680 raise LcmException(
1681 "Unknown response from RO when injecting key"
1682 )
1683 for result in result_dict.values():
1684 if result.get("vim_result") == 200:
1685 break
1686 else:
1687 raise ROclient.ROClientException(
1688 "error injecting key: {}".format(
1689 result.get("description")
1690 )
1691 )
1692 break
1693 except NgRoException as e:
1694 raise LcmException(
1695 "Reaching max tries injecting key. Error: {}".format(e)
1696 )
1697 except ROclient.ROClientException as e:
1698 if not nb_tries:
1699 self.logger.debug(
1700 logging_text
1701 + "error injecting key: {}. Retrying until {} seconds".format(
1702 e, 20 * 10
1703 )
1704 )
1705 nb_tries += 1
1706 if nb_tries >= 20:
1707 raise LcmException(
1708 "Reaching max tries injecting key. Error: {}".format(e)
1709 )
1710 else:
1711 break
1712
1713 return ip_address
1714
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record identifier, used to re-read configurationStatus
        :param vca_deployed_list: list of deployed VCAs ("_admin.deployed.VCA")
        :param vca_index: index of the VCA whose dependencies are awaited
        :raises LcmException: when a dependent charm is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): each iteration sleeps 10s but "timeout" is decremented
        # by 1 from 300, so the effective wall-clock limit is ~3000s rather
        # than 300s -- confirm this is intended
        timeout = 300
        while timeout >= 0:
            # re-read the NS record: dependent charms may have progressed
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level charms (no member-vnf-index) depend on every other
                # charm; VNF-level charms only on charms of the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # a dependency is still in progress: leave the for loop
                        # without triggering its else-branch and retry
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1752
1753 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1754 vca_id = None
1755 if db_vnfr:
1756 vca_id = deep_get(db_vnfr, ("vca-id",))
1757 elif db_nsr:
1758 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1759 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1760 return vca_id
1761
1762 async def instantiate_N2VC(
1763 self,
1764 logging_text,
1765 vca_index,
1766 nsi_id,
1767 db_nsr,
1768 db_vnfr,
1769 vdu_id,
1770 kdu_name,
1771 vdu_index,
1772 config_descriptor,
1773 deploy_params,
1774 base_folder,
1775 nslcmop_id,
1776 stage,
1777 vca_type,
1778 vca_name,
1779 ee_config_descriptor,
1780 ):
1781 nsr_id = db_nsr["_id"]
1782 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1783 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1784 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1785 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1786 db_dict = {
1787 "collection": "nsrs",
1788 "filter": {"_id": nsr_id},
1789 "path": db_update_entry,
1790 }
1791 step = ""
1792 try:
1793
1794 element_type = "NS"
1795 element_under_configuration = nsr_id
1796
1797 vnfr_id = None
1798 if db_vnfr:
1799 vnfr_id = db_vnfr["_id"]
1800 osm_config["osm"]["vnf_id"] = vnfr_id
1801
1802 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1803
1804 if vca_type == "native_charm":
1805 index_number = 0
1806 else:
1807 index_number = vdu_index or 0
1808
1809 if vnfr_id:
1810 element_type = "VNF"
1811 element_under_configuration = vnfr_id
1812 namespace += ".{}-{}".format(vnfr_id, index_number)
1813 if vdu_id:
1814 namespace += ".{}-{}".format(vdu_id, index_number)
1815 element_type = "VDU"
1816 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1817 osm_config["osm"]["vdu_id"] = vdu_id
1818 elif kdu_name:
1819 namespace += ".{}".format(kdu_name)
1820 element_type = "KDU"
1821 element_under_configuration = kdu_name
1822 osm_config["osm"]["kdu_name"] = kdu_name
1823
1824 # Get artifact path
1825 if base_folder["pkg-dir"]:
1826 artifact_path = "{}/{}/{}/{}".format(
1827 base_folder["folder"],
1828 base_folder["pkg-dir"],
1829 "charms"
1830 if vca_type
1831 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1832 else "helm-charts",
1833 vca_name,
1834 )
1835 else:
1836 artifact_path = "{}/Scripts/{}/{}/".format(
1837 base_folder["folder"],
1838 "charms"
1839 if vca_type
1840 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1841 else "helm-charts",
1842 vca_name,
1843 )
1844
1845 self.logger.debug("Artifact path > {}".format(artifact_path))
1846
1847 # get initial_config_primitive_list that applies to this element
1848 initial_config_primitive_list = config_descriptor.get(
1849 "initial-config-primitive"
1850 )
1851
1852 self.logger.debug(
1853 "Initial config primitive list > {}".format(
1854 initial_config_primitive_list
1855 )
1856 )
1857
1858 # add config if not present for NS charm
1859 ee_descriptor_id = ee_config_descriptor.get("id")
1860 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1861 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1862 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1863 )
1864
1865 self.logger.debug(
1866 "Initial config primitive list #2 > {}".format(
1867 initial_config_primitive_list
1868 )
1869 )
1870 # n2vc_redesign STEP 3.1
1871 # find old ee_id if exists
1872 ee_id = vca_deployed.get("ee_id")
1873
1874 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1875 # create or register execution environment in VCA
1876 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1877
1878 self._write_configuration_status(
1879 nsr_id=nsr_id,
1880 vca_index=vca_index,
1881 status="CREATING",
1882 element_under_configuration=element_under_configuration,
1883 element_type=element_type,
1884 )
1885
1886 step = "create execution environment"
1887 self.logger.debug(logging_text + step)
1888
1889 ee_id = None
1890 credentials = None
1891 if vca_type == "k8s_proxy_charm":
1892 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1893 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1894 namespace=namespace,
1895 artifact_path=artifact_path,
1896 db_dict=db_dict,
1897 vca_id=vca_id,
1898 )
1899 elif vca_type == "helm" or vca_type == "helm-v3":
1900 ee_id, credentials = await self.vca_map[
1901 vca_type
1902 ].create_execution_environment(
1903 namespace=namespace,
1904 reuse_ee_id=ee_id,
1905 db_dict=db_dict,
1906 config=osm_config,
1907 artifact_path=artifact_path,
1908 vca_type=vca_type,
1909 )
1910 else:
1911 ee_id, credentials = await self.vca_map[
1912 vca_type
1913 ].create_execution_environment(
1914 namespace=namespace,
1915 reuse_ee_id=ee_id,
1916 db_dict=db_dict,
1917 vca_id=vca_id,
1918 )
1919
1920 elif vca_type == "native_charm":
1921 step = "Waiting to VM being up and getting IP address"
1922 self.logger.debug(logging_text + step)
1923 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1924 logging_text,
1925 nsr_id,
1926 vnfr_id,
1927 vdu_id,
1928 vdu_index,
1929 user=None,
1930 pub_key=None,
1931 )
1932 credentials = {"hostname": rw_mgmt_ip}
1933 # get username
1934 username = deep_get(
1935 config_descriptor, ("config-access", "ssh-access", "default-user")
1936 )
1937 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1938 # merged. Meanwhile let's get username from initial-config-primitive
1939 if not username and initial_config_primitive_list:
1940 for config_primitive in initial_config_primitive_list:
1941 for param in config_primitive.get("parameter", ()):
1942 if param["name"] == "ssh-username":
1943 username = param["value"]
1944 break
1945 if not username:
1946 raise LcmException(
1947 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1948 "'config-access.ssh-access.default-user'"
1949 )
1950 credentials["username"] = username
1951 # n2vc_redesign STEP 3.2
1952
1953 self._write_configuration_status(
1954 nsr_id=nsr_id,
1955 vca_index=vca_index,
1956 status="REGISTERING",
1957 element_under_configuration=element_under_configuration,
1958 element_type=element_type,
1959 )
1960
1961 step = "register execution environment {}".format(credentials)
1962 self.logger.debug(logging_text + step)
1963 ee_id = await self.vca_map[vca_type].register_execution_environment(
1964 credentials=credentials,
1965 namespace=namespace,
1966 db_dict=db_dict,
1967 vca_id=vca_id,
1968 )
1969
1970 # for compatibility with MON/POL modules, the need model and application name at database
1971 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1972 ee_id_parts = ee_id.split(".")
1973 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1974 if len(ee_id_parts) >= 2:
1975 model_name = ee_id_parts[0]
1976 application_name = ee_id_parts[1]
1977 db_nsr_update[db_update_entry + "model"] = model_name
1978 db_nsr_update[db_update_entry + "application"] = application_name
1979
1980 # n2vc_redesign STEP 3.3
1981 step = "Install configuration Software"
1982
1983 self._write_configuration_status(
1984 nsr_id=nsr_id,
1985 vca_index=vca_index,
1986 status="INSTALLING SW",
1987 element_under_configuration=element_under_configuration,
1988 element_type=element_type,
1989 other_update=db_nsr_update,
1990 )
1991
1992 # TODO check if already done
1993 self.logger.debug(logging_text + step)
1994 config = None
1995 if vca_type == "native_charm":
1996 config_primitive = next(
1997 (p for p in initial_config_primitive_list if p["name"] == "config"),
1998 None,
1999 )
2000 if config_primitive:
2001 config = self._map_primitive_params(
2002 config_primitive, {}, deploy_params
2003 )
2004 num_units = 1
2005 if vca_type == "lxc_proxy_charm":
2006 if element_type == "NS":
2007 num_units = db_nsr.get("config-units") or 1
2008 elif element_type == "VNF":
2009 num_units = db_vnfr.get("config-units") or 1
2010 elif element_type == "VDU":
2011 for v in db_vnfr["vdur"]:
2012 if vdu_id == v["vdu-id-ref"]:
2013 num_units = v.get("config-units") or 1
2014 break
2015 if vca_type != "k8s_proxy_charm":
2016 await self.vca_map[vca_type].install_configuration_sw(
2017 ee_id=ee_id,
2018 artifact_path=artifact_path,
2019 db_dict=db_dict,
2020 config=config,
2021 num_units=num_units,
2022 vca_id=vca_id,
2023 vca_type=vca_type,
2024 )
2025
2026 # write in db flag of configuration_sw already installed
2027 self.update_db_2(
2028 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2029 )
2030
2031 # add relations for this VCA (wait for other peers related with this VCA)
2032 await self._add_vca_relations(
2033 logging_text=logging_text,
2034 nsr_id=nsr_id,
2035 vca_type=vca_type,
2036 vca_index=vca_index,
2037 )
2038
2039 # if SSH access is required, then get execution environment SSH public
2040 # if native charm we have waited already to VM be UP
2041 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2042 pub_key = None
2043 user = None
2044 # self.logger.debug("get ssh key block")
2045 if deep_get(
2046 config_descriptor, ("config-access", "ssh-access", "required")
2047 ):
2048 # self.logger.debug("ssh key needed")
2049 # Needed to inject a ssh key
2050 user = deep_get(
2051 config_descriptor,
2052 ("config-access", "ssh-access", "default-user"),
2053 )
2054 step = "Install configuration Software, getting public ssh key"
2055 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2056 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2057 )
2058
2059 step = "Insert public key into VM user={} ssh_key={}".format(
2060 user, pub_key
2061 )
2062 else:
2063 # self.logger.debug("no need to get ssh key")
2064 step = "Waiting to VM being up and getting IP address"
2065 self.logger.debug(logging_text + step)
2066
2067 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2068 rw_mgmt_ip = None
2069
2070 # n2vc_redesign STEP 5.1
2071 # wait for RO (ip-address) Insert pub_key into VM
2072 if vnfr_id:
2073 if kdu_name:
2074 rw_mgmt_ip, services = await self.wait_kdu_up(
2075 logging_text, nsr_id, vnfr_id, kdu_name
2076 )
2077 vnfd = self.db.get_one(
2078 "vnfds_revisions",
2079 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2080 )
2081 kdu = get_kdu(vnfd, kdu_name)
2082 kdu_services = [
2083 service["name"] for service in get_kdu_services(kdu)
2084 ]
2085 exposed_services = []
2086 for service in services:
2087 if any(s in service["name"] for s in kdu_services):
2088 exposed_services.append(service)
2089 await self.vca_map[vca_type].exec_primitive(
2090 ee_id=ee_id,
2091 primitive_name="config",
2092 params_dict={
2093 "osm-config": json.dumps(
2094 OsmConfigBuilder(
2095 k8s={"services": exposed_services}
2096 ).build()
2097 )
2098 },
2099 vca_id=vca_id,
2100 )
2101
2102 # This verification is needed in order to avoid trying to add a public key
2103 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2104 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2105 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2106 # or it is a KNF)
2107 elif db_vnfr.get("vdur"):
2108 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2109 logging_text,
2110 nsr_id,
2111 vnfr_id,
2112 vdu_id,
2113 vdu_index,
2114 user=user,
2115 pub_key=pub_key,
2116 )
2117
2118 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2119
2120 # store rw_mgmt_ip in deploy params for later replacement
2121 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2122
2123 # n2vc_redesign STEP 6 Execute initial config primitive
2124 step = "execute initial config primitive"
2125
2126 # wait for dependent primitives execution (NS -> VNF -> VDU)
2127 if initial_config_primitive_list:
2128 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2129
2130 # stage, in function of element type: vdu, kdu, vnf or ns
2131 my_vca = vca_deployed_list[vca_index]
2132 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2133 # VDU or KDU
2134 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2135 elif my_vca.get("member-vnf-index"):
2136 # VNF
2137 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2138 else:
2139 # NS
2140 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2141
2142 self._write_configuration_status(
2143 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2144 )
2145
2146 self._write_op_status(op_id=nslcmop_id, stage=stage)
2147
2148 check_if_terminated_needed = True
2149 for initial_config_primitive in initial_config_primitive_list:
2150 # adding information on the vca_deployed if it is a NS execution environment
2151 if not vca_deployed["member-vnf-index"]:
2152 deploy_params["ns_config_info"] = json.dumps(
2153 self._get_ns_config_info(nsr_id)
2154 )
2155 # TODO check if already done
2156 primitive_params_ = self._map_primitive_params(
2157 initial_config_primitive, {}, deploy_params
2158 )
2159
2160 step = "execute primitive '{}' params '{}'".format(
2161 initial_config_primitive["name"], primitive_params_
2162 )
2163 self.logger.debug(logging_text + step)
2164 await self.vca_map[vca_type].exec_primitive(
2165 ee_id=ee_id,
2166 primitive_name=initial_config_primitive["name"],
2167 params_dict=primitive_params_,
2168 db_dict=db_dict,
2169 vca_id=vca_id,
2170 vca_type=vca_type,
2171 )
2172 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2173 if check_if_terminated_needed:
2174 if config_descriptor.get("terminate-config-primitive"):
2175 self.update_db_2(
2176 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2177 )
2178 check_if_terminated_needed = False
2179
2180 # TODO register in database that primitive is done
2181
2182 # STEP 7 Configure metrics
2183 if vca_type == "helm" or vca_type == "helm-v3":
2184 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2185 ee_id=ee_id,
2186 artifact_path=artifact_path,
2187 ee_config_descriptor=ee_config_descriptor,
2188 vnfr_id=vnfr_id,
2189 nsr_id=nsr_id,
2190 target_ip=rw_mgmt_ip,
2191 )
2192 if prometheus_jobs:
2193 self.update_db_2(
2194 "nsrs",
2195 nsr_id,
2196 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2197 )
2198
2199 for job in prometheus_jobs:
2200 self.db.set_one(
2201 "prometheus_jobs",
2202 {"job_name": job["job_name"]},
2203 job,
2204 upsert=True,
2205 fail_on_empty=False,
2206 )
2207
2208 step = "instantiated at VCA"
2209 self.logger.debug(logging_text + step)
2210
2211 self._write_configuration_status(
2212 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2213 )
2214
2215 except Exception as e: # TODO not use Exception but N2VC exception
2216 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2217 if not isinstance(
2218 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2219 ):
2220 self.logger.error(
2221 "Exception while {} : {}".format(step, e), exc_info=True
2222 )
2223 self._write_configuration_status(
2224 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2225 )
2226 raise LcmException("{} {}".format(step, e)) from e
2227
2228 def _write_ns_status(
2229 self,
2230 nsr_id: str,
2231 ns_state: str,
2232 current_operation: str,
2233 current_operation_id: str,
2234 error_description: str = None,
2235 error_detail: str = None,
2236 other_update: dict = None,
2237 ):
2238 """
2239 Update db_nsr fields.
2240 :param nsr_id:
2241 :param ns_state:
2242 :param current_operation:
2243 :param current_operation_id:
2244 :param error_description:
2245 :param error_detail:
2246 :param other_update: Other required changes at database if provided, will be cleared
2247 :return:
2248 """
2249 try:
2250 db_dict = other_update or {}
2251 db_dict[
2252 "_admin.nslcmop"
2253 ] = current_operation_id # for backward compatibility
2254 db_dict["_admin.current-operation"] = current_operation_id
2255 db_dict["_admin.operation-type"] = (
2256 current_operation if current_operation != "IDLE" else None
2257 )
2258 db_dict["currentOperation"] = current_operation
2259 db_dict["currentOperationID"] = current_operation_id
2260 db_dict["errorDescription"] = error_description
2261 db_dict["errorDetail"] = error_detail
2262
2263 if ns_state:
2264 db_dict["nsState"] = ns_state
2265 self.update_db_2("nsrs", nsr_id, db_dict)
2266 except DbException as e:
2267 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2268
2269 def _write_op_status(
2270 self,
2271 op_id: str,
2272 stage: list = None,
2273 error_message: str = None,
2274 queuePosition: int = 0,
2275 operation_state: str = None,
2276 other_update: dict = None,
2277 ):
2278 try:
2279 db_dict = other_update or {}
2280 db_dict["queuePosition"] = queuePosition
2281 if isinstance(stage, list):
2282 db_dict["stage"] = stage[0]
2283 db_dict["detailed-status"] = " ".join(stage)
2284 elif stage is not None:
2285 db_dict["stage"] = str(stage)
2286
2287 if error_message is not None:
2288 db_dict["errorMessage"] = error_message
2289 if operation_state is not None:
2290 db_dict["operationState"] = operation_state
2291 db_dict["statusEnteredTime"] = time()
2292 self.update_db_2("nslcmops", op_id, db_dict)
2293 except DbException as e:
2294 self.logger.warn(
2295 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2296 )
2297
2298 def _write_all_config_status(self, db_nsr: dict, status: str):
2299 try:
2300 nsr_id = db_nsr["_id"]
2301 # configurationStatus
2302 config_status = db_nsr.get("configurationStatus")
2303 if config_status:
2304 db_nsr_update = {
2305 "configurationStatus.{}.status".format(index): status
2306 for index, v in enumerate(config_status)
2307 if v
2308 }
2309 # update status
2310 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2311
2312 except DbException as e:
2313 self.logger.warn(
2314 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2315 )
2316
2317 def _write_configuration_status(
2318 self,
2319 nsr_id: str,
2320 vca_index: int,
2321 status: str = None,
2322 element_under_configuration: str = None,
2323 element_type: str = None,
2324 other_update: dict = None,
2325 ):
2326
2327 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2328 # .format(vca_index, status))
2329
2330 try:
2331 db_path = "configurationStatus.{}.".format(vca_index)
2332 db_dict = other_update or {}
2333 if status:
2334 db_dict[db_path + "status"] = status
2335 if element_under_configuration:
2336 db_dict[
2337 db_path + "elementUnderConfiguration"
2338 ] = element_under_configuration
2339 if element_type:
2340 db_dict[db_path + "elementType"] = element_type
2341 self.update_db_2("nsrs", nsr_id, db_dict)
2342 except DbException as e:
2343 self.logger.warn(
2344 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2345 status, nsr_id, vca_index, e
2346 )
2347 )
2348
2349 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2350 """
2351 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2352 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2353 Database is used because the result can be obtained from a different LCM worker in case of HA.
2354 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2355 :param db_nslcmop: database content of nslcmop
2356 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2357 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2358 computed 'vim-account-id'
2359 """
2360 modified = False
2361 nslcmop_id = db_nslcmop["_id"]
2362 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2363 if placement_engine == "PLA":
2364 self.logger.debug(
2365 logging_text + "Invoke and wait for placement optimization"
2366 )
2367 await self.msg.aiowrite(
2368 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2369 )
2370 db_poll_interval = 5
2371 wait = db_poll_interval * 10
2372 pla_result = None
2373 while not pla_result and wait >= 0:
2374 await asyncio.sleep(db_poll_interval)
2375 wait -= db_poll_interval
2376 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2377 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2378
2379 if not pla_result:
2380 raise LcmException(
2381 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2382 )
2383
2384 for pla_vnf in pla_result["vnf"]:
2385 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2386 if not pla_vnf.get("vimAccountId") or not vnfr:
2387 continue
2388 modified = True
2389 self.db.set_one(
2390 "vnfrs",
2391 {"_id": vnfr["_id"]},
2392 {"vim-account-id": pla_vnf["vimAccountId"]},
2393 )
2394 # Modifies db_vnfrs
2395 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2396 return modified
2397
2398 def update_nsrs_with_pla_result(self, params):
2399 try:
2400 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2401 self.update_db_2(
2402 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2403 )
2404 except Exception as e:
2405 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2406
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service: read all needed records, deploy KDUs,
        deploy the NS at the VIM through RO, deploy execution environments
        (charms) through N2VC and run Day-1 configuration. The outcome is
        written to the nsrs/nslcmops records and notified through kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Result is persisted at database and sent via kafka.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker (HA deployment) owns this operation
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-encoded; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            # sync the local filesystem copy of the package artifacts
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode them
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so this membership test
                # against a plain id string never matches and vnfds are re-read
                # for every vnfr sharing the same vnfd — confirm and deduplicate
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            # ensure _admin.deployed.RO.vnfd exists and is a list
            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            # launch RO deployment as a background task; it runs in parallel with
            # the N2VC deployments below and is awaited at the finally block
            stage[1] = "Deploying NS at VIM."
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm (if the vnfd declares one)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # Deploy charms for each KDU that declares a configuration
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of the work (waiting for tasks, status updates) is done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the whole traceback as the error text
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE: rebinds the outer "exc"; harmless because "exc" was
                # already consumed above, but worth keeping in mind
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the final operation state through kafka (best effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2892
2893 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2894 if vnfd_id not in cached_vnfds:
2895 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2896 return cached_vnfds[vnfd_id]
2897
2898 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2899 if vnf_profile_id not in cached_vnfrs:
2900 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2901 "vnfrs",
2902 {
2903 "member-vnf-index-ref": vnf_profile_id,
2904 "nsr-id-ref": nsr_id,
2905 },
2906 )
2907 return cached_vnfrs[vnf_profile_id]
2908
2909 def _is_deployed_vca_in_relation(
2910 self, vca: DeployedVCA, relation: Relation
2911 ) -> bool:
2912 found = False
2913 for endpoint in (relation.provider, relation.requirer):
2914 if endpoint["kdu-resource-profile-id"]:
2915 continue
2916 found = (
2917 vca.vnf_profile_id == endpoint.vnf_profile_id
2918 and vca.vdu_profile_id == endpoint.vdu_profile_id
2919 and vca.execution_environment_ref == endpoint.execution_environment_ref
2920 )
2921 if found:
2922 break
2923 return found
2924
2925 def _update_ee_relation_data_with_implicit_data(
2926 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2927 ):
2928 ee_relation_data = safe_get_ee_relation(
2929 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2930 )
2931 ee_relation_level = EELevel.get_level(ee_relation_data)
2932 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2933 "execution-environment-ref"
2934 ]:
2935 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2936 vnfd_id = vnf_profile["vnfd-id"]
2937 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2938 entity_id = (
2939 vnfd_id
2940 if ee_relation_level == EELevel.VNF
2941 else ee_relation_data["vdu-profile-id"]
2942 )
2943 ee = get_juju_ee_ref(db_vnfd, entity_id)
2944 if not ee:
2945 raise Exception(
2946 f"not execution environments found for ee_relation {ee_relation_data}"
2947 )
2948 ee_relation_data["execution-environment-ref"] = ee["id"]
2949 return ee_relation_data
2950
2951 def _get_ns_relations(
2952 self,
2953 nsr_id: str,
2954 nsd: Dict[str, Any],
2955 vca: DeployedVCA,
2956 cached_vnfds: Dict[str, Any],
2957 ) -> List[Relation]:
2958 relations = []
2959 db_ns_relations = get_ns_configuration_relation_list(nsd)
2960 for r in db_ns_relations:
2961 provider_dict = None
2962 requirer_dict = None
2963 if all(key in r for key in ("provider", "requirer")):
2964 provider_dict = r["provider"]
2965 requirer_dict = r["requirer"]
2966 elif "entities" in r:
2967 provider_id = r["entities"][0]["id"]
2968 provider_dict = {
2969 "nsr-id": nsr_id,
2970 "endpoint": r["entities"][0]["endpoint"],
2971 }
2972 if provider_id != nsd["id"]:
2973 provider_dict["vnf-profile-id"] = provider_id
2974 requirer_id = r["entities"][1]["id"]
2975 requirer_dict = {
2976 "nsr-id": nsr_id,
2977 "endpoint": r["entities"][1]["endpoint"],
2978 }
2979 if requirer_id != nsd["id"]:
2980 requirer_dict["vnf-profile-id"] = requirer_id
2981 else:
2982 raise Exception(
2983 "provider/requirer or entities must be included in the relation."
2984 )
2985 relation_provider = self._update_ee_relation_data_with_implicit_data(
2986 nsr_id, nsd, provider_dict, cached_vnfds
2987 )
2988 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2989 nsr_id, nsd, requirer_dict, cached_vnfds
2990 )
2991 provider = EERelation(relation_provider)
2992 requirer = EERelation(relation_requirer)
2993 relation = Relation(r["name"], provider, requirer)
2994 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2995 if vca_in_relation:
2996 relations.append(relation)
2997 return relations
2998
2999 def _get_vnf_relations(
3000 self,
3001 nsr_id: str,
3002 nsd: Dict[str, Any],
3003 vca: DeployedVCA,
3004 cached_vnfds: Dict[str, Any],
3005 ) -> List[Relation]:
3006 relations = []
3007 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3008 vnf_profile_id = vnf_profile["id"]
3009 vnfd_id = vnf_profile["vnfd-id"]
3010 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3011 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3012 for r in db_vnf_relations:
3013 provider_dict = None
3014 requirer_dict = None
3015 if all(key in r for key in ("provider", "requirer")):
3016 provider_dict = r["provider"]
3017 requirer_dict = r["requirer"]
3018 elif "entities" in r:
3019 provider_id = r["entities"][0]["id"]
3020 provider_dict = {
3021 "nsr-id": nsr_id,
3022 "vnf-profile-id": vnf_profile_id,
3023 "endpoint": r["entities"][0]["endpoint"],
3024 }
3025 if provider_id != vnfd_id:
3026 provider_dict["vdu-profile-id"] = provider_id
3027 requirer_id = r["entities"][1]["id"]
3028 requirer_dict = {
3029 "nsr-id": nsr_id,
3030 "vnf-profile-id": vnf_profile_id,
3031 "endpoint": r["entities"][1]["endpoint"],
3032 }
3033 if requirer_id != vnfd_id:
3034 requirer_dict["vdu-profile-id"] = requirer_id
3035 else:
3036 raise Exception(
3037 "provider/requirer or entities must be included in the relation."
3038 )
3039 relation_provider = self._update_ee_relation_data_with_implicit_data(
3040 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3041 )
3042 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3043 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3044 )
3045 provider = EERelation(relation_provider)
3046 requirer = EERelation(relation_requirer)
3047 relation = Relation(r["name"], provider, requirer)
3048 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3049 if vca_in_relation:
3050 relations.append(relation)
3051 return relations
3052
3053 def _get_kdu_resource_data(
3054 self,
3055 ee_relation: EERelation,
3056 db_nsr: Dict[str, Any],
3057 cached_vnfds: Dict[str, Any],
3058 ) -> DeployedK8sResource:
3059 nsd = get_nsd(db_nsr)
3060 vnf_profiles = get_vnf_profiles(nsd)
3061 vnfd_id = find_in_list(
3062 vnf_profiles,
3063 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3064 )["vnfd-id"]
3065 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3066 kdu_resource_profile = get_kdu_resource_profile(
3067 db_vnfd, ee_relation.kdu_resource_profile_id
3068 )
3069 kdu_name = kdu_resource_profile["kdu-name"]
3070 deployed_kdu, _ = get_deployed_kdu(
3071 db_nsr.get("_admin", ()).get("deployed", ()),
3072 kdu_name,
3073 ee_relation.vnf_profile_id,
3074 )
3075 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3076 return deployed_kdu
3077
3078 def _get_deployed_component(
3079 self,
3080 ee_relation: EERelation,
3081 db_nsr: Dict[str, Any],
3082 cached_vnfds: Dict[str, Any],
3083 ) -> DeployedComponent:
3084 nsr_id = db_nsr["_id"]
3085 deployed_component = None
3086 ee_level = EELevel.get_level(ee_relation)
3087 if ee_level == EELevel.NS:
3088 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3089 if vca:
3090 deployed_component = DeployedVCA(nsr_id, vca)
3091 elif ee_level == EELevel.VNF:
3092 vca = get_deployed_vca(
3093 db_nsr,
3094 {
3095 "vdu_id": None,
3096 "member-vnf-index": ee_relation.vnf_profile_id,
3097 "ee_descriptor_id": ee_relation.execution_environment_ref,
3098 },
3099 )
3100 if vca:
3101 deployed_component = DeployedVCA(nsr_id, vca)
3102 elif ee_level == EELevel.VDU:
3103 vca = get_deployed_vca(
3104 db_nsr,
3105 {
3106 "vdu_id": ee_relation.vdu_profile_id,
3107 "member-vnf-index": ee_relation.vnf_profile_id,
3108 "ee_descriptor_id": ee_relation.execution_environment_ref,
3109 },
3110 )
3111 if vca:
3112 deployed_component = DeployedVCA(nsr_id, vca)
3113 elif ee_level == EELevel.KDU:
3114 kdu_resource_data = self._get_kdu_resource_data(
3115 ee_relation, db_nsr, cached_vnfds
3116 )
3117 if kdu_resource_data:
3118 deployed_component = DeployedK8sResource(kdu_resource_data)
3119 return deployed_component
3120
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to add one juju relation between its provider and requirer.

        :param relation: relation with provider/requirer endpoint data
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param db_nsr: current NS record (re-read by the caller on each retry)
        :param cached_vnfds: cache of vnfd-id -> VNFD, filled on demand
        :param cached_vnfrs: cache of vnf-profile-id -> VNFR, filled on demand
        :return: True if the relation was added; False if either peer is not
            deployed yet or its config software is not installed (caller
            retries later).
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints carry no vnf-profile-id, hence no VNFR to read
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
            # remove entry from relations list
            return True
        return False
3178
3179 async def _add_vca_relations(
3180 self,
3181 logging_text,
3182 nsr_id,
3183 vca_type: str,
3184 vca_index: int,
3185 timeout: int = 3600,
3186 ) -> bool:
3187
3188 # steps:
3189 # 1. find all relations for this VCA
3190 # 2. wait for other peers related
3191 # 3. add relations
3192
3193 try:
3194 # STEP 1: find all relations for this VCA
3195
3196 # read nsr record
3197 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3198 nsd = get_nsd(db_nsr)
3199
3200 # this VCA data
3201 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3202 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3203
3204 cached_vnfds = {}
3205 cached_vnfrs = {}
3206 relations = []
3207 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3208 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3209
3210 # if no relations, terminate
3211 if not relations:
3212 self.logger.debug(logging_text + " No relations")
3213 return True
3214
3215 self.logger.debug(logging_text + " adding relations {}".format(relations))
3216
3217 # add all relations
3218 start = time()
3219 while True:
3220 # check timeout
3221 now = time()
3222 if now - start >= timeout:
3223 self.logger.error(logging_text + " : timeout adding relations")
3224 return False
3225
3226 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3227 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3228
3229 # for each relation, find the VCA's related
3230 for relation in relations.copy():
3231 added = await self._add_relation(
3232 relation,
3233 vca_type,
3234 db_nsr,
3235 cached_vnfds,
3236 cached_vnfrs,
3237 )
3238 if added:
3239 relations.remove(relation)
3240
3241 if not relations:
3242 self.logger.debug("Relations added")
3243 break
3244 await asyncio.sleep(5.0)
3245
3246 return True
3247
3248 except Exception as e:
3249 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3250 return False
3251
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU in its k8s cluster and run its initial config primitives.

        Records the kdu-instance (and, for juju bundles, the namespace) in the
        nsr, stores the deployed services and management IP in the vnfr, and on
        failure writes the error to both records before re-raising.

        :param nsr_id: NS record id
        :param nsr_db_path: path of this KDU inside the nsr (_admin.deployed.K8s.<i>)
        :param vnfr_data: VNF record the KDU belongs to
        :param kdu_index: index of the KDU inside the vnfr 'kdur' list
        :param kdud: KDU descriptor
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster uuid/type, kdu model/name, namespace, ...
        :param k8params: instantiation parameters for the KDU
        :param timeout: timeout in seconds for the install and for each primitive
        :param vca_id: VCA id, if any
        :return: the generated (or user-provided) kdu_instance name
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # the user may have fixed the instance name in the descriptor;
            # otherwise let the cluster connector generate one
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # run initial config primitives only when there is no juju EE to run them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3449
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch an _install_kdu task for every KDU present in the VNF records.

        Resolves (and, for helm-chart-v3, lazily initializes) the target k8s
        cluster, synchronizes helm repos once per cluster, stores each KDU's
        deployment info in the nsr (_admin.deployed.K8s.<index>) and registers
        one asyncio task per KDU in `task_instantiation_info`.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation id (to register tasks against)
        :param db_vnfrs: VNF records of the NS, keyed by member-vnf-index
        :param db_vnfds: list of VNF descriptors used by the NS
        :param task_instantiation_info: dict task -> human-readable description
        :raises LcmException: when a k8s cluster is unknown/uninitialized or a
            kdu is neither helm-chart nor juju-bundle
        """
        # Launch kdus if present in the descriptor

        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Map an OSM k8scluster id to its internal uuid for cluster_type,
            # waiting for in-flight k8scluster tasks and (for helm-chart-v3)
            # initializing the cluster on demand for backward compatibility.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3721
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment in the config.

        For each EE in `descriptor_config` the VCA type and charm name are
        derived, an entry in _admin.deployed.VCA is reused or created, and an
        asyncio task running instantiate_N2VC is registered.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # a named charm is a proxy charm unless stated otherwise
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # reuse an existing _admin.deployed.VCA entry matching this EE, if any
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3883
3884 @staticmethod
3885 def _create_nslcmop(nsr_id, operation, params):
3886 """
3887 Creates a ns-lcm-opp content to be stored at database.
3888 :param nsr_id: internal id of the instance
3889 :param operation: instantiate, terminate, scale, action, ...
3890 :param params: user parameters for the operation
3891 :return: dictionary following SOL005 format
3892 """
3893 # Raise exception if invalid arguments
3894 if not (nsr_id and operation and params):
3895 raise LcmException(
3896 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3897 )
3898 now = time()
3899 _id = str(uuid4())
3900 nslcmop = {
3901 "id": _id,
3902 "_id": _id,
3903 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3904 "operationState": "PROCESSING",
3905 "statusEnteredTime": now,
3906 "nsInstanceId": nsr_id,
3907 "lcmOperationType": operation,
3908 "startTime": now,
3909 "isAutomaticInvocation": False,
3910 "operationParams": params,
3911 "isCancelPending": False,
3912 "links": {
3913 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3914 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3915 },
3916 }
3917 return nslcmop
3918
3919 def _format_additional_params(self, params):
3920 params = params or {}
3921 for key, value in params.items():
3922 if str(value).startswith("!!yaml "):
3923 params[key] = yaml.safe_load(value[7:])
3924 return params
3925
3926 def _get_terminate_primitive_params(self, seq, vnf_index):
3927 primitive = seq.get("name")
3928 primitive_params = {}
3929 params = {
3930 "member_vnf_index": vnf_index,
3931 "primitive": primitive,
3932 "primitive_params": primitive_params,
3933 }
3934 desc_params = {}
3935 return self._map_primitive_params(seq, params, desc_params)
3936
3937 # sub-operations
3938
3939 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3940 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3941 if op.get("operationState") == "COMPLETED":
3942 # b. Skip sub-operation
3943 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3944 return self.SUBOPERATION_STATUS_SKIP
3945 else:
3946 # c. retry executing sub-operation
3947 # The sub-operation exists, and operationState != 'COMPLETED'
3948 # Update operationState = 'PROCESSING' to indicate a retry.
3949 operationState = "PROCESSING"
3950 detailed_status = "In progress"
3951 self._update_suboperation_status(
3952 db_nslcmop, op_index, operationState, detailed_status
3953 )
3954 # Return the sub-operation index
3955 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3956 # with arguments extracted from the sub-operation
3957 return op_index
3958
3959 # Find a sub-operation where all keys in a matching dictionary must match
3960 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3961 def _find_suboperation(self, db_nslcmop, match):
3962 if db_nslcmop and match:
3963 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3964 for i, op in enumerate(op_list):
3965 if all(op.get(k) == match[k] for k in match):
3966 return i
3967 return self.SUBOPERATION_STATUS_NOT_FOUND
3968
3969 # Update status for a sub-operation given its index
3970 def _update_suboperation_status(
3971 self, db_nslcmop, op_index, operationState, detailed_status
3972 ):
3973 # Update DB for HA tasks
3974 q_filter = {"_id": db_nslcmop["_id"]}
3975 update_dict = {
3976 "_admin.operations.{}.operationState".format(op_index): operationState,
3977 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3978 }
3979 self.db.set_one(
3980 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3981 )
3982
3983 # Add sub-operation, return the index of the added sub-operation
3984 # Optionally, set operationState, detailed-status, and operationType
3985 # Status and type are currently set for 'scale' sub-operations:
3986 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3987 # 'detailed-status' : status message
3988 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3989 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3990 def _add_suboperation(
3991 self,
3992 db_nslcmop,
3993 vnf_index,
3994 vdu_id,
3995 vdu_count_index,
3996 vdu_name,
3997 primitive,
3998 mapped_primitive_params,
3999 operationState=None,
4000 detailed_status=None,
4001 operationType=None,
4002 RO_nsr_id=None,
4003 RO_scaling_info=None,
4004 ):
4005 if not db_nslcmop:
4006 return self.SUBOPERATION_STATUS_NOT_FOUND
4007 # Get the "_admin.operations" list, if it exists
4008 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4009 op_list = db_nslcmop_admin.get("operations")
4010 # Create or append to the "_admin.operations" list
4011 new_op = {
4012 "member_vnf_index": vnf_index,
4013 "vdu_id": vdu_id,
4014 "vdu_count_index": vdu_count_index,
4015 "primitive": primitive,
4016 "primitive_params": mapped_primitive_params,
4017 }
4018 if operationState:
4019 new_op["operationState"] = operationState
4020 if detailed_status:
4021 new_op["detailed-status"] = detailed_status
4022 if operationType:
4023 new_op["lcmOperationType"] = operationType
4024 if RO_nsr_id:
4025 new_op["RO_nsr_id"] = RO_nsr_id
4026 if RO_scaling_info:
4027 new_op["RO_scaling_info"] = RO_scaling_info
4028 if not op_list:
4029 # No existing operations, create key 'operations' with current operation as first list element
4030 db_nslcmop_admin.update({"operations": [new_op]})
4031 op_list = db_nslcmop_admin.get("operations")
4032 else:
4033 # Existing operations, append operation to list
4034 op_list.append(new_op)
4035
4036 db_nslcmop_update = {"_admin.operations": op_list}
4037 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4038 op_index = len(op_list) - 1
4039 return op_index
4040
4041 # Helper methods for scale() sub-operations
4042
4043 # pre-scale/post-scale:
4044 # Check for 3 different cases:
4045 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4046 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4047 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4048 def _check_or_add_scale_suboperation(
4049 self,
4050 db_nslcmop,
4051 vnf_index,
4052 vnf_config_primitive,
4053 primitive_params,
4054 operationType,
4055 RO_nsr_id=None,
4056 RO_scaling_info=None,
4057 ):
4058 # Find this sub-operation
4059 if RO_nsr_id and RO_scaling_info:
4060 operationType = "SCALE-RO"
4061 match = {
4062 "member_vnf_index": vnf_index,
4063 "RO_nsr_id": RO_nsr_id,
4064 "RO_scaling_info": RO_scaling_info,
4065 }
4066 else:
4067 match = {
4068 "member_vnf_index": vnf_index,
4069 "primitive": vnf_config_primitive,
4070 "primitive_params": primitive_params,
4071 "lcmOperationType": operationType,
4072 }
4073 op_index = self._find_suboperation(db_nslcmop, match)
4074 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4075 # a. New sub-operation
4076 # The sub-operation does not exist, add it.
4077 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4078 # The following parameters are set to None for all kind of scaling:
4079 vdu_id = None
4080 vdu_count_index = None
4081 vdu_name = None
4082 if RO_nsr_id and RO_scaling_info:
4083 vnf_config_primitive = None
4084 primitive_params = None
4085 else:
4086 RO_nsr_id = None
4087 RO_scaling_info = None
4088 # Initial status for sub-operation
4089 operationState = "PROCESSING"
4090 detailed_status = "In progress"
4091 # Add sub-operation for pre/post-scaling (zero or more operations)
4092 self._add_suboperation(
4093 db_nslcmop,
4094 vnf_index,
4095 vdu_id,
4096 vdu_count_index,
4097 vdu_name,
4098 vnf_config_primitive,
4099 primitive_params,
4100 operationState,
4101 detailed_status,
4102 operationType,
4103 RO_nsr_id,
4104 RO_scaling_info,
4105 )
4106 return self.SUBOPERATION_STATUS_NEW
4107 else:
4108 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4109 # or op_index (operationState != 'COMPLETED')
4110 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4111
4112 # Function to return execution_environment id
4113
4114 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4115 # TODO vdu_index_count
4116 for vca in vca_deployed_list:
4117 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4118 return vca["ee_id"]
4119
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix added to every log message
        :param db_nslcmop: database content of the current nslcmop operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not
            completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (Juju controller) identifier, if any
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value kept for backward compatibility with records lacking "type"
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4225
4226 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4227 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4228 namespace = "." + db_nsr["_id"]
4229 try:
4230 await self.n2vc.delete_namespace(
4231 namespace=namespace,
4232 total_timeout=self.timeout_charm_delete,
4233 vca_id=vca_id,
4234 )
4235 except N2VCNotFound: # already deleted. Skip
4236 pass
4237 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4238
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO: deletes the ns from the VIM (polling until
        the deletion completes), then the nsd and every vnfd registered at RO.
        :param logging_text: prefix added to every log message
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id:
        :param nslcmop_id:
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None
        :raises LcmException: when any of the RO deletions fails (other than "not found")
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a pending delete action may exist from a previous (interrupted) attempt
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    # poll RO for the status of the delete action
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # deletion still in progress at the VIM
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only persist status when it changed, to avoid redundant DB writes
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete each vnfd registered at RO
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4438
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate an NS instance: run terminate primitives, delete execution
        environments and KDUs, and remove the deployment from RO/VIM.
        Progress and the final result are written to the database and notified
        over kafka; the method itself returns nothing.
        :param nsr_id: NS record id
        :param nslcmop_id: NS LCM operation record id driving this termination
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so the later DB updates are the single source of truth
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, fetching each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # choose the configuration descriptor matching the VCA scope
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4767
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for all given tasks to finish, collecting error messages.
        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping each task to a human-readable description
        :param timeout: global timeout (seconds) for the whole set of tasks
        :param stage: 3-item status list; index 1 is updated with "done/total" progress
        :param nslcmop_id: operation id used to persist progress to the database
        :param nsr_id: when provided, error summaries are also written to the nsr record
        :return: list of error detail strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout budget
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout: flag every still-pending task and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a short log line; anything else
                    # is logged with its full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4844
4845 @staticmethod
4846 def _map_primitive_params(primitive_desc, params, instantiation_params):
4847 """
4848 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4849 The default-value is used. If it is between < > it look for a value at instantiation_params
4850 :param primitive_desc: portion of VNFD/NSD that describes primitive
4851 :param params: Params provided by user
4852 :param instantiation_params: Instantiation params provided by user
4853 :return: a dictionary with the calculated params
4854 """
4855 calculated_params = {}
4856 for parameter in primitive_desc.get("parameter", ()):
4857 param_name = parameter["name"]
4858 if param_name in params:
4859 calculated_params[param_name] = params[param_name]
4860 elif "default-value" in parameter or "value" in parameter:
4861 if "value" in parameter:
4862 calculated_params[param_name] = parameter["value"]
4863 else:
4864 calculated_params[param_name] = parameter["default-value"]
4865 if (
4866 isinstance(calculated_params[param_name], str)
4867 and calculated_params[param_name].startswith("<")
4868 and calculated_params[param_name].endswith(">")
4869 ):
4870 if calculated_params[param_name][1:-1] in instantiation_params:
4871 calculated_params[param_name] = instantiation_params[
4872 calculated_params[param_name][1:-1]
4873 ]
4874 else:
4875 raise LcmException(
4876 "Parameter {} needed to execute primitive {} not provided".format(
4877 calculated_params[param_name], primitive_desc["name"]
4878 )
4879 )
4880 else:
4881 raise LcmException(
4882 "Parameter {} needed to execute primitive {} not provided".format(
4883 param_name, primitive_desc["name"]
4884 )
4885 )
4886
4887 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4888 calculated_params[param_name] = yaml.safe_dump(
4889 calculated_params[param_name], default_flow_style=True, width=256
4890 )
4891 elif isinstance(calculated_params[param_name], str) and calculated_params[
4892 param_name
4893 ].startswith("!!yaml "):
4894 calculated_params[param_name] = calculated_params[param_name][7:]
4895 if parameter.get("data-type") == "INTEGER":
4896 try:
4897 calculated_params[param_name] = int(calculated_params[param_name])
4898 except ValueError: # error converting string to int
4899 raise LcmException(
4900 "Parameter {} of primitive {} must be integer".format(
4901 param_name, primitive_desc["name"]
4902 )
4903 )
4904 elif parameter.get("data-type") == "BOOLEAN":
4905 calculated_params[param_name] = not (
4906 (str(calculated_params[param_name])).lower() == "false"
4907 )
4908
4909 # add always ns_config_info if primitive name is config
4910 if primitive_desc["name"] == "config":
4911 if "ns_config_info" in instantiation_params:
4912 calculated_params["ns_config_info"] = instantiation_params[
4913 "ns_config_info"
4914 ]
4915 return calculated_params
4916
4917 def _look_for_deployed_vca(
4918 self,
4919 deployed_vca,
4920 member_vnf_index,
4921 vdu_id,
4922 vdu_count_index,
4923 kdu_name=None,
4924 ee_descriptor_id=None,
4925 ):
4926 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4927 for vca in deployed_vca:
4928 if not vca:
4929 continue
4930 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4931 continue
4932 if (
4933 vdu_count_index is not None
4934 and vdu_count_index != vca["vdu_count_index"]
4935 ):
4936 continue
4937 if kdu_name and kdu_name != vca["kdu_name"]:
4938 continue
4939 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4940 continue
4941 break
4942 else:
4943 # vca_deployed not found
4944 raise LcmException(
4945 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4946 " is not deployed".format(
4947 member_vnf_index,
4948 vdu_id,
4949 vdu_count_index,
4950 kdu_name,
4951 ee_descriptor_id,
4952 )
4953 )
4954 # get ee_id
4955 ee_id = vca.get("ee_id")
4956 vca_type = vca.get(
4957 "type", "lxc_proxy_charm"
4958 ) # default value for backward compatibility - proxy charm
4959 if not ee_id:
4960 raise LcmException(
4961 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4962 "execution environment".format(
4963 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4964 )
4965 )
4966 return ee_id, vca_type
4967
4968 async def _ns_execute_primitive(
4969 self,
4970 ee_id,
4971 primitive,
4972 primitive_params,
4973 retries=0,
4974 retries_interval=30,
4975 timeout=None,
4976 vca_type=None,
4977 db_dict=None,
4978 vca_id: str = None,
4979 ) -> (str, str):
4980 try:
4981 if primitive == "config":
4982 primitive_params = {"params": primitive_params}
4983
4984 vca_type = vca_type or "lxc_proxy_charm"
4985
4986 while retries >= 0:
4987 try:
4988 output = await asyncio.wait_for(
4989 self.vca_map[vca_type].exec_primitive(
4990 ee_id=ee_id,
4991 primitive_name=primitive,
4992 params_dict=primitive_params,
4993 progress_timeout=self.timeout_progress_primitive,
4994 total_timeout=self.timeout_primitive,
4995 db_dict=db_dict,
4996 vca_id=vca_id,
4997 vca_type=vca_type,
4998 ),
4999 timeout=timeout or self.timeout_primitive,
5000 )
5001 # execution was OK
5002 break
5003 except asyncio.CancelledError:
5004 raise
5005 except Exception as e:
5006 retries -= 1
5007 if retries >= 0:
5008 self.logger.debug(
5009 "Error executing action {} on {} -> {}".format(
5010 primitive, ee_id, e
5011 )
5012 )
5013 # wait and retry
5014 await asyncio.sleep(retries_interval, loop=self.loop)
5015 else:
5016 if isinstance(e, asyncio.TimeoutError):
5017 e = N2VCException(
5018 message="Timed out waiting for action to complete"
5019 )
5020 return "FAILED", getattr(e, "message", repr(e))
5021
5022 return "COMPLETED", output
5023
5024 except (LcmException, asyncio.CancelledError):
5025 raise
5026 except Exception as e:
5027 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5028
    async def vca_status_refresh(self, nsr_id, nslcmop_id):
        """
        Updating the vca_status with latest juju information in nsrs record
        :param: nsr_id: Id of the nsr
        :param: nslcmop_id: Id of the nslcmop
        :return: None
        """

        self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        vca_id = self.get_vca_id({}, db_nsr)
        if db_nsr["_admin"]["deployed"]["K8s"]:
            # KDU-based deployment: refresh status through the K8s update callback
            for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
                cluster_uuid, kdu_instance, cluster_type = (
                    k8s["k8scluster-uuid"],
                    k8s["kdu-instance"],
                    k8s["k8scluster-type"],
                )
                await self._on_update_k8s_db(
                    cluster_uuid=cluster_uuid,
                    kdu_instance=kdu_instance,
                    filter={"_id": nsr_id},
                    vca_id=vca_id,
                    cluster_type=cluster_type,
                )
        else:
            # Charm-based deployment: refresh every deployed VCA entry through
            # the N2VC update callback (empty status dict forces a re-read)
            for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
                table, filter = "nsrs", {"_id": nsr_id}
                path = "_admin.deployed.VCA.{}.".format(vca_index)
                await self._on_update_n2vc_db(table, filter, path, {})

        self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5062
    async def action(self, nsr_id, nslcmop_id):
        """Execute a config primitive (action) on an NS, VNF, VDU or KDU.

        Reads the nslcmop record to find the target (member_vnf_index, vdu_id,
        kdu_name, vdu_count_index) and the primitive with its parameters, then
        executes it either through the K8s cluster client (KDU primitives) or
        through the deployed VCA execution environment (charm primitives), and
        persists the result in the nslcmops/nsrs records before notifying kafka.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation record id holding the action parameters
        :return: (nslcmop_operation_state, detailed_status) from the finally
            block, or None when another worker already holds the HA lock
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params arrives JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # kdur additionalParams are stored JSON-encoded; decode each entry
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound when vnf_index is set; for an
            # NS-level action this call would raise NameError (caught by the
            # generic handler below) — confirm NS-level actions reach here.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive in the most specific configuration available:
            # vdu > kdu > vnf > ns
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # built-in KDU operations need no descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve the additionalParams matching the action target
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE: the loop variable "primitive" shadows the requested
                # primitive name read above; only primitive_name is used below.
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model from params wins; otherwise reuse the deployed
                    # one, dropping any ":version" suffix
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # descriptor-declared KDU primitive
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # charm primitive: locate the execution environment and run it
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            # NOTE: this return in the finally block supersedes the bare
            # "return" inside the try block above
            return nslcmop_operation_state, detailed_status
5426
5427 async def terminate_vdus(
5428 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5429 ):
5430 """This method terminates VDUs
5431
5432 Args:
5433 db_vnfr: VNF instance record
5434 member_vnf_index: VNF index to identify the VDUs to be removed
5435 db_nsr: NS instance record
5436 update_db_nslcmops: Nslcmop update record
5437 """
5438 vca_scaling_info = []
5439 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5440 scaling_info["scaling_direction"] = "IN"
5441 scaling_info["vdu-delete"] = {}
5442 scaling_info["kdu-delete"] = {}
5443 db_vdur = db_vnfr.get("vdur")
5444 vdur_list = copy(db_vdur)
5445 count_index = 0
5446 for index, vdu in enumerate(vdur_list):
5447 vca_scaling_info.append(
5448 {
5449 "osm_vdu_id": vdu["vdu-id-ref"],
5450 "member-vnf-index": member_vnf_index,
5451 "type": "delete",
5452 "vdu_index": count_index,
5453 }
5454 )
5455 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5456 scaling_info["vdu"].append(
5457 {
5458 "name": vdu.get("name") or vdu.get("vdu-name"),
5459 "vdu_id": vdu["vdu-id-ref"],
5460 "interface": [],
5461 }
5462 )
5463 for interface in vdu["interfaces"]:
5464 scaling_info["vdu"][index]["interface"].append(
5465 {
5466 "name": interface["name"],
5467 "ip_address": interface["ip-address"],
5468 "mac_address": interface.get("mac-address"),
5469 }
5470 )
5471 self.logger.info("NS update scaling info{}".format(scaling_info))
5472 stage[2] = "Terminating VDUs"
5473 if scaling_info.get("vdu-delete"):
5474 # scale_process = "RO"
5475 if self.ro_config.get("ng"):
5476 await self._scale_ng_ro(
5477 logging_text,
5478 db_nsr,
5479 update_db_nslcmops,
5480 db_vnfr,
5481 scaling_info,
5482 stage,
5483 )
5484
5485 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5486 """This method is to Remove VNF instances from NS.
5487
5488 Args:
5489 nsr_id: NS instance id
5490 nslcmop_id: nslcmop id of update
5491 vnf_instance_id: id of the VNF instance to be removed
5492
5493 Returns:
5494 result: (str, str) COMPLETED/FAILED, details
5495 """
5496 try:
5497 db_nsr_update = {}
5498 logging_text = "Task ns={} update ".format(nsr_id)
5499 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5500 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5501 if check_vnfr_count > 1:
5502 stage = ["", "", ""]
5503 step = "Getting nslcmop from database"
5504 self.logger.debug(
5505 step + " after having waited for previous tasks to be completed"
5506 )
5507 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5508 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5509 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5510 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5511 """ db_vnfr = self.db.get_one(
5512 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5513
5514 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5515 await self.terminate_vdus(
5516 db_vnfr,
5517 member_vnf_index,
5518 db_nsr,
5519 update_db_nslcmops,
5520 stage,
5521 logging_text,
5522 )
5523
5524 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5525 constituent_vnfr.remove(db_vnfr.get("_id"))
5526 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5527 "constituent-vnfr-ref"
5528 )
5529 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5530 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5531 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5532 return "COMPLETED", "Done"
5533 else:
5534 step = "Terminate VNF Failed with"
5535 raise LcmException(
5536 "{} Cannot terminate the last VNF in this NS.".format(
5537 vnf_instance_id
5538 )
5539 )
5540 except (LcmException, asyncio.CancelledError):
5541 raise
5542 except Exception as e:
5543 self.logger.debug("Error removing VNF {}".format(e))
5544 return "FAILED", "Error removing VNF {}".format(e)
5545
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the current VDUs of the VNF, rewrites the vnfr from the
        latest VNFD revision (connection points, vdur, revision) and then
        instantiates the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild connection points from the latest descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur was precomputed by the caller and stored in the nslcmop
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            # NOTE(review): count_index is never incremented, so every VDU is
            # (re)created with index 0 — confirm this is intended for VNFs
            # scaled beyond one instance per VDU.
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5671
5672 async def _ns_charm_upgrade(
5673 self,
5674 ee_id,
5675 charm_id,
5676 charm_type,
5677 path,
5678 timeout: float = None,
5679 ) -> (str, str):
5680 """This method upgrade charms in VNF instances
5681
5682 Args:
5683 ee_id: Execution environment id
5684 path: Local path to the charm
5685 charm_id: charm-id
5686 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5687 timeout: (Float) Timeout for the ns update operation
5688
5689 Returns:
5690 result: (str, str) COMPLETED/FAILED, details
5691 """
5692 try:
5693 charm_type = charm_type or "lxc_proxy_charm"
5694 output = await self.vca_map[charm_type].upgrade_charm(
5695 ee_id=ee_id,
5696 path=path,
5697 charm_id=charm_id,
5698 charm_type=charm_type,
5699 timeout=timeout or self.timeout_ns_update,
5700 )
5701
5702 if output:
5703 return "COMPLETED", output
5704
5705 except (LcmException, asyncio.CancelledError):
5706 raise
5707
5708 except Exception as e:
5709
5710 self.logger.debug("Error upgrading charm {}".format(path))
5711
5712 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5713
5714 async def update(self, nsr_id, nslcmop_id):
5715 """Update NS according to different update types
5716
5717 This method performs upgrade of VNF instances then updates the revision
5718 number in VNF record
5719
5720 Args:
5721 nsr_id: Network service will be updated
5722 nslcmop_id: ns lcm operation id
5723
5724 Returns:
5725 It may raise DbException, LcmException, N2VCException, K8sException
5726
5727 """
5728 # Try to lock HA task here
5729 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5730 if not task_is_locked_by_me:
5731 return
5732
5733 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5734 self.logger.debug(logging_text + "Enter")
5735
5736 # Set the required variables to be filled up later
5737 db_nsr = None
5738 db_nslcmop_update = {}
5739 vnfr_update = {}
5740 nslcmop_operation_state = None
5741 db_nsr_update = {}
5742 error_description_nslcmop = ""
5743 exc = None
5744 change_type = "updated"
5745 detailed_status = ""
5746
5747 try:
5748 # wait for any previous tasks in process
5749 step = "Waiting for previous operations to terminate"
5750 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5751 self._write_ns_status(
5752 nsr_id=nsr_id,
5753 ns_state=None,
5754 current_operation="UPDATING",
5755 current_operation_id=nslcmop_id,
5756 )
5757
5758 step = "Getting nslcmop from database"
5759 db_nslcmop = self.db.get_one(
5760 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5761 )
5762 update_type = db_nslcmop["operationParams"]["updateType"]
5763
5764 step = "Getting nsr from database"
5765 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5766 old_operational_status = db_nsr["operational-status"]
5767 db_nsr_update["operational-status"] = "updating"
5768 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5769 nsr_deployed = db_nsr["_admin"].get("deployed")
5770
5771 if update_type == "CHANGE_VNFPKG":
5772
5773 # Get the input parameters given through update request
5774 vnf_instance_id = db_nslcmop["operationParams"][
5775 "changeVnfPackageData"
5776 ].get("vnfInstanceId")
5777
5778 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5779 "vnfdId"
5780 )
5781 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5782
5783 step = "Getting vnfr from database"
5784 db_vnfr = self.db.get_one(
5785 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5786 )
5787
5788 step = "Getting vnfds from database"
5789 # Latest VNFD
5790 latest_vnfd = self.db.get_one(
5791 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5792 )
5793 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5794
5795 # Current VNFD
5796 current_vnf_revision = db_vnfr.get("revision", 1)
5797 current_vnfd = self.db.get_one(
5798 "vnfds_revisions",
5799 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5800 fail_on_empty=False,
5801 )
5802 # Charm artifact paths will be filled up later
5803 (
5804 current_charm_artifact_path,
5805 target_charm_artifact_path,
5806 charm_artifact_paths,
5807 ) = ([], [], [])
5808
5809 step = "Checking if revision has changed in VNFD"
5810 if current_vnf_revision != latest_vnfd_revision:
5811
5812 change_type = "policy_updated"
5813
5814 # There is new revision of VNFD, update operation is required
5815 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5816 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5817
5818 step = "Removing the VNFD packages if they exist in the local path"
5819 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5820 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5821
5822 step = "Get the VNFD packages from FSMongo"
5823 self.fs.sync(from_path=latest_vnfd_path)
5824 self.fs.sync(from_path=current_vnfd_path)
5825
5826 step = (
5827 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5828 )
5829 base_folder = latest_vnfd["_admin"]["storage"]
5830
5831 for charm_index, charm_deployed in enumerate(
5832 get_iterable(nsr_deployed, "VCA")
5833 ):
5834 vnf_index = db_vnfr.get("member-vnf-index-ref")
5835
5836 # Getting charm-id and charm-type
5837 if charm_deployed.get("member-vnf-index") == vnf_index:
5838 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5839 charm_type = charm_deployed.get("type")
5840
5841 # Getting ee-id
5842 ee_id = charm_deployed.get("ee_id")
5843
5844 step = "Getting descriptor config"
5845 descriptor_config = get_configuration(
5846 current_vnfd, current_vnfd["id"]
5847 )
5848
5849 if "execution-environment-list" in descriptor_config:
5850 ee_list = descriptor_config.get(
5851 "execution-environment-list", []
5852 )
5853 else:
5854 ee_list = []
5855
5856 # There could be several charm used in the same VNF
5857 for ee_item in ee_list:
5858 if ee_item.get("juju"):
5859
5860 step = "Getting charm name"
5861 charm_name = ee_item["juju"].get("charm")
5862
5863 step = "Setting Charm artifact paths"
5864 current_charm_artifact_path.append(
5865 get_charm_artifact_path(
5866 base_folder,
5867 charm_name,
5868 charm_type,
5869 current_vnf_revision,
5870 )
5871 )
5872 target_charm_artifact_path.append(
5873 get_charm_artifact_path(
5874 base_folder,
5875 charm_name,
5876 charm_type,
5877 latest_vnfd_revision,
5878 )
5879 )
5880
5881 charm_artifact_paths = zip(
5882 current_charm_artifact_path, target_charm_artifact_path
5883 )
5884
5885 step = "Checking if software version has changed in VNFD"
5886 if find_software_version(current_vnfd) != find_software_version(
5887 latest_vnfd
5888 ):
5889
5890 step = "Checking if existing VNF has charm"
5891 for current_charm_path, target_charm_path in list(
5892 charm_artifact_paths
5893 ):
5894 if current_charm_path:
5895 raise LcmException(
5896 "Software version change is not supported as VNF instance {} has charm.".format(
5897 vnf_instance_id
5898 )
5899 )
5900
5901 # There is no change in the charm package, then redeploy the VNF
5902 # based on new descriptor
5903 step = "Redeploying VNF"
5904 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5905 (result, detailed_status) = await self._ns_redeploy_vnf(
5906 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5907 )
5908 if result == "FAILED":
5909 nslcmop_operation_state = result
5910 error_description_nslcmop = detailed_status
5911 db_nslcmop_update["detailed-status"] = detailed_status
5912 self.logger.debug(
5913 logging_text
5914 + " step {} Done with result {} {}".format(
5915 step, nslcmop_operation_state, detailed_status
5916 )
5917 )
5918
5919 else:
5920 step = "Checking if any charm package has changed or not"
5921 for current_charm_path, target_charm_path in list(
5922 charm_artifact_paths
5923 ):
5924 if (
5925 current_charm_path
5926 and target_charm_path
5927 and self.check_charm_hash_changed(
5928 current_charm_path, target_charm_path
5929 )
5930 ):
5931
5932 step = "Checking whether VNF uses juju bundle"
5933 if check_juju_bundle_existence(current_vnfd):
5934
5935 raise LcmException(
5936 "Charm upgrade is not supported for the instance which"
5937 " uses juju-bundle: {}".format(
5938 check_juju_bundle_existence(current_vnfd)
5939 )
5940 )
5941
5942 step = "Upgrading Charm"
5943 (
5944 result,
5945 detailed_status,
5946 ) = await self._ns_charm_upgrade(
5947 ee_id=ee_id,
5948 charm_id=charm_id,
5949 charm_type=charm_type,
5950 path=self.fs.path + target_charm_path,
5951 timeout=timeout_seconds,
5952 )
5953
5954 if result == "FAILED":
5955 nslcmop_operation_state = result
5956 error_description_nslcmop = detailed_status
5957
5958 db_nslcmop_update["detailed-status"] = detailed_status
5959 self.logger.debug(
5960 logging_text
5961 + " step {} Done with result {} {}".format(
5962 step, nslcmop_operation_state, detailed_status
5963 )
5964 )
5965
5966 step = "Updating policies"
5967 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5968 result = "COMPLETED"
5969 detailed_status = "Done"
5970 db_nslcmop_update["detailed-status"] = "Done"
5971
5972 # If nslcmop_operation_state is None, so any operation is not failed.
5973 if not nslcmop_operation_state:
5974 nslcmop_operation_state = "COMPLETED"
5975
5976 # If update CHANGE_VNFPKG nslcmop_operation is successful
5977 # vnf revision need to be updated
5978 vnfr_update["revision"] = latest_vnfd_revision
5979 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5980
5981 self.logger.debug(
5982 logging_text
5983 + " task Done with result {} {}".format(
5984 nslcmop_operation_state, detailed_status
5985 )
5986 )
5987 elif update_type == "REMOVE_VNF":
5988 # This part is included in https://osm.etsi.org/gerrit/11876
5989 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5990 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5991 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5992 step = "Removing VNF"
5993 (result, detailed_status) = await self.remove_vnf(
5994 nsr_id, nslcmop_id, vnf_instance_id
5995 )
5996 if result == "FAILED":
5997 nslcmop_operation_state = result
5998 error_description_nslcmop = detailed_status
5999 db_nslcmop_update["detailed-status"] = detailed_status
6000 change_type = "vnf_terminated"
6001 if not nslcmop_operation_state:
6002 nslcmop_operation_state = "COMPLETED"
6003 self.logger.debug(
6004 logging_text
6005 + " task Done with result {} {}".format(
6006 nslcmop_operation_state, detailed_status
6007 )
6008 )
6009
6010 elif update_type == "OPERATE_VNF":
6011 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6012 "vnfInstanceId"
6013 ]
6014 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6015 "changeStateTo"
6016 ]
6017 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6018 "additionalParam"
6019 ]
6020 (result, detailed_status) = await self.rebuild_start_stop(
6021 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6022 )
6023 if result == "FAILED":
6024 nslcmop_operation_state = result
6025 error_description_nslcmop = detailed_status
6026 db_nslcmop_update["detailed-status"] = detailed_status
6027 if not nslcmop_operation_state:
6028 nslcmop_operation_state = "COMPLETED"
6029 self.logger.debug(
6030 logging_text
6031 + " task Done with result {} {}".format(
6032 nslcmop_operation_state, detailed_status
6033 )
6034 )
6035
6036 # If nslcmop_operation_state is None, so any operation is not failed.
6037 # All operations are executed in overall.
6038 if not nslcmop_operation_state:
6039 nslcmop_operation_state = "COMPLETED"
6040 db_nsr_update["operational-status"] = old_operational_status
6041
6042 except (DbException, LcmException, N2VCException, K8sException) as e:
6043 self.logger.error(logging_text + "Exit Exception {}".format(e))
6044 exc = e
6045 except asyncio.CancelledError:
6046 self.logger.error(
6047 logging_text + "Cancelled Exception while '{}'".format(step)
6048 )
6049 exc = "Operation was cancelled"
6050 except asyncio.TimeoutError:
6051 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6052 exc = "Timeout"
6053 except Exception as e:
6054 exc = traceback.format_exc()
6055 self.logger.critical(
6056 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6057 exc_info=True,
6058 )
6059 finally:
6060 if exc:
6061 db_nslcmop_update[
6062 "detailed-status"
6063 ] = (
6064 detailed_status
6065 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6066 nslcmop_operation_state = "FAILED"
6067 db_nsr_update["operational-status"] = old_operational_status
6068 if db_nsr:
6069 self._write_ns_status(
6070 nsr_id=nsr_id,
6071 ns_state=db_nsr["nsState"],
6072 current_operation="IDLE",
6073 current_operation_id=None,
6074 other_update=db_nsr_update,
6075 )
6076
6077 self._write_op_status(
6078 op_id=nslcmop_id,
6079 stage="",
6080 error_message=error_description_nslcmop,
6081 operation_state=nslcmop_operation_state,
6082 other_update=db_nslcmop_update,
6083 )
6084
6085 if nslcmop_operation_state:
6086 try:
6087 msg = {
6088 "nsr_id": nsr_id,
6089 "nslcmop_id": nslcmop_id,
6090 "operationState": nslcmop_operation_state,
6091 }
6092 if change_type in ("vnf_terminated", "policy_updated"):
6093 msg.update({"vnf_member_index": member_vnf_index})
6094 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6095 except Exception as e:
6096 self.logger.error(
6097 logging_text + "kafka_write notification Exception {}".format(e)
6098 )
6099 self.logger.debug(logging_text + "Exit")
6100 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6101 return nslcmop_operation_state, detailed_status
6102
6103 async def scale(self, nsr_id, nslcmop_id):
6104 # Try to lock HA task here
6105 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6106 if not task_is_locked_by_me:
6107 return
6108
6109 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6110 stage = ["", "", ""]
6111 tasks_dict_info = {}
6112 # ^ stage, step, VIM progress
6113 self.logger.debug(logging_text + "Enter")
6114 # get all needed from database
6115 db_nsr = None
6116 db_nslcmop_update = {}
6117 db_nsr_update = {}
6118 exc = None
6119 # in case of error, indicates what part of scale was failed to put nsr at error status
6120 scale_process = None
6121 old_operational_status = ""
6122 old_config_status = ""
6123 nsi_id = None
6124 try:
6125 # wait for any previous tasks in process
6126 step = "Waiting for previous operations to terminate"
6127 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6128 self._write_ns_status(
6129 nsr_id=nsr_id,
6130 ns_state=None,
6131 current_operation="SCALING",
6132 current_operation_id=nslcmop_id,
6133 )
6134
6135 step = "Getting nslcmop from database"
6136 self.logger.debug(
6137 step + " after having waited for previous tasks to be completed"
6138 )
6139 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6140
6141 step = "Getting nsr from database"
6142 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6143 old_operational_status = db_nsr["operational-status"]
6144 old_config_status = db_nsr["config-status"]
6145
6146 step = "Parsing scaling parameters"
6147 db_nsr_update["operational-status"] = "scaling"
6148 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6149 nsr_deployed = db_nsr["_admin"].get("deployed")
6150
6151 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6152 "scaleByStepData"
6153 ]["member-vnf-index"]
6154 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6155 "scaleByStepData"
6156 ]["scaling-group-descriptor"]
6157 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6158 # for backward compatibility
6159 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6160 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6161 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6162 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6163
6164 step = "Getting vnfr from database"
6165 db_vnfr = self.db.get_one(
6166 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6167 )
6168
6169 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6170
6171 step = "Getting vnfd from database"
6172 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6173
6174 base_folder = db_vnfd["_admin"]["storage"]
6175
6176 step = "Getting scaling-group-descriptor"
6177 scaling_descriptor = find_in_list(
6178 get_scaling_aspect(db_vnfd),
6179 lambda scale_desc: scale_desc["name"] == scaling_group,
6180 )
6181 if not scaling_descriptor:
6182 raise LcmException(
6183 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6184 "at vnfd:scaling-group-descriptor".format(scaling_group)
6185 )
6186
6187 step = "Sending scale order to VIM"
6188 # TODO check if ns is in a proper status
6189 nb_scale_op = 0
6190 if not db_nsr["_admin"].get("scaling-group"):
6191 self.update_db_2(
6192 "nsrs",
6193 nsr_id,
6194 {
6195 "_admin.scaling-group": [
6196 {"name": scaling_group, "nb-scale-op": 0}
6197 ]
6198 },
6199 )
6200 admin_scale_index = 0
6201 else:
6202 for admin_scale_index, admin_scale_info in enumerate(
6203 db_nsr["_admin"]["scaling-group"]
6204 ):
6205 if admin_scale_info["name"] == scaling_group:
6206 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6207 break
6208 else: # not found, set index one plus last element and add new entry with the name
6209 admin_scale_index += 1
6210 db_nsr_update[
6211 "_admin.scaling-group.{}.name".format(admin_scale_index)
6212 ] = scaling_group
6213
6214 vca_scaling_info = []
6215 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6216 if scaling_type == "SCALE_OUT":
6217 if "aspect-delta-details" not in scaling_descriptor:
6218 raise LcmException(
6219 "Aspect delta details not fount in scaling descriptor {}".format(
6220 scaling_descriptor["name"]
6221 )
6222 )
6223 # count if max-instance-count is reached
6224 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6225
6226 scaling_info["scaling_direction"] = "OUT"
6227 scaling_info["vdu-create"] = {}
6228 scaling_info["kdu-create"] = {}
6229 for delta in deltas:
6230 for vdu_delta in delta.get("vdu-delta", {}):
6231 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6232 # vdu_index also provides the number of instance of the targeted vdu
6233 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6234 cloud_init_text = self._get_vdu_cloud_init_content(
6235 vdud, db_vnfd
6236 )
6237 if cloud_init_text:
6238 additional_params = (
6239 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6240 or {}
6241 )
6242 cloud_init_list = []
6243
6244 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6245 max_instance_count = 10
6246 if vdu_profile and "max-number-of-instances" in vdu_profile:
6247 max_instance_count = vdu_profile.get(
6248 "max-number-of-instances", 10
6249 )
6250
6251 default_instance_num = get_number_of_instances(
6252 db_vnfd, vdud["id"]
6253 )
6254 instances_number = vdu_delta.get("number-of-instances", 1)
6255 nb_scale_op += instances_number
6256
6257 new_instance_count = nb_scale_op + default_instance_num
6258 # Control if new count is over max and vdu count is less than max.
6259 # Then assign new instance count
6260 if new_instance_count > max_instance_count > vdu_count:
6261 instances_number = new_instance_count - max_instance_count
6262 else:
6263 instances_number = instances_number
6264
6265 if new_instance_count > max_instance_count:
6266 raise LcmException(
6267 "reached the limit of {} (max-instance-count) "
6268 "scaling-out operations for the "
6269 "scaling-group-descriptor '{}'".format(
6270 nb_scale_op, scaling_group
6271 )
6272 )
6273 for x in range(vdu_delta.get("number-of-instances", 1)):
6274 if cloud_init_text:
6275 # TODO Information of its own ip is not available because db_vnfr is not updated.
6276 additional_params["OSM"] = get_osm_params(
6277 db_vnfr, vdu_delta["id"], vdu_index + x
6278 )
6279 cloud_init_list.append(
6280 self._parse_cloud_init(
6281 cloud_init_text,
6282 additional_params,
6283 db_vnfd["id"],
6284 vdud["id"],
6285 )
6286 )
6287 vca_scaling_info.append(
6288 {
6289 "osm_vdu_id": vdu_delta["id"],
6290 "member-vnf-index": vnf_index,
6291 "type": "create",
6292 "vdu_index": vdu_index + x,
6293 }
6294 )
6295 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6296 for kdu_delta in delta.get("kdu-resource-delta", {}):
6297 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6298 kdu_name = kdu_profile["kdu-name"]
6299 resource_name = kdu_profile.get("resource-name", "")
6300
6301 # Might have different kdus in the same delta
6302 # Should have list for each kdu
6303 if not scaling_info["kdu-create"].get(kdu_name, None):
6304 scaling_info["kdu-create"][kdu_name] = []
6305
6306 kdur = get_kdur(db_vnfr, kdu_name)
6307 if kdur.get("helm-chart"):
6308 k8s_cluster_type = "helm-chart-v3"
6309 self.logger.debug("kdur: {}".format(kdur))
6310 if (
6311 kdur.get("helm-version")
6312 and kdur.get("helm-version") == "v2"
6313 ):
6314 k8s_cluster_type = "helm-chart"
6315 elif kdur.get("juju-bundle"):
6316 k8s_cluster_type = "juju-bundle"
6317 else:
6318 raise LcmException(
6319 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6320 "juju-bundle. Maybe an old NBI version is running".format(
6321 db_vnfr["member-vnf-index-ref"], kdu_name
6322 )
6323 )
6324
6325 max_instance_count = 10
6326 if kdu_profile and "max-number-of-instances" in kdu_profile:
6327 max_instance_count = kdu_profile.get(
6328 "max-number-of-instances", 10
6329 )
6330
6331 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6332 deployed_kdu, _ = get_deployed_kdu(
6333 nsr_deployed, kdu_name, vnf_index
6334 )
6335 if deployed_kdu is None:
6336 raise LcmException(
6337 "KDU '{}' for vnf '{}' not deployed".format(
6338 kdu_name, vnf_index
6339 )
6340 )
6341 kdu_instance = deployed_kdu.get("kdu-instance")
6342 instance_num = await self.k8scluster_map[
6343 k8s_cluster_type
6344 ].get_scale_count(
6345 resource_name,
6346 kdu_instance,
6347 vca_id=vca_id,
6348 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6349 kdu_model=deployed_kdu.get("kdu-model"),
6350 )
6351 kdu_replica_count = instance_num + kdu_delta.get(
6352 "number-of-instances", 1
6353 )
6354
6355 # Control if new count is over max and instance_num is less than max.
6356 # Then assign max instance number to kdu replica count
6357 if kdu_replica_count > max_instance_count > instance_num:
6358 kdu_replica_count = max_instance_count
6359 if kdu_replica_count > max_instance_count:
6360 raise LcmException(
6361 "reached the limit of {} (max-instance-count) "
6362 "scaling-out operations for the "
6363 "scaling-group-descriptor '{}'".format(
6364 instance_num, scaling_group
6365 )
6366 )
6367
6368 for x in range(kdu_delta.get("number-of-instances", 1)):
6369 vca_scaling_info.append(
6370 {
6371 "osm_kdu_id": kdu_name,
6372 "member-vnf-index": vnf_index,
6373 "type": "create",
6374 "kdu_index": instance_num + x - 1,
6375 }
6376 )
6377 scaling_info["kdu-create"][kdu_name].append(
6378 {
6379 "member-vnf-index": vnf_index,
6380 "type": "create",
6381 "k8s-cluster-type": k8s_cluster_type,
6382 "resource-name": resource_name,
6383 "scale": kdu_replica_count,
6384 }
6385 )
6386 elif scaling_type == "SCALE_IN":
6387 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6388
6389 scaling_info["scaling_direction"] = "IN"
6390 scaling_info["vdu-delete"] = {}
6391 scaling_info["kdu-delete"] = {}
6392
6393 for delta in deltas:
6394 for vdu_delta in delta.get("vdu-delta", {}):
6395 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6396 min_instance_count = 0
6397 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6398 if vdu_profile and "min-number-of-instances" in vdu_profile:
6399 min_instance_count = vdu_profile["min-number-of-instances"]
6400
6401 default_instance_num = get_number_of_instances(
6402 db_vnfd, vdu_delta["id"]
6403 )
6404 instance_num = vdu_delta.get("number-of-instances", 1)
6405 nb_scale_op -= instance_num
6406
6407 new_instance_count = nb_scale_op + default_instance_num
6408
6409 if new_instance_count < min_instance_count < vdu_count:
6410 instances_number = min_instance_count - new_instance_count
6411 else:
6412 instances_number = instance_num
6413
6414 if new_instance_count < min_instance_count:
6415 raise LcmException(
6416 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6417 "scaling-group-descriptor '{}'".format(
6418 nb_scale_op, scaling_group
6419 )
6420 )
6421 for x in range(vdu_delta.get("number-of-instances", 1)):
6422 vca_scaling_info.append(
6423 {
6424 "osm_vdu_id": vdu_delta["id"],
6425 "member-vnf-index": vnf_index,
6426 "type": "delete",
6427 "vdu_index": vdu_index - 1 - x,
6428 }
6429 )
6430 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6431 for kdu_delta in delta.get("kdu-resource-delta", {}):
6432 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6433 kdu_name = kdu_profile["kdu-name"]
6434 resource_name = kdu_profile.get("resource-name", "")
6435
6436 if not scaling_info["kdu-delete"].get(kdu_name, None):
6437 scaling_info["kdu-delete"][kdu_name] = []
6438
6439 kdur = get_kdur(db_vnfr, kdu_name)
6440 if kdur.get("helm-chart"):
6441 k8s_cluster_type = "helm-chart-v3"
6442 self.logger.debug("kdur: {}".format(kdur))
6443 if (
6444 kdur.get("helm-version")
6445 and kdur.get("helm-version") == "v2"
6446 ):
6447 k8s_cluster_type = "helm-chart"
6448 elif kdur.get("juju-bundle"):
6449 k8s_cluster_type = "juju-bundle"
6450 else:
6451 raise LcmException(
6452 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6453 "juju-bundle. Maybe an old NBI version is running".format(
6454 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6455 )
6456 )
6457
6458 min_instance_count = 0
6459 if kdu_profile and "min-number-of-instances" in kdu_profile:
6460 min_instance_count = kdu_profile["min-number-of-instances"]
6461
6462 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6463 deployed_kdu, _ = get_deployed_kdu(
6464 nsr_deployed, kdu_name, vnf_index
6465 )
6466 if deployed_kdu is None:
6467 raise LcmException(
6468 "KDU '{}' for vnf '{}' not deployed".format(
6469 kdu_name, vnf_index
6470 )
6471 )
6472 kdu_instance = deployed_kdu.get("kdu-instance")
6473 instance_num = await self.k8scluster_map[
6474 k8s_cluster_type
6475 ].get_scale_count(
6476 resource_name,
6477 kdu_instance,
6478 vca_id=vca_id,
6479 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6480 kdu_model=deployed_kdu.get("kdu-model"),
6481 )
6482 kdu_replica_count = instance_num - kdu_delta.get(
6483 "number-of-instances", 1
6484 )
6485
6486 if kdu_replica_count < min_instance_count < instance_num:
6487 kdu_replica_count = min_instance_count
6488 if kdu_replica_count < min_instance_count:
6489 raise LcmException(
6490 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6491 "scaling-group-descriptor '{}'".format(
6492 instance_num, scaling_group
6493 )
6494 )
6495
6496 for x in range(kdu_delta.get("number-of-instances", 1)):
6497 vca_scaling_info.append(
6498 {
6499 "osm_kdu_id": kdu_name,
6500 "member-vnf-index": vnf_index,
6501 "type": "delete",
6502 "kdu_index": instance_num - x - 1,
6503 }
6504 )
6505 scaling_info["kdu-delete"][kdu_name].append(
6506 {
6507 "member-vnf-index": vnf_index,
6508 "type": "delete",
6509 "k8s-cluster-type": k8s_cluster_type,
6510 "resource-name": resource_name,
6511 "scale": kdu_replica_count,
6512 }
6513 )
6514
6515 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6516 vdu_delete = copy(scaling_info.get("vdu-delete"))
6517 if scaling_info["scaling_direction"] == "IN":
6518 for vdur in reversed(db_vnfr["vdur"]):
6519 if vdu_delete.get(vdur["vdu-id-ref"]):
6520 vdu_delete[vdur["vdu-id-ref"]] -= 1
6521 scaling_info["vdu"].append(
6522 {
6523 "name": vdur.get("name") or vdur.get("vdu-name"),
6524 "vdu_id": vdur["vdu-id-ref"],
6525 "interface": [],
6526 }
6527 )
6528 for interface in vdur["interfaces"]:
6529 scaling_info["vdu"][-1]["interface"].append(
6530 {
6531 "name": interface["name"],
6532 "ip_address": interface["ip-address"],
6533 "mac_address": interface.get("mac-address"),
6534 }
6535 )
6536 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6537
6538 # PRE-SCALE BEGIN
6539 step = "Executing pre-scale vnf-config-primitive"
6540 if scaling_descriptor.get("scaling-config-action"):
6541 for scaling_config_action in scaling_descriptor[
6542 "scaling-config-action"
6543 ]:
6544 if (
6545 scaling_config_action.get("trigger") == "pre-scale-in"
6546 and scaling_type == "SCALE_IN"
6547 ) or (
6548 scaling_config_action.get("trigger") == "pre-scale-out"
6549 and scaling_type == "SCALE_OUT"
6550 ):
6551 vnf_config_primitive = scaling_config_action[
6552 "vnf-config-primitive-name-ref"
6553 ]
6554 step = db_nslcmop_update[
6555 "detailed-status"
6556 ] = "executing pre-scale scaling-config-action '{}'".format(
6557 vnf_config_primitive
6558 )
6559
6560 # look for primitive
6561 for config_primitive in (
6562 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6563 ).get("config-primitive", ()):
6564 if config_primitive["name"] == vnf_config_primitive:
6565 break
6566 else:
6567 raise LcmException(
6568 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6569 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6570 "primitive".format(scaling_group, vnf_config_primitive)
6571 )
6572
6573 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6574 if db_vnfr.get("additionalParamsForVnf"):
6575 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6576
6577 scale_process = "VCA"
6578 db_nsr_update["config-status"] = "configuring pre-scaling"
6579 primitive_params = self._map_primitive_params(
6580 config_primitive, {}, vnfr_params
6581 )
6582
6583 # Pre-scale retry check: Check if this sub-operation has been executed before
6584 op_index = self._check_or_add_scale_suboperation(
6585 db_nslcmop,
6586 vnf_index,
6587 vnf_config_primitive,
6588 primitive_params,
6589 "PRE-SCALE",
6590 )
6591 if op_index == self.SUBOPERATION_STATUS_SKIP:
6592 # Skip sub-operation
6593 result = "COMPLETED"
6594 result_detail = "Done"
6595 self.logger.debug(
6596 logging_text
6597 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6598 vnf_config_primitive, result, result_detail
6599 )
6600 )
6601 else:
6602 if op_index == self.SUBOPERATION_STATUS_NEW:
6603 # New sub-operation: Get index of this sub-operation
6604 op_index = (
6605 len(db_nslcmop.get("_admin", {}).get("operations"))
6606 - 1
6607 )
6608 self.logger.debug(
6609 logging_text
6610 + "vnf_config_primitive={} New sub-operation".format(
6611 vnf_config_primitive
6612 )
6613 )
6614 else:
6615 # retry: Get registered params for this existing sub-operation
6616 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6617 op_index
6618 ]
6619 vnf_index = op.get("member_vnf_index")
6620 vnf_config_primitive = op.get("primitive")
6621 primitive_params = op.get("primitive_params")
6622 self.logger.debug(
6623 logging_text
6624 + "vnf_config_primitive={} Sub-operation retry".format(
6625 vnf_config_primitive
6626 )
6627 )
6628 # Execute the primitive, either with new (first-time) or registered (reintent) args
6629 ee_descriptor_id = config_primitive.get(
6630 "execution-environment-ref"
6631 )
6632 primitive_name = config_primitive.get(
6633 "execution-environment-primitive", vnf_config_primitive
6634 )
6635 ee_id, vca_type = self._look_for_deployed_vca(
6636 nsr_deployed["VCA"],
6637 member_vnf_index=vnf_index,
6638 vdu_id=None,
6639 vdu_count_index=None,
6640 ee_descriptor_id=ee_descriptor_id,
6641 )
6642 result, result_detail = await self._ns_execute_primitive(
6643 ee_id,
6644 primitive_name,
6645 primitive_params,
6646 vca_type=vca_type,
6647 vca_id=vca_id,
6648 )
6649 self.logger.debug(
6650 logging_text
6651 + "vnf_config_primitive={} Done with result {} {}".format(
6652 vnf_config_primitive, result, result_detail
6653 )
6654 )
6655 # Update operationState = COMPLETED | FAILED
6656 self._update_suboperation_status(
6657 db_nslcmop, op_index, result, result_detail
6658 )
6659
6660 if result == "FAILED":
6661 raise LcmException(result_detail)
6662 db_nsr_update["config-status"] = old_config_status
6663 scale_process = None
6664 # PRE-SCALE END
6665
6666 db_nsr_update[
6667 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6668 ] = nb_scale_op
6669 db_nsr_update[
6670 "_admin.scaling-group.{}.time".format(admin_scale_index)
6671 ] = time()
6672
6673 # SCALE-IN VCA - BEGIN
6674 if vca_scaling_info:
6675 step = db_nslcmop_update[
6676 "detailed-status"
6677 ] = "Deleting the execution environments"
6678 scale_process = "VCA"
6679 for vca_info in vca_scaling_info:
6680 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6681 member_vnf_index = str(vca_info["member-vnf-index"])
6682 self.logger.debug(
6683 logging_text + "vdu info: {}".format(vca_info)
6684 )
6685 if vca_info.get("osm_vdu_id"):
6686 vdu_id = vca_info["osm_vdu_id"]
6687 vdu_index = int(vca_info["vdu_index"])
6688 stage[
6689 1
6690 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6691 member_vnf_index, vdu_id, vdu_index
6692 )
6693 stage[2] = step = "Scaling in VCA"
6694 self._write_op_status(op_id=nslcmop_id, stage=stage)
6695 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6696 config_update = db_nsr["configurationStatus"]
6697 for vca_index, vca in enumerate(vca_update):
6698 if (
6699 (vca or vca.get("ee_id"))
6700 and vca["member-vnf-index"] == member_vnf_index
6701 and vca["vdu_count_index"] == vdu_index
6702 ):
6703 if vca.get("vdu_id"):
6704 config_descriptor = get_configuration(
6705 db_vnfd, vca.get("vdu_id")
6706 )
6707 elif vca.get("kdu_name"):
6708 config_descriptor = get_configuration(
6709 db_vnfd, vca.get("kdu_name")
6710 )
6711 else:
6712 config_descriptor = get_configuration(
6713 db_vnfd, db_vnfd["id"]
6714 )
6715 operation_params = (
6716 db_nslcmop.get("operationParams") or {}
6717 )
6718 exec_terminate_primitives = not operation_params.get(
6719 "skip_terminate_primitives"
6720 ) and vca.get("needed_terminate")
6721 task = asyncio.ensure_future(
6722 asyncio.wait_for(
6723 self.destroy_N2VC(
6724 logging_text,
6725 db_nslcmop,
6726 vca,
6727 config_descriptor,
6728 vca_index,
6729 destroy_ee=True,
6730 exec_primitives=exec_terminate_primitives,
6731 scaling_in=True,
6732 vca_id=vca_id,
6733 ),
6734 timeout=self.timeout_charm_delete,
6735 )
6736 )
6737 tasks_dict_info[task] = "Terminating VCA {}".format(
6738 vca.get("ee_id")
6739 )
6740 del vca_update[vca_index]
6741 del config_update[vca_index]
6742 # wait for pending tasks of terminate primitives
6743 if tasks_dict_info:
6744 self.logger.debug(
6745 logging_text
6746 + "Waiting for tasks {}".format(
6747 list(tasks_dict_info.keys())
6748 )
6749 )
6750 error_list = await self._wait_for_tasks(
6751 logging_text,
6752 tasks_dict_info,
6753 min(
6754 self.timeout_charm_delete, self.timeout_ns_terminate
6755 ),
6756 stage,
6757 nslcmop_id,
6758 )
6759 tasks_dict_info.clear()
6760 if error_list:
6761 raise LcmException("; ".join(error_list))
6762
6763 db_vca_and_config_update = {
6764 "_admin.deployed.VCA": vca_update,
6765 "configurationStatus": config_update,
6766 }
6767 self.update_db_2(
6768 "nsrs", db_nsr["_id"], db_vca_and_config_update
6769 )
6770 scale_process = None
6771 # SCALE-IN VCA - END
6772
6773 # SCALE RO - BEGIN
6774 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6775 scale_process = "RO"
6776 if self.ro_config.get("ng"):
6777 await self._scale_ng_ro(
6778 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6779 )
6780 scaling_info.pop("vdu-create", None)
6781 scaling_info.pop("vdu-delete", None)
6782
6783 scale_process = None
6784 # SCALE RO - END
6785
6786 # SCALE KDU - BEGIN
6787 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6788 scale_process = "KDU"
6789 await self._scale_kdu(
6790 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6791 )
6792 scaling_info.pop("kdu-create", None)
6793 scaling_info.pop("kdu-delete", None)
6794
6795 scale_process = None
6796 # SCALE KDU - END
6797
6798 if db_nsr_update:
6799 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6800
6801 # SCALE-UP VCA - BEGIN
6802 if vca_scaling_info:
6803 step = db_nslcmop_update[
6804 "detailed-status"
6805 ] = "Creating new execution environments"
6806 scale_process = "VCA"
6807 for vca_info in vca_scaling_info:
6808 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6809 member_vnf_index = str(vca_info["member-vnf-index"])
6810 self.logger.debug(
6811 logging_text + "vdu info: {}".format(vca_info)
6812 )
6813 vnfd_id = db_vnfr["vnfd-ref"]
6814 if vca_info.get("osm_vdu_id"):
6815 vdu_index = int(vca_info["vdu_index"])
6816 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6817 if db_vnfr.get("additionalParamsForVnf"):
6818 deploy_params.update(
6819 parse_yaml_strings(
6820 db_vnfr["additionalParamsForVnf"].copy()
6821 )
6822 )
6823 descriptor_config = get_configuration(
6824 db_vnfd, db_vnfd["id"]
6825 )
6826 if descriptor_config:
6827 vdu_id = None
6828 vdu_name = None
6829 kdu_name = None
6830 self._deploy_n2vc(
6831 logging_text=logging_text
6832 + "member_vnf_index={} ".format(member_vnf_index),
6833 db_nsr=db_nsr,
6834 db_vnfr=db_vnfr,
6835 nslcmop_id=nslcmop_id,
6836 nsr_id=nsr_id,
6837 nsi_id=nsi_id,
6838 vnfd_id=vnfd_id,
6839 vdu_id=vdu_id,
6840 kdu_name=kdu_name,
6841 member_vnf_index=member_vnf_index,
6842 vdu_index=vdu_index,
6843 vdu_name=vdu_name,
6844 deploy_params=deploy_params,
6845 descriptor_config=descriptor_config,
6846 base_folder=base_folder,
6847 task_instantiation_info=tasks_dict_info,
6848 stage=stage,
6849 )
6850 vdu_id = vca_info["osm_vdu_id"]
6851 vdur = find_in_list(
6852 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6853 )
6854 descriptor_config = get_configuration(db_vnfd, vdu_id)
6855 if vdur.get("additionalParams"):
6856 deploy_params_vdu = parse_yaml_strings(
6857 vdur["additionalParams"]
6858 )
6859 else:
6860 deploy_params_vdu = deploy_params
6861 deploy_params_vdu["OSM"] = get_osm_params(
6862 db_vnfr, vdu_id, vdu_count_index=vdu_index
6863 )
6864 if descriptor_config:
6865 vdu_name = None
6866 kdu_name = None
6867 stage[
6868 1
6869 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6870 member_vnf_index, vdu_id, vdu_index
6871 )
6872 stage[2] = step = "Scaling out VCA"
6873 self._write_op_status(op_id=nslcmop_id, stage=stage)
6874 self._deploy_n2vc(
6875 logging_text=logging_text
6876 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6877 member_vnf_index, vdu_id, vdu_index
6878 ),
6879 db_nsr=db_nsr,
6880 db_vnfr=db_vnfr,
6881 nslcmop_id=nslcmop_id,
6882 nsr_id=nsr_id,
6883 nsi_id=nsi_id,
6884 vnfd_id=vnfd_id,
6885 vdu_id=vdu_id,
6886 kdu_name=kdu_name,
6887 member_vnf_index=member_vnf_index,
6888 vdu_index=vdu_index,
6889 vdu_name=vdu_name,
6890 deploy_params=deploy_params_vdu,
6891 descriptor_config=descriptor_config,
6892 base_folder=base_folder,
6893 task_instantiation_info=tasks_dict_info,
6894 stage=stage,
6895 )
6896 # SCALE-UP VCA - END
6897 scale_process = None
6898
6899 # POST-SCALE BEGIN
6900 # execute primitive service POST-SCALING
6901 step = "Executing post-scale vnf-config-primitive"
6902 if scaling_descriptor.get("scaling-config-action"):
6903 for scaling_config_action in scaling_descriptor[
6904 "scaling-config-action"
6905 ]:
6906 if (
6907 scaling_config_action.get("trigger") == "post-scale-in"
6908 and scaling_type == "SCALE_IN"
6909 ) or (
6910 scaling_config_action.get("trigger") == "post-scale-out"
6911 and scaling_type == "SCALE_OUT"
6912 ):
6913 vnf_config_primitive = scaling_config_action[
6914 "vnf-config-primitive-name-ref"
6915 ]
6916 step = db_nslcmop_update[
6917 "detailed-status"
6918 ] = "executing post-scale scaling-config-action '{}'".format(
6919 vnf_config_primitive
6920 )
6921
6922 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6923 if db_vnfr.get("additionalParamsForVnf"):
6924 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6925
6926 # look for primitive
6927 for config_primitive in (
6928 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6929 ).get("config-primitive", ()):
6930 if config_primitive["name"] == vnf_config_primitive:
6931 break
6932 else:
6933 raise LcmException(
6934 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6935 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6936 "config-primitive".format(
6937 scaling_group, vnf_config_primitive
6938 )
6939 )
6940 scale_process = "VCA"
6941 db_nsr_update["config-status"] = "configuring post-scaling"
6942 primitive_params = self._map_primitive_params(
6943 config_primitive, {}, vnfr_params
6944 )
6945
6946 # Post-scale retry check: Check if this sub-operation has been executed before
6947 op_index = self._check_or_add_scale_suboperation(
6948 db_nslcmop,
6949 vnf_index,
6950 vnf_config_primitive,
6951 primitive_params,
6952 "POST-SCALE",
6953 )
6954 if op_index == self.SUBOPERATION_STATUS_SKIP:
6955 # Skip sub-operation
6956 result = "COMPLETED"
6957 result_detail = "Done"
6958 self.logger.debug(
6959 logging_text
6960 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6961 vnf_config_primitive, result, result_detail
6962 )
6963 )
6964 else:
6965 if op_index == self.SUBOPERATION_STATUS_NEW:
6966 # New sub-operation: Get index of this sub-operation
6967 op_index = (
6968 len(db_nslcmop.get("_admin", {}).get("operations"))
6969 - 1
6970 )
6971 self.logger.debug(
6972 logging_text
6973 + "vnf_config_primitive={} New sub-operation".format(
6974 vnf_config_primitive
6975 )
6976 )
6977 else:
6978 # retry: Get registered params for this existing sub-operation
6979 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6980 op_index
6981 ]
6982 vnf_index = op.get("member_vnf_index")
6983 vnf_config_primitive = op.get("primitive")
6984 primitive_params = op.get("primitive_params")
6985 self.logger.debug(
6986 logging_text
6987 + "vnf_config_primitive={} Sub-operation retry".format(
6988 vnf_config_primitive
6989 )
6990 )
6991 # Execute the primitive, either with new (first-time) or registered (reintent) args
6992 ee_descriptor_id = config_primitive.get(
6993 "execution-environment-ref"
6994 )
6995 primitive_name = config_primitive.get(
6996 "execution-environment-primitive", vnf_config_primitive
6997 )
6998 ee_id, vca_type = self._look_for_deployed_vca(
6999 nsr_deployed["VCA"],
7000 member_vnf_index=vnf_index,
7001 vdu_id=None,
7002 vdu_count_index=None,
7003 ee_descriptor_id=ee_descriptor_id,
7004 )
7005 result, result_detail = await self._ns_execute_primitive(
7006 ee_id,
7007 primitive_name,
7008 primitive_params,
7009 vca_type=vca_type,
7010 vca_id=vca_id,
7011 )
7012 self.logger.debug(
7013 logging_text
7014 + "vnf_config_primitive={} Done with result {} {}".format(
7015 vnf_config_primitive, result, result_detail
7016 )
7017 )
7018 # Update operationState = COMPLETED | FAILED
7019 self._update_suboperation_status(
7020 db_nslcmop, op_index, result, result_detail
7021 )
7022
7023 if result == "FAILED":
7024 raise LcmException(result_detail)
7025 db_nsr_update["config-status"] = old_config_status
7026 scale_process = None
7027 # POST-SCALE END
7028
7029 db_nsr_update[
7030 "detailed-status"
7031 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7032 db_nsr_update["operational-status"] = (
7033 "running"
7034 if old_operational_status == "failed"
7035 else old_operational_status
7036 )
7037 db_nsr_update["config-status"] = old_config_status
7038 return
7039 except (
7040 ROclient.ROClientException,
7041 DbException,
7042 LcmException,
7043 NgRoException,
7044 ) as e:
7045 self.logger.error(logging_text + "Exit Exception {}".format(e))
7046 exc = e
7047 except asyncio.CancelledError:
7048 self.logger.error(
7049 logging_text + "Cancelled Exception while '{}'".format(step)
7050 )
7051 exc = "Operation was cancelled"
7052 except Exception as e:
7053 exc = traceback.format_exc()
7054 self.logger.critical(
7055 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7056 exc_info=True,
7057 )
7058 finally:
7059 self._write_ns_status(
7060 nsr_id=nsr_id,
7061 ns_state=None,
7062 current_operation="IDLE",
7063 current_operation_id=None,
7064 )
7065 if tasks_dict_info:
7066 stage[1] = "Waiting for instantiate pending tasks."
7067 self.logger.debug(logging_text + stage[1])
7068 exc = await self._wait_for_tasks(
7069 logging_text,
7070 tasks_dict_info,
7071 self.timeout_ns_deploy,
7072 stage,
7073 nslcmop_id,
7074 nsr_id=nsr_id,
7075 )
7076 if exc:
7077 db_nslcmop_update[
7078 "detailed-status"
7079 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7080 nslcmop_operation_state = "FAILED"
7081 if db_nsr:
7082 db_nsr_update["operational-status"] = old_operational_status
7083 db_nsr_update["config-status"] = old_config_status
7084 db_nsr_update["detailed-status"] = ""
7085 if scale_process:
7086 if "VCA" in scale_process:
7087 db_nsr_update["config-status"] = "failed"
7088 if "RO" in scale_process:
7089 db_nsr_update["operational-status"] = "failed"
7090 db_nsr_update[
7091 "detailed-status"
7092 ] = "FAILED scaling nslcmop={} {}: {}".format(
7093 nslcmop_id, step, exc
7094 )
7095 else:
7096 error_description_nslcmop = None
7097 nslcmop_operation_state = "COMPLETED"
7098 db_nslcmop_update["detailed-status"] = "Done"
7099
7100 self._write_op_status(
7101 op_id=nslcmop_id,
7102 stage="",
7103 error_message=error_description_nslcmop,
7104 operation_state=nslcmop_operation_state,
7105 other_update=db_nslcmop_update,
7106 )
7107 if db_nsr:
7108 self._write_ns_status(
7109 nsr_id=nsr_id,
7110 ns_state=None,
7111 current_operation="IDLE",
7112 current_operation_id=None,
7113 other_update=db_nsr_update,
7114 )
7115
7116 if nslcmop_operation_state:
7117 try:
7118 msg = {
7119 "nsr_id": nsr_id,
7120 "nslcmop_id": nslcmop_id,
7121 "operationState": nslcmop_operation_state,
7122 }
7123 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7124 except Exception as e:
7125 self.logger.error(
7126 logging_text + "kafka_write notification Exception {}".format(e)
7127 )
7128 self.logger.debug(logging_text + "Exit")
7129 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7130
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale the KDU instances of a NS at the k8s cluster.

        For every kdu listed in scaling_info, run the optional
        terminate-config-primitives (on delete) before the scale operation and
        the optional initial-config-primitives (on create) after it.

        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity, used to build the db path for status updates
        :param nsr_deployed: nsr _admin.deployed content, used to locate each kdu
        :param db_vnfd: vnfd database record of the vnf that owns the kdus
        :param vca_id: VCA identity, passed through to the k8s connector
        :param scaling_info: dict with a "kdu-create" or "kdu-delete" entry,
            each a dict indexed by kdu_name with a list of scaling records
        """
        # only one of kdu-create / kdu-delete is expected per operation
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db path where the k8s connector writes status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # terminate primitives only run when no juju EE manages the kdu
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # initial primitives only run when no juju EE manages the kdu
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7236
7237 async def _scale_ng_ro(
7238 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7239 ):
7240 nsr_id = db_nslcmop["nsInstanceId"]
7241 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7242 db_vnfrs = {}
7243
7244 # read from db: vnfd's for every vnf
7245 db_vnfds = []
7246
7247 # for each vnf in ns, read vnfd
7248 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7249 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7250 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7251 # if we haven't this vnfd, read it from db
7252 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7253 # read from db
7254 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7255 db_vnfds.append(vnfd)
7256 n2vc_key = self.n2vc.get_public_key()
7257 n2vc_key_list = [n2vc_key]
7258 self.scale_vnfr(
7259 db_vnfr,
7260 vdu_scaling_info.get("vdu-create"),
7261 vdu_scaling_info.get("vdu-delete"),
7262 mark_delete=True,
7263 )
7264 # db_vnfr has been updated, update db_vnfrs to use it
7265 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7266 await self._instantiate_ng_ro(
7267 logging_text,
7268 nsr_id,
7269 db_nsd,
7270 db_nsr,
7271 db_nslcmop,
7272 db_vnfrs,
7273 db_vnfds,
7274 n2vc_key_list,
7275 stage=stage,
7276 start_deploy=time(),
7277 timeout_ns_deploy=self.timeout_ns_deploy,
7278 )
7279 if vdu_scaling_info.get("vdu-delete"):
7280 self.scale_vnfr(
7281 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7282 )
7283
7284 async def extract_prometheus_scrape_jobs(
7285 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7286 ):
7287 # look if exist a file called 'prometheus*.j2' and
7288 artifact_content = self.fs.dir_ls(artifact_path)
7289 job_file = next(
7290 (
7291 f
7292 for f in artifact_content
7293 if f.startswith("prometheus") and f.endswith(".j2")
7294 ),
7295 None,
7296 )
7297 if not job_file:
7298 return
7299 with self.fs.file_open((artifact_path, job_file), "r") as f:
7300 job_data = f.read()
7301
7302 # TODO get_service
7303 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7304 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7305 host_port = "80"
7306 vnfr_id = vnfr_id.replace("-", "")
7307 variables = {
7308 "JOB_NAME": vnfr_id,
7309 "TARGET_IP": target_ip,
7310 "EXPORTER_POD_IP": host_name,
7311 "EXPORTER_POD_PORT": host_port,
7312 }
7313 job_list = parse_job(job_data, variables)
7314 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7315 for job in job_list:
7316 if (
7317 not isinstance(job.get("job_name"), str)
7318 or vnfr_id not in job["job_name"]
7319 ):
7320 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7321 job["nsr_id"] = nsr_id
7322 job["vnfr_id"] = vnfr_id
7323 return job_list
7324
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """
        Rebuild, start or stop a single VDU instance of a VNF through RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id
        :param vnf_id: _id of the vnfr that owns the target vdu
        :param additional_param: dict carrying "vdu_id" and "count-index" of
            the target vdu instance
        :param operation_type: RO operate action name (e.g. "rebuild",
            "start", "stop"); also written to nsr operational-status
        :return: tuple (operation_state, detail): ("COMPLETED", "Done") on
            success, ("FAILED", <error text>) otherwise
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # restrict to the vdurs of the requested vdu...
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            # ...then locate the exact instance by its count-index
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO reports the action finished (or timeout expires)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached from the except branches above, where exc was set
        return "FAILED", "Error in operate VNF {}".format(exc)
7411
7412 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7413 """
7414 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7415
7416 :param: vim_account_id: VIM Account ID
7417
7418 :return: (cloud_name, cloud_credential)
7419 """
7420 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7421 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7422
7423 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7424 """
7425 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7426
7427 :param: vim_account_id: VIM Account ID
7428
7429 :return: (cloud_name, cloud_credential)
7430 """
7431 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7432 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7433
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS.

        Delegates the migration to RO and waits for completion; always resets
        the NS current-operation to IDLE and notifies the result over kafka.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            target = {}
            target.update(migrate_params)
            # delegate the migration to RO and poll until it finishes
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                # NOTE(review): db_nsr_update is populated here but never
                # persisted (no update_db_2/_write_ns_status consumes it in
                # this method) — confirm whether it should be written back
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # notify subscribers of the operation result through kafka
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7536
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Launches the RO healing as a background task and, for each target
        VNF/VDU of the operation, re-deploys the N2VC execution environments.
        Always restores/updates the nsr statuses and notifies over kafka.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # keep previous statuses so they can be restored on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            # RO healing runs concurrently with the VCA work below; it is
            # awaited in the finally block through _wait_for_tasks
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # no explicit vdu list: build one entry per existing vdur
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance stays None when no
                            # vdur matches; the .get below would then raise
                            # AttributeError — confirm a match is guaranteed
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the RO task (and any N2VC tasks) launched above
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the statuses saved before the operation started
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # notify subscribers of the operation result through kafka
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7827
7828 async def heal_RO(
7829 self,
7830 logging_text,
7831 nsr_id,
7832 db_nslcmop,
7833 stage,
7834 ):
7835 """
7836 Heal at RO
7837 :param logging_text: preffix text to use at logging
7838 :param nsr_id: nsr identity
7839 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7840 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7841 :return: None or exception
7842 """
7843
7844 def get_vim_account(vim_account_id):
7845 nonlocal db_vims
7846 if vim_account_id in db_vims:
7847 return db_vims[vim_account_id]
7848 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7849 db_vims[vim_account_id] = db_vim
7850 return db_vim
7851
7852 try:
7853 start_heal = time()
7854 ns_params = db_nslcmop.get("operationParams")
7855 if ns_params and ns_params.get("timeout_ns_heal"):
7856 timeout_ns_heal = ns_params["timeout_ns_heal"]
7857 else:
7858 timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
7859
7860 db_vims = {}
7861
7862 nslcmop_id = db_nslcmop["_id"]
7863 target = {
7864 "action_id": nslcmop_id,
7865 }
7866 self.logger.warning(
7867 "db_nslcmop={} and timeout_ns_heal={}".format(
7868 db_nslcmop, timeout_ns_heal
7869 )
7870 )
7871 target.update(db_nslcmop.get("operationParams", {}))
7872
7873 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7874 desc = await self.RO.recreate(nsr_id, target)
7875 self.logger.debug("RO return > {}".format(desc))
7876 action_id = desc["action_id"]
7877 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7878 await self._wait_ng_ro(
7879 nsr_id,
7880 action_id,
7881 nslcmop_id,
7882 start_heal,
7883 timeout_ns_heal,
7884 stage,
7885 operation="healing",
7886 )
7887
7888 # Updating NSR
7889 db_nsr_update = {
7890 "_admin.deployed.RO.operational-status": "running",
7891 "detailed-status": " ".join(stage),
7892 }
7893 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7894 self._write_op_status(nslcmop_id, stage)
7895 self.logger.debug(
7896 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7897 )
7898
7899 except Exception as e:
7900 stage[2] = "ERROR healing at VIM"
7901 # self.set_vnfr_at_error(db_vnfrs, str(e))
7902 self.logger.error(
7903 "Error healing at VIM {}".format(e),
7904 exc_info=not isinstance(
7905 e,
7906 (
7907 ROclient.ROClientException,
7908 LcmException,
7909 DbException,
7910 NgRoException,
7911 ),
7912 ),
7913 )
7914 raise
7915
7916 def _heal_n2vc(
7917 self,
7918 logging_text,
7919 db_nsr,
7920 db_vnfr,
7921 nslcmop_id,
7922 nsr_id,
7923 nsi_id,
7924 vnfd_id,
7925 vdu_id,
7926 kdu_name,
7927 member_vnf_index,
7928 vdu_index,
7929 vdu_name,
7930 deploy_params,
7931 descriptor_config,
7932 base_folder,
7933 task_instantiation_info,
7934 stage,
7935 ):
7936 # launch instantiate_N2VC in a asyncio task and register task object
7937 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
7938 # if not found, create one entry and update database
7939 # fill db_nsr._admin.deployed.VCA.<index>
7940
7941 self.logger.debug(
7942 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
7943 )
7944
7945 charm_name = ""
7946 get_charm_name = False
7947 if "execution-environment-list" in descriptor_config:
7948 ee_list = descriptor_config.get("execution-environment-list", [])
7949 elif "juju" in descriptor_config:
7950 ee_list = [descriptor_config] # ns charms
7951 if "execution-environment-list" not in descriptor_config:
7952 # charm name is only required for ns charms
7953 get_charm_name = True
7954 else: # other types as script are not supported
7955 ee_list = []
7956
7957 for ee_item in ee_list:
7958 self.logger.debug(
7959 logging_text
7960 + "_deploy_n2vc ee_item juju={}, helm={}".format(
7961 ee_item.get("juju"), ee_item.get("helm-chart")
7962 )
7963 )
7964 ee_descriptor_id = ee_item.get("id")
7965 if ee_item.get("juju"):
7966 vca_name = ee_item["juju"].get("charm")
7967 if get_charm_name:
7968 charm_name = self.find_charm_name(db_nsr, str(vca_name))
7969 vca_type = (
7970 "lxc_proxy_charm"
7971 if ee_item["juju"].get("charm") is not None
7972 else "native_charm"
7973 )
7974 if ee_item["juju"].get("cloud") == "k8s":
7975 vca_type = "k8s_proxy_charm"
7976 elif ee_item["juju"].get("proxy") is False:
7977 vca_type = "native_charm"
7978 elif ee_item.get("helm-chart"):
7979 vca_name = ee_item["helm-chart"]
7980 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
7981 vca_type = "helm"
7982 else:
7983 vca_type = "helm-v3"
7984 else:
7985 self.logger.debug(
7986 logging_text + "skipping non juju neither charm configuration"
7987 )
7988 continue
7989
7990 vca_index = -1
7991 for vca_index, vca_deployed in enumerate(
7992 db_nsr["_admin"]["deployed"]["VCA"]
7993 ):
7994 if not vca_deployed:
7995 continue
7996 if (
7997 vca_deployed.get("member-vnf-index") == member_vnf_index
7998 and vca_deployed.get("vdu_id") == vdu_id
7999 and vca_deployed.get("kdu_name") == kdu_name
8000 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8001 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8002 ):
8003 break
8004 else:
8005 # not found, create one.
8006 target = (
8007 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8008 )
8009 if vdu_id:
8010 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8011 elif kdu_name:
8012 target += "/kdu/{}".format(kdu_name)
8013 vca_deployed = {
8014 "target_element": target,
8015 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8016 "member-vnf-index": member_vnf_index,
8017 "vdu_id": vdu_id,
8018 "kdu_name": kdu_name,
8019 "vdu_count_index": vdu_index,
8020 "operational-status": "init", # TODO revise
8021 "detailed-status": "", # TODO revise
8022 "step": "initial-deploy", # TODO revise
8023 "vnfd_id": vnfd_id,
8024 "vdu_name": vdu_name,
8025 "type": vca_type,
8026 "ee_descriptor_id": ee_descriptor_id,
8027 "charm_name": charm_name,
8028 }
8029 vca_index += 1
8030
8031 # create VCA and configurationStatus in db
8032 db_dict = {
8033 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8034 "configurationStatus.{}".format(vca_index): dict(),
8035 }
8036 self.update_db_2("nsrs", nsr_id, db_dict)
8037
8038 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8039
8040 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8041 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8042 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8043
8044 # Launch task
8045 task_n2vc = asyncio.ensure_future(
8046 self.heal_N2VC(
8047 logging_text=logging_text,
8048 vca_index=vca_index,
8049 nsi_id=nsi_id,
8050 db_nsr=db_nsr,
8051 db_vnfr=db_vnfr,
8052 vdu_id=vdu_id,
8053 kdu_name=kdu_name,
8054 vdu_index=vdu_index,
8055 deploy_params=deploy_params,
8056 config_descriptor=descriptor_config,
8057 base_folder=base_folder,
8058 nslcmop_id=nslcmop_id,
8059 stage=stage,
8060 vca_type=vca_type,
8061 vca_name=vca_name,
8062 ee_config_descriptor=ee_item,
8063 )
8064 )
8065 self.lcm_tasks.register(
8066 "ns",
8067 nsr_id,
8068 nslcmop_id,
8069 "instantiate_N2VC-{}".format(vca_index),
8070 task_n2vc,
8071 )
8072 task_instantiation_info[
8073 task_n2vc
8074 ] = self.task_name_deploy_vca + " {}.{}".format(
8075 member_vnf_index or "", vdu_id or ""
8076 )
8077
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal one VCA: (re)register its execution environment and re-run Day-1.

        Runs as an asyncio task launched from _heal_n2vc for the VCA stored at
        db_nsr._admin.deployed.VCA[vca_index]. For native charms it waits for
        the healed VM to come up, registers a new execution environment with
        the VM credentials and reinstalls the configuration software. For
        proxy charms / helm it re-injects the SSH public key into the VM.
        Day-1 (initial config) primitives are executed only when the
        "run-day1" deploy parameter is truthy.

        :param stage: 3-element list mutated in place for progress reporting.
        :raises LcmException: on any failure, after writing a "BROKEN"
            configuration status for this vca_index.
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        # NOTE(review): osm_config is assembled/updated below (ns_id, vnf_id,
        # vdu_id/kdu_name) but never read again in this method — presumably
        # kept for parity with the instantiate path; confirm before removing.
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # db_dict tells the VCA connector where to write status updates.
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # step tracks the current action for error reporting in the except block.
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Narrow element_type/namespace from NS down to VNF and then VDU/KDU.
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

        # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # A primitive literally named "config" provides the charm config.
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

                # store rw_mgmt_ip in deploy params for later replacement
                deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                            check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8487
8488 async def _wait_heal_ro(
8489 self,
8490 nsr_id,
8491 timeout=600,
8492 ):
8493 start_time = time()
8494 while time() <= start_time + timeout:
8495 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8496 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8497 "operational-status"
8498 ]
8499 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8500 if operational_status_ro != "healing":
8501 break
8502 await asyncio.sleep(15, loop=self.loop)
8503 else: # timeout_ns_deploy
8504 raise NgRoException("Timeout waiting ns to deploy")
8505
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # Another LCM instance owns this operation; nothing to do here.
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled in the finally block but never
        # passed to update_db_2/_write_ns_status in this method — presumably an
        # oversight; confirm whether the nsr detailed-status should be persisted.
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            target = {}
            target.update(operationParams)
            # Delegate the actual scaling to RO and wait for it to complete.
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout_verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # Always restore the NS current-operation to IDLE, record the
            # operation outcome and release the HA task lock.
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # Notify operation completion on the kafka bus; failures to
                # notify are logged but do not fail the operation.
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")