Reformat files according to new black validation
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm_conn import K8sHelmConnector
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import randint
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
class NsLcm(LcmBase):
    # Sentinel return codes for sub-operation lookups; negative on purpose so
    # they can never collide with a real list index.
    # NOTE(review): the lookup helpers that use them are outside this chunk —
    # confirm semantics against the rest of the class.
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Human-readable label for the VCA deployment task.
    task_name_deploy_vca = "Deploying VCA"
136
def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
    """
    Init, Connect to database, filesystem storage, and messaging
    :param msg: messaging (kafka) client, forwarded to LcmBase
    :param lcm_tasks: task registry shared with the LCM main module
    :param config: LcmCfg with the LCM configuration (timeouts, RO and VCA sections)
    :param loop: asyncio event loop used by all async connectors
    :return: None
    """
    super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

    # database and filesystem are process-wide singletons
    self.db = Database().instance.db
    self.fs = Filesystem().instance.fs
    self.loop = loop
    self.lcm_tasks = lcm_tasks
    self.timeout = config.timeout
    self.ro_config = config.RO
    self.vca_config = config.VCA

    # create N2VC connector
    self.n2vc = N2VCJujuConnector(
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_n2vc_db,
        fs=self.fs,
        db=self.db,
    )

    # connector for helm-based execution environments
    self.conn_helm_ee = LCMHelmConn(
        log=self.logger,
        loop=self.loop,
        vca_config=self.vca_config,
        on_update_db=self._on_update_n2vc_db,
    )

    # helm v2 K8s connector (no db status callback)
    self.k8sclusterhelm2 = K8sHelmConnector(
        kubectl_command=self.vca_config.kubectlpath,
        helm_command=self.vca_config.helmpath,
        log=self.logger,
        on_update_db=None,
        fs=self.fs,
        db=self.db,
    )

    # helm v3 K8s connector (no db status callback)
    self.k8sclusterhelm3 = K8sHelm3Connector(
        kubectl_command=self.vca_config.kubectlpath,
        helm_command=self.vca_config.helm3path,
        fs=self.fs,
        log=self.logger,
        db=self.db,
        on_update_db=None,
    )

    # juju-bundle K8s connector; status changes are pushed to nsrs via callback
    self.k8sclusterjuju = K8sJujuConnector(
        kubectl_command=self.vca_config.kubectlpath,
        juju_command=self.vca_config.jujupath,
        log=self.logger,
        loop=self.loop,
        on_update_db=self._on_update_k8s_db,
        fs=self.fs,
        db=self.db,
    )

    # map kdu "k8s type" string -> connector instance
    # ("chart" is served by helm v3, same as "helm-chart-v3")
    self.k8scluster_map = {
        "helm-chart": self.k8sclusterhelm2,
        "helm-chart-v3": self.k8sclusterhelm3,
        "chart": self.k8sclusterhelm3,
        "juju-bundle": self.k8sclusterjuju,
        "juju": self.k8sclusterjuju,
    }

    # map VCA type string -> connector instance (charms via n2vc, helm EEs via conn_helm_ee)
    self.vca_map = {
        "lxc_proxy_charm": self.n2vc,
        "native_charm": self.n2vc,
        "k8s_proxy_charm": self.n2vc,
        "helm": self.conn_helm_ee,
        "helm-v3": self.conn_helm_ee,
    }

    # create RO client
    self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

    # map LCM operation type -> RO status-polling coroutine
    # (healing uses the dedicated recreate_status endpoint)
    self.op_status_map = {
        "instantiation": self.RO.status,
        "termination": self.RO.status,
        "migrate": self.RO.status,
        "healing": self.RO.recreate_status,
        "verticalscale": self.RO.status,
        "start_stop_rebuild": self.RO.status,
    }
224
225 @staticmethod
226 def increment_ip_mac(ip_mac, vm_index=1):
227 if not isinstance(ip_mac, str):
228 return ip_mac
229 try:
230 # try with ipv4 look for last dot
231 i = ip_mac.rfind(".")
232 if i > 0:
233 i += 1
234 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
235 # try with ipv6 or mac look for last colon. Operate in hex
236 i = ip_mac.rfind(":")
237 if i > 0:
238 i += 1
239 # format in hex, len can be 2 for mac or 4 for ipv6
240 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
241 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
242 )
243 except Exception:
244 pass
245 return None
246
247 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
248 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
249
250 try:
251 # TODO filter RO descriptor fields...
252
253 # write to database
254 db_dict = dict()
255 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
256 db_dict["deploymentStatus"] = ro_descriptor
257 self.update_db_2("nsrs", nsrs_id, db_dict)
258
259 except Exception as e:
260 self.logger.warn(
261 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
262 )
263
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
    """Refresh vcaStatus / configurationStatus / nsState in the nsr record.

    Callback invoked by the N2VC connector when juju reports a change.
    Reads the current nsr, asks n2vc for the full status of the NS model,
    derives per-VCA configuration status and an overall READY/DEGRADED
    state, and writes the result back to the "nsrs" collection.
    Errors (other than cancellation/timeout) are logged and swallowed.

    :param table: source table of the n2vc update (unused beyond the callback signature)
    :param filter: db filter; its "_id" is the nsr id
    :param path: dotted path of the updated data; its last component is the VCA index
    :param updated_data: changed data (unused here; full status is re-read)
    :param vca_id: optional VCA id forwarded to n2vc.get_status
    """
    # remove last dot from path (if exists)
    if path.endswith("."):
        path = path[:-1]

    # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
    # .format(table, filter, path, updated_data))
    try:
        nsr_id = filter.get("_id")

        # read ns record from database
        nsr = self.db.get_one(table="nsrs", q_filter=filter)
        current_ns_status = nsr.get("nsState")

        # get vca status for NS
        status_dict = await self.n2vc.get_status(
            namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
        )

        # vcaStatus
        db_dict = dict()
        db_dict["vcaStatus"] = status_dict

        # update configurationStatus for this VCA
        try:
            # the VCA index is the last dotted component of the path
            vca_index = int(path[path.rfind(".") + 1 :])

            vca_list = deep_get(
                target_dict=nsr, key_list=("_admin", "deployed", "VCA")
            )
            vca_status = vca_list[vca_index].get("status")

            configuration_status_list = nsr.get("configurationStatus")
            config_status = configuration_status_list[vca_index].get("status")

            # flip BROKEN <-> READY only when the stored status disagrees
            # with what juju reports for this VCA
            if config_status == "BROKEN" and vca_status != "failed":
                db_dict["configurationStatus"][vca_index] = "READY"
            elif config_status != "BROKEN" and vca_status == "failed":
                db_dict["configurationStatus"][vca_index] = "BROKEN"
        except Exception as e:
            # not update configurationStatus
            self.logger.debug("Error updating vca_index (ignore): {}".format(e))

        # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
        # if nsState = 'DEGRADED' check if all is OK
        is_degraded = False
        if current_ns_status in ("READY", "DEGRADED"):
            error_description = ""
            # check machines
            if status_dict.get("machines"):
                for machine_id in status_dict.get("machines"):
                    machine = status_dict.get("machines").get(machine_id)
                    # check machine agent-status
                    if machine.get("agent-status"):
                        s = machine.get("agent-status").get("status")
                        if s != "started":
                            is_degraded = True
                            error_description += (
                                "machine {} agent-status={} ; ".format(
                                    machine_id, s
                                )
                            )
                    # check machine instance status
                    if machine.get("instance-status"):
                        s = machine.get("instance-status").get("status")
                        if s != "running":
                            is_degraded = True
                            error_description += (
                                "machine {} instance-status={} ; ".format(
                                    machine_id, s
                                )
                            )
            # check applications
            if status_dict.get("applications"):
                for app_id in status_dict.get("applications"):
                    app = status_dict.get("applications").get(app_id)
                    # check application status
                    if app.get("status"):
                        s = app.get("status").get("status")
                        if s != "active":
                            is_degraded = True
                            error_description += (
                                "application {} status={} ; ".format(app_id, s)
                            )

            if error_description:
                db_dict["errorDescription"] = error_description
            if current_ns_status == "READY" and is_degraded:
                db_dict["nsState"] = "DEGRADED"
            if current_ns_status == "DEGRADED" and not is_degraded:
                db_dict["nsState"] = "READY"

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
363
364 async def _on_update_k8s_db(
365 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
366 ):
367 """
368 Updating vca status in NSR record
369 :param cluster_uuid: UUID of a k8s cluster
370 :param kdu_instance: The unique name of the KDU instance
371 :param filter: To get nsr_id
372 :cluster_type: The cluster type (juju, k8s)
373 :return: none
374 """
375
376 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
377 # .format(cluster_uuid, kdu_instance, filter))
378
379 nsr_id = filter.get("_id")
380 try:
381 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
382 cluster_uuid=cluster_uuid,
383 kdu_instance=kdu_instance,
384 yaml_format=False,
385 complete_status=True,
386 vca_id=vca_id,
387 )
388
389 # vcaStatus
390 db_dict = dict()
391 db_dict["vcaStatus"] = {nsr_id: vca_status}
392
393 self.logger.debug(
394 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
395 )
396
397 # write to database
398 self.update_db_2("nsrs", nsr_id, db_dict)
399 except (asyncio.CancelledError, asyncio.TimeoutError):
400 raise
401 except Exception as e:
402 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
403
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """Render a cloud-init Jinja2 template with the instantiation parameters.

    Undefined template variables are an error (StrictUndefined), reported
    as an LcmException naming the vnfd/vdu so the operator knows which
    'additionalParamsForVnf/Vdu' block is missing the value.
    """
    try:
        jinja_env = Environment(
            undefined=StrictUndefined,
            autoescape=select_autoescape(default_for_string=True, default=True),
        )
        return jinja_env.from_string(cloud_init_text).render(additional_params or {})
    except UndefinedError as e:
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
425
426 def _get_vdu_cloud_init_content(self, vdu, vnfd):
427 cloud_init_content = cloud_init_file = None
428 try:
429 if vdu.get("cloud-init-file"):
430 base_folder = vnfd["_admin"]["storage"]
431 if base_folder["pkg-dir"]:
432 cloud_init_file = "{}/{}/cloud_init/{}".format(
433 base_folder["folder"],
434 base_folder["pkg-dir"],
435 vdu["cloud-init-file"],
436 )
437 else:
438 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
439 base_folder["folder"],
440 vdu["cloud-init-file"],
441 )
442 with self.fs.file_open(cloud_init_file, "r") as ci_file:
443 cloud_init_content = ci_file.read()
444 elif vdu.get("cloud-init"):
445 cloud_init_content = vdu["cloud-init"]
446
447 return cloud_init_content
448 except FsException as e:
449 raise LcmException(
450 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
451 vnfd["id"], vdu["id"], cloud_init_file, e
452 )
453 )
454
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """Return the parsed additionalParams of the vdur matching *vdu_id*.

    Missing vdur or missing additionalParams yield whatever
    parse_yaml_strings returns for None/{} input.
    """
    matching_vdur = {}
    for vdur in db_vnfr.get("vdur"):
        if vdur["vdu-id-ref"] == vdu_id:
            matching_vdur = vdur
            break
    return parse_yaml_strings(matching_vdur.get("additionalParams"))
461
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided
    :param nsrId: Id of the NSR
    :return: copy of vnfd
    """
    vnfd_RO = deepcopy(vnfd)
    # remove unused by RO configuration, monitoring, scaling and internal keys
    for unused_key in (
        "_id",
        "_admin",
        "monitoring-param",
        "scaling-group-descriptor",
        "kdu",
        "k8s-cluster",
    ):
        vnfd_RO.pop(unused_key, None)
    if new_id:
        vnfd_RO["id"] = new_id

    # parse cloud-init or cloud-init-file with the provided variables using Jinja2
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
487
488 @staticmethod
489 def ip_profile_2_RO(ip_profile):
490 RO_ip_profile = deepcopy(ip_profile)
491 if "dns-server" in RO_ip_profile:
492 if isinstance(RO_ip_profile["dns-server"], list):
493 RO_ip_profile["dns-address"] = []
494 for ds in RO_ip_profile.pop("dns-server"):
495 RO_ip_profile["dns-address"].append(ds["address"])
496 else:
497 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
498 if RO_ip_profile.get("ip-version") == "ipv4":
499 RO_ip_profile["ip-version"] = "IPv4"
500 if RO_ip_profile.get("ip-version") == "ipv6":
501 RO_ip_profile["ip-version"] = "IPv6"
502 if "dhcp-params" in RO_ip_profile:
503 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
504 return RO_ip_profile
505
506 def _get_ro_vim_id_for_vim_account(self, vim_account):
507 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
508 if db_vim["_admin"]["operationalState"] != "ENABLED":
509 raise LcmException(
510 "VIM={} is not available. operationalState={}".format(
511 vim_account, db_vim["_admin"]["operationalState"]
512 )
513 )
514 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
515 return RO_vim_id
516
def get_ro_wim_id_for_wim_account(self, wim_account):
    """Resolve a wim account reference to its RO-side account id.

    A non-string argument is returned unchanged (it is not a db reference).
    :raises LcmException: when the WIM operationalState is not ENABLED
    """
    if not isinstance(wim_account, str):
        return wim_account
    db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
    operational_state = db_wim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "WIM={} is not available. operationalState={}".format(
                wim_account, operational_state
            )
        )
    return db_wim["_admin"]["deployed"]["RO-account"]
530
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
    """Apply a scale-out/scale-in to the vdur list of a vnfr in the database.

    :param db_vnfr: vnfr content; its "vdur" list is refreshed from the db on exit
    :param vdu_create: dict vdu-id -> number of instances to add
    :param vdu_delete: dict vdu-id -> number of instances to remove
    :param mark_delete: when True, instances are only marked status=DELETING
        instead of being pulled from the vdur list
    :raises LcmException: scaling out with neither an existing vdur nor a
        saved vdur-template to clone from
    """
    db_vdu_push_list = []
    template_vdur = []
    db_update = {"_admin.modified": time()}
    if vdu_create:
        for vdu_id, vdu_count in vdu_create.items():
            # clone from the newest existing vdur of this vdu-id
            vdur = next(
                (
                    vdur
                    for vdur in reversed(db_vnfr["vdur"])
                    if vdur["vdu-id-ref"] == vdu_id
                ),
                None,
            )
            if not vdur:
                # Read the template saved in the db:
                self.logger.debug(
                    "No vdur in the database. Using the vdur-template to scale"
                )
                vdur_template = db_vnfr.get("vdur-template")
                if not vdur_template:
                    raise LcmException(
                        "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                            vdu_id
                        )
                    )
                vdur = vdur_template[0]
                # Delete a template from the database after using it
                self.db.set_one(
                    "vnfrs",
                    {"_id": db_vnfr["_id"]},
                    None,
                    pull={"vdur-template": {"_id": vdur["_id"]}},
                )
            for count in range(vdu_count):
                # each new instance is a copy of the reference vdur with
                # fresh identity and reset runtime state
                vdur_copy = deepcopy(vdur)
                vdur_copy["status"] = "BUILD"
                vdur_copy["status-detailed"] = None
                vdur_copy["ip-address"] = None
                vdur_copy["_id"] = str(uuid4())
                vdur_copy["count-index"] += count + 1
                vdur_copy["id"] = "{}-{}".format(
                    vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                )
                vdur_copy.pop("vim_info", None)
                for iface in vdur_copy["interfaces"]:
                    # fixed addresses are incremented per instance; dynamic
                    # ones are cleared so the VIM assigns them
                    if iface.get("fixed-ip"):
                        iface["ip-address"] = self.increment_ip_mac(
                            iface["ip-address"], count + 1
                        )
                    else:
                        iface.pop("ip-address", None)
                    if iface.get("fixed-mac"):
                        iface["mac-address"] = self.increment_ip_mac(
                            iface["mac-address"], count + 1
                        )
                    else:
                        iface.pop("mac-address", None)
                    if db_vnfr["vdur"]:
                        iface.pop(
                            "mgmt_vnf", None
                        )  # only first vdu can be managment of vnf
                db_vdu_push_list.append(vdur_copy)
                # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
    if vdu_delete:
        if len(db_vnfr["vdur"]) == 1:
            # The scale will move to 0 instances
            self.logger.debug(
                "Scaling to 0 !, creating the template with the last vdur"
            )
            template_vdur = [db_vnfr["vdur"][0]]
        for vdu_id, vdu_count in vdu_delete.items():
            if mark_delete:
                indexes_to_delete = [
                    iv[0]
                    for iv in enumerate(db_vnfr["vdur"])
                    if iv[1]["vdu-id-ref"] == vdu_id
                ]
                db_update.update(
                    {
                        "vdur.{}.status".format(i): "DELETING"
                        for i in indexes_to_delete[-vdu_count:]
                    }
                )
            else:
                # it must be deleted one by one because common.db does not allow otherwise
                vdus_to_delete = [
                    v
                    for v in reversed(db_vnfr["vdur"])
                    if v["vdu-id-ref"] == vdu_id
                ]
                for vdu in vdus_to_delete[:vdu_count]:
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur": {"_id": vdu["_id"]}},
                    )
    db_push = {}
    if db_vdu_push_list:
        db_push["vdur"] = db_vdu_push_list
    if template_vdur:
        db_push["vdur-template"] = template_vdur
    if not db_push:
        db_push = None
    db_vnfr["vdur-template"] = template_vdur
    self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
    # modify passed dictionary db_vnfr
    db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
    db_vnfr["vdur"] = db_vnfr_["vdur"]
641
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld
    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """

    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        # locate the RO net that corresponds to this vld
        matching_net = None
        for net_RO in get_iterable(nsr_desc_RO, "nets"):
            if vld["id"] == net_RO.get("ns_net_osm_id"):
                matching_net = net_RO
                break
        if matching_net is None:
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
        vld["vim-id"] = matching_net.get("vim_net_id")
        vld["name"] = matching_net.get("vim_name")
        vld["status"] = matching_net.get("status")
        vld["status-detailed"] = matching_net.get("error_msg")
        ns_update_nsr["vld.{}".format(vld_index)] = vld
665
def set_vnfr_at_error(self, db_vnfrs, error_text):
    """Mark every vnfr (and its vdur entries without a status) as ERROR.

    Best-effort: database errors are logged, not raised.
    :param db_vnfrs: dict member-vnf-index -> vnfr content
    :param error_text: optional detail stored in vdur status-detailed
    """
    try:
        for db_vnfr in db_vnfrs.values():
            vnfr_update = {"status": "ERROR"}
            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                # leave vdur entries that already carry a status untouched
                if "status" in vdur:
                    continue
                vdur["status"] = "ERROR"
                vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                if error_text:
                    vdur["status-detailed"] = str(error_text)
                    vnfr_update[
                        "vdur.{}.status-detailed".format(vdu_index)
                    ] = "ERROR"
            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
    except DbException as e:
        self.logger.error("Cannot update vnf. {}".format(e))
682
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            if vnf_RO.get("ip_address"):
                # RO may report several addresses separated by ';' — keep the first
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                vdur_RO_count_index = 0
                # PDUs are not deployed by RO, so there is nothing to sync
                if vdur.get("pdu-type"):
                    continue
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    # skip RO vms of the same vdu until the matching replica
                    # (count-index) is reached
                    if vdur["count-index"] != vdur_RO_count_index:
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    # sync interface addresses by internal name
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get(
                                    "ip_address"
                                )
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break

        else:
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
779
780 def _get_ns_config_info(self, nsr_id):
781 """
782 Generates a mapping between vnf,vdu elements and the N2VC id
783 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
784 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
785 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
786 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
787 """
788 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
789 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
790 mapping = {}
791 ns_config_info = {"osm-config-mapping": mapping}
792 for vca in vca_deployed_list:
793 if not vca["member-vnf-index"]:
794 continue
795 if not vca["vdu_id"]:
796 mapping[vca["member-vnf-index"]] = vca["application"]
797 else:
798 mapping[
799 "{}.{}.{}".format(
800 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
801 )
802 ] = vca["application"]
803 return ns_config_info
804
805 async def _instantiate_ng_ro(
806 self,
807 logging_text,
808 nsr_id,
809 nsd,
810 db_nsr,
811 db_nslcmop,
812 db_vnfrs,
813 db_vnfds,
814 n2vc_key_list,
815 stage,
816 start_deploy,
817 timeout_ns_deploy,
818 ):
819 db_vims = {}
820
821 def get_vim_account(vim_account_id):
822 nonlocal db_vims
823 if vim_account_id in db_vims:
824 return db_vims[vim_account_id]
825 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
826 db_vims[vim_account_id] = db_vim
827 return db_vim
828
829 # modify target_vld info with instantiation parameters
830 def parse_vld_instantiation_params(
831 target_vim, target_vld, vld_params, target_sdn
832 ):
833 if vld_params.get("ip-profile"):
834 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
835 "ip-profile"
836 ]
837 if vld_params.get("provider-network"):
838 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
839 "provider-network"
840 ]
841 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
842 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
843 "provider-network"
844 ]["sdn-ports"]
845
846 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
847 # if wim_account_id is specified in vld_params, validate if it is feasible.
848 wim_account_id, db_wim = select_feasible_wim_account(
849 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
850 )
851
852 if wim_account_id:
853 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
854 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
855 # update vld_params with correct WIM account Id
856 vld_params["wimAccountId"] = wim_account_id
857
858 target_wim = "wim:{}".format(wim_account_id)
859 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
860 sdn_ports = get_sdn_ports(vld_params, db_wim)
861 if len(sdn_ports) > 0:
862 target_vld["vim_info"][target_wim] = target_wim_attrs
863 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
864
865 self.logger.debug(
866 "Target VLD with WIM data: {:s}".format(str(target_vld))
867 )
868
869 for param in ("vim-network-name", "vim-network-id"):
870 if vld_params.get(param):
871 if isinstance(vld_params[param], dict):
872 for vim, vim_net in vld_params[param].items():
873 other_target_vim = "vim:" + vim
874 populate_dict(
875 target_vld["vim_info"],
876 (other_target_vim, param.replace("-", "_")),
877 vim_net,
878 )
879 else: # isinstance str
880 target_vld["vim_info"][target_vim][
881 param.replace("-", "_")
882 ] = vld_params[param]
883 if vld_params.get("common_id"):
884 target_vld["common_id"] = vld_params.get("common_id")
885
886 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
887 def update_ns_vld_target(target, ns_params):
888 for vnf_params in ns_params.get("vnf", ()):
889 if vnf_params.get("vimAccountId"):
890 target_vnf = next(
891 (
892 vnfr
893 for vnfr in db_vnfrs.values()
894 if vnf_params["member-vnf-index"]
895 == vnfr["member-vnf-index-ref"]
896 ),
897 None,
898 )
899 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
900 if not vdur:
901 return
902 for a_index, a_vld in enumerate(target["ns"]["vld"]):
903 target_vld = find_in_list(
904 get_iterable(vdur, "interfaces"),
905 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
906 )
907
908 vld_params = find_in_list(
909 get_iterable(ns_params, "vld"),
910 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
911 )
912 if target_vld:
913 if vnf_params.get("vimAccountId") not in a_vld.get(
914 "vim_info", {}
915 ):
916 target_vim_network_list = [
917 v for _, v in a_vld.get("vim_info").items()
918 ]
919 target_vim_network_name = next(
920 (
921 item.get("vim_network_name", "")
922 for item in target_vim_network_list
923 ),
924 "",
925 )
926
927 target["ns"]["vld"][a_index].get("vim_info").update(
928 {
929 "vim:{}".format(vnf_params["vimAccountId"]): {
930 "vim_network_name": target_vim_network_name,
931 }
932 }
933 )
934
935 if vld_params:
936 for param in ("vim-network-name", "vim-network-id"):
937 if vld_params.get(param) and isinstance(
938 vld_params[param], dict
939 ):
940 for vim, vim_net in vld_params[
941 param
942 ].items():
943 other_target_vim = "vim:" + vim
944 populate_dict(
945 target["ns"]["vld"][a_index].get(
946 "vim_info"
947 ),
948 (
949 other_target_vim,
950 param.replace("-", "_"),
951 ),
952 vim_net,
953 )
954
955 nslcmop_id = db_nslcmop["_id"]
956 target = {
957 "name": db_nsr["name"],
958 "ns": {"vld": []},
959 "vnf": [],
960 "image": deepcopy(db_nsr["image"]),
961 "flavor": deepcopy(db_nsr["flavor"]),
962 "action_id": nslcmop_id,
963 "cloud_init_content": {},
964 }
965 for image in target["image"]:
966 image["vim_info"] = {}
967 for flavor in target["flavor"]:
968 flavor["vim_info"] = {}
969 if db_nsr.get("affinity-or-anti-affinity-group"):
970 target["affinity-or-anti-affinity-group"] = deepcopy(
971 db_nsr["affinity-or-anti-affinity-group"]
972 )
973 for affinity_or_anti_affinity_group in target[
974 "affinity-or-anti-affinity-group"
975 ]:
976 affinity_or_anti_affinity_group["vim_info"] = {}
977
978 if db_nslcmop.get("lcmOperationType") != "instantiate":
979 # get parameters of instantiation:
980 db_nslcmop_instantiate = self.db.get_list(
981 "nslcmops",
982 {
983 "nsInstanceId": db_nslcmop["nsInstanceId"],
984 "lcmOperationType": "instantiate",
985 },
986 )[-1]
987 ns_params = db_nslcmop_instantiate.get("operationParams")
988 else:
989 ns_params = db_nslcmop.get("operationParams")
990 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
991 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
992
993 cp2target = {}
994 for vld_index, vld in enumerate(db_nsr.get("vld")):
995 target_vim = "vim:{}".format(ns_params["vimAccountId"])
996 target_vld = {
997 "id": vld["id"],
998 "name": vld["name"],
999 "mgmt-network": vld.get("mgmt-network", False),
1000 "type": vld.get("type"),
1001 "vim_info": {
1002 target_vim: {
1003 "vim_network_name": vld.get("vim-network-name"),
1004 "vim_account_id": ns_params["vimAccountId"],
1005 }
1006 },
1007 }
1008 # check if this network needs SDN assist
1009 if vld.get("pci-interfaces"):
1010 db_vim = get_vim_account(ns_params["vimAccountId"])
1011 if vim_config := db_vim.get("config"):
1012 if sdnc_id := vim_config.get("sdn-controller"):
1013 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1014 target_sdn = "sdn:{}".format(sdnc_id)
1015 target_vld["vim_info"][target_sdn] = {
1016 "sdn": True,
1017 "target_vim": target_vim,
1018 "vlds": [sdn_vld],
1019 "type": vld.get("type"),
1020 }
1021
1022 nsd_vnf_profiles = get_vnf_profiles(nsd)
1023 for nsd_vnf_profile in nsd_vnf_profiles:
1024 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1025 if cp["virtual-link-profile-id"] == vld["id"]:
1026 cp2target[
1027 "member_vnf:{}.{}".format(
1028 cp["constituent-cpd-id"][0][
1029 "constituent-base-element-id"
1030 ],
1031 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1032 )
1033 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1034
1035 # check at nsd descriptor, if there is an ip-profile
1036 vld_params = {}
1037 nsd_vlp = find_in_list(
1038 get_virtual_link_profiles(nsd),
1039 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1040 == vld["id"],
1041 )
1042 if (
1043 nsd_vlp
1044 and nsd_vlp.get("virtual-link-protocol-data")
1045 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1046 ):
1047 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1048 "l3-protocol-data"
1049 ]
1050 ip_profile_dest_data = {}
1051 if "ip-version" in ip_profile_source_data:
1052 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1053 "ip-version"
1054 ]
1055 if "cidr" in ip_profile_source_data:
1056 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1057 "cidr"
1058 ]
1059 if "gateway-ip" in ip_profile_source_data:
1060 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1061 "gateway-ip"
1062 ]
1063 if "dhcp-enabled" in ip_profile_source_data:
1064 ip_profile_dest_data["dhcp-params"] = {
1065 "enabled": ip_profile_source_data["dhcp-enabled"]
1066 }
1067 vld_params["ip-profile"] = ip_profile_dest_data
1068
1069 # update vld_params with instantiation params
1070 vld_instantiation_params = find_in_list(
1071 get_iterable(ns_params, "vld"),
1072 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1073 )
1074 if vld_instantiation_params:
1075 vld_params.update(vld_instantiation_params)
1076 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1077 target["ns"]["vld"].append(target_vld)
1078 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1079 update_ns_vld_target(target, ns_params)
1080
1081 for vnfr in db_vnfrs.values():
1082 vnfd = find_in_list(
1083 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1084 )
1085 vnf_params = find_in_list(
1086 get_iterable(ns_params, "vnf"),
1087 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1088 )
1089 target_vnf = deepcopy(vnfr)
1090 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1091 for vld in target_vnf.get("vld", ()):
1092 # check if connected to a ns.vld, to fill target'
1093 vnf_cp = find_in_list(
1094 vnfd.get("int-virtual-link-desc", ()),
1095 lambda cpd: cpd.get("id") == vld["id"],
1096 )
1097 if vnf_cp:
1098 ns_cp = "member_vnf:{}.{}".format(
1099 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1100 )
1101 if cp2target.get(ns_cp):
1102 vld["target"] = cp2target[ns_cp]
1103
1104 vld["vim_info"] = {
1105 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1106 }
1107 # check if this network needs SDN assist
1108 target_sdn = None
1109 if vld.get("pci-interfaces"):
1110 db_vim = get_vim_account(vnfr["vim-account-id"])
1111 sdnc_id = db_vim["config"].get("sdn-controller")
1112 if sdnc_id:
1113 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1114 target_sdn = "sdn:{}".format(sdnc_id)
1115 vld["vim_info"][target_sdn] = {
1116 "sdn": True,
1117 "target_vim": target_vim,
1118 "vlds": [sdn_vld],
1119 "type": vld.get("type"),
1120 }
1121
1122 # check at vnfd descriptor, if there is an ip-profile
1123 vld_params = {}
1124 vnfd_vlp = find_in_list(
1125 get_virtual_link_profiles(vnfd),
1126 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1127 )
1128 if (
1129 vnfd_vlp
1130 and vnfd_vlp.get("virtual-link-protocol-data")
1131 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1132 ):
1133 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1134 "l3-protocol-data"
1135 ]
1136 ip_profile_dest_data = {}
1137 if "ip-version" in ip_profile_source_data:
1138 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1139 "ip-version"
1140 ]
1141 if "cidr" in ip_profile_source_data:
1142 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1143 "cidr"
1144 ]
1145 if "gateway-ip" in ip_profile_source_data:
1146 ip_profile_dest_data[
1147 "gateway-address"
1148 ] = ip_profile_source_data["gateway-ip"]
1149 if "dhcp-enabled" in ip_profile_source_data:
1150 ip_profile_dest_data["dhcp-params"] = {
1151 "enabled": ip_profile_source_data["dhcp-enabled"]
1152 }
1153
1154 vld_params["ip-profile"] = ip_profile_dest_data
1155 # update vld_params with instantiation params
1156 if vnf_params:
1157 vld_instantiation_params = find_in_list(
1158 get_iterable(vnf_params, "internal-vld"),
1159 lambda i_vld: i_vld["name"] == vld["id"],
1160 )
1161 if vld_instantiation_params:
1162 vld_params.update(vld_instantiation_params)
1163 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1164
1165 vdur_list = []
1166 for vdur in target_vnf.get("vdur", ()):
1167 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1168 continue # This vdu must not be created
1169 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1170
1171 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1172
1173 if ssh_keys_all:
1174 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1175 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1176 if (
1177 vdu_configuration
1178 and vdu_configuration.get("config-access")
1179 and vdu_configuration.get("config-access").get("ssh-access")
1180 ):
1181 vdur["ssh-keys"] = ssh_keys_all
1182 vdur["ssh-access-required"] = vdu_configuration[
1183 "config-access"
1184 ]["ssh-access"]["required"]
1185 elif (
1186 vnf_configuration
1187 and vnf_configuration.get("config-access")
1188 and vnf_configuration.get("config-access").get("ssh-access")
1189 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1190 ):
1191 vdur["ssh-keys"] = ssh_keys_all
1192 vdur["ssh-access-required"] = vnf_configuration[
1193 "config-access"
1194 ]["ssh-access"]["required"]
1195 elif ssh_keys_instantiation and find_in_list(
1196 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1197 ):
1198 vdur["ssh-keys"] = ssh_keys_instantiation
1199
1200 self.logger.debug("NS > vdur > {}".format(vdur))
1201
1202 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1203 # cloud-init
1204 if vdud.get("cloud-init-file"):
1205 vdur["cloud-init"] = "{}:file:{}".format(
1206 vnfd["_id"], vdud.get("cloud-init-file")
1207 )
1208 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1209 if vdur["cloud-init"] not in target["cloud_init_content"]:
1210 base_folder = vnfd["_admin"]["storage"]
1211 if base_folder["pkg-dir"]:
1212 cloud_init_file = "{}/{}/cloud_init/{}".format(
1213 base_folder["folder"],
1214 base_folder["pkg-dir"],
1215 vdud.get("cloud-init-file"),
1216 )
1217 else:
1218 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1219 base_folder["folder"],
1220 vdud.get("cloud-init-file"),
1221 )
1222 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1223 target["cloud_init_content"][
1224 vdur["cloud-init"]
1225 ] = ci_file.read()
1226 elif vdud.get("cloud-init"):
1227 vdur["cloud-init"] = "{}:vdu:{}".format(
1228 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1229 )
1230 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1231 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1232 "cloud-init"
1233 ]
1234 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1235 deploy_params_vdu = self._format_additional_params(
1236 vdur.get("additionalParams") or {}
1237 )
1238 deploy_params_vdu["OSM"] = get_osm_params(
1239 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1240 )
1241 vdur["additionalParams"] = deploy_params_vdu
1242
1243 # flavor
1244 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1245 if target_vim not in ns_flavor["vim_info"]:
1246 ns_flavor["vim_info"][target_vim] = {}
1247
1248 # deal with images
1249 # in case alternative images are provided we must check if they should be applied
1250 # for the vim_type, modify the vim_type taking into account
1251 ns_image_id = int(vdur["ns-image-id"])
1252 if vdur.get("alt-image-ids"):
1253 db_vim = get_vim_account(vnfr["vim-account-id"])
1254 vim_type = db_vim["vim_type"]
1255 for alt_image_id in vdur.get("alt-image-ids"):
1256 ns_alt_image = target["image"][int(alt_image_id)]
1257 if vim_type == ns_alt_image.get("vim-type"):
1258 # must use alternative image
1259 self.logger.debug(
1260 "use alternative image id: {}".format(alt_image_id)
1261 )
1262 ns_image_id = alt_image_id
1263 vdur["ns-image-id"] = ns_image_id
1264 break
1265 ns_image = target["image"][int(ns_image_id)]
1266 if target_vim not in ns_image["vim_info"]:
1267 ns_image["vim_info"][target_vim] = {}
1268
1269 # Affinity groups
1270 if vdur.get("affinity-or-anti-affinity-group-id"):
1271 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1272 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1273 if target_vim not in ns_ags["vim_info"]:
1274 ns_ags["vim_info"][target_vim] = {}
1275
1276 vdur["vim_info"] = {target_vim: {}}
1277 # instantiation parameters
1278 if vnf_params:
1279 vdu_instantiation_params = find_in_list(
1280 get_iterable(vnf_params, "vdu"),
1281 lambda i_vdu: i_vdu["id"] == vdud["id"],
1282 )
1283 if vdu_instantiation_params:
1284 # Parse the vdu_volumes from the instantiation params
1285 vdu_volumes = get_volumes_from_instantiation_params(
1286 vdu_instantiation_params, vdud
1287 )
1288 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1289 vdur_list.append(vdur)
1290 target_vnf["vdur"] = vdur_list
1291 target["vnf"].append(target_vnf)
1292
1293 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1294 desc = await self.RO.deploy(nsr_id, target)
1295 self.logger.debug("RO return > {}".format(desc))
1296 action_id = desc["action_id"]
1297 await self._wait_ng_ro(
1298 nsr_id,
1299 action_id,
1300 nslcmop_id,
1301 start_deploy,
1302 timeout_ns_deploy,
1303 stage,
1304 operation="instantiation",
1305 )
1306
1307 # Updating NSR
1308 db_nsr_update = {
1309 "_admin.deployed.RO.operational-status": "running",
1310 "detailed-status": " ".join(stage),
1311 }
1312 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1313 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1314 self._write_op_status(nslcmop_id, stage)
1315 self.logger.debug(
1316 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1317 )
1318 return
1319
1320 async def _wait_ng_ro(
1321 self,
1322 nsr_id,
1323 action_id,
1324 nslcmop_id=None,
1325 start_time=None,
1326 timeout=600,
1327 stage=None,
1328 operation=None,
1329 ):
1330 detailed_status_old = None
1331 db_nsr_update = {}
1332 start_time = start_time or time()
1333 while time() <= start_time + timeout:
1334 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1335 self.logger.debug("Wait NG RO > {}".format(desc_status))
1336 if desc_status["status"] == "FAILED":
1337 raise NgRoException(desc_status["details"])
1338 elif desc_status["status"] == "BUILD":
1339 if stage:
1340 stage[2] = "VIM: ({})".format(desc_status["details"])
1341 elif desc_status["status"] == "DONE":
1342 if stage:
1343 stage[2] = "Deployed at VIM"
1344 break
1345 else:
1346 assert False, "ROclient.check_ns_status returns unknown {}".format(
1347 desc_status["status"]
1348 )
1349 if stage and nslcmop_id and stage[2] != detailed_status_old:
1350 detailed_status_old = stage[2]
1351 db_nsr_update["detailed-status"] = " ".join(stage)
1352 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1353 self._write_op_status(nslcmop_id, stage)
1354 await asyncio.sleep(15, loop=self.loop)
1355 else: # timeout_ns_deploy
1356 raise NgRoException("Timeout waiting ns to deploy")
1357
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminate an NS at NG-RO: request deletion of all deployed items, wait
        until the action completes, then delete the nsr record at RO. The nsr
        DB record and the operation status are updated in all cases.

        :param logging_text: prefix text to use at logging
        :param nsr_deployed: deployed info of the nsr (not read here; kept for
            interface compatibility with callers)
        :param nsr_id: nsr identity
        :param nslcmop_id: ns operation identity
        :param stage: list with 3 items; stage[2] is overwritten with the result
        :raises LcmException: when deletion at RO/VIM failed (raised only after
            the DB status has been updated)
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target makes NG-RO delete everything deployed for this ns
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                # already gone at RO: treat as successfully deleted
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            # unexpected failure: recorded and re-raised below as LcmException,
            # after the DB has been updated
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1431
1432 async def instantiate_RO(
1433 self,
1434 logging_text,
1435 nsr_id,
1436 nsd,
1437 db_nsr,
1438 db_nslcmop,
1439 db_vnfrs,
1440 db_vnfds,
1441 n2vc_key_list,
1442 stage,
1443 ):
1444 """
1445 Instantiate at RO
1446 :param logging_text: preffix text to use at logging
1447 :param nsr_id: nsr identity
1448 :param nsd: database content of ns descriptor
1449 :param db_nsr: database content of ns record
1450 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1451 :param db_vnfrs:
1452 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1453 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1454 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1455 :return: None or exception
1456 """
1457 try:
1458 start_deploy = time()
1459 ns_params = db_nslcmop.get("operationParams")
1460 if ns_params and ns_params.get("timeout_ns_deploy"):
1461 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1462 else:
1463 timeout_ns_deploy = self.timeout.ns_deploy
1464
1465 # Check for and optionally request placement optimization. Database will be updated if placement activated
1466 stage[2] = "Waiting for Placement."
1467 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1468 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1469 for vnfr in db_vnfrs.values():
1470 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1471 break
1472 else:
1473 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1474
1475 return await self._instantiate_ng_ro(
1476 logging_text,
1477 nsr_id,
1478 nsd,
1479 db_nsr,
1480 db_nslcmop,
1481 db_vnfrs,
1482 db_vnfds,
1483 n2vc_key_list,
1484 stage,
1485 start_deploy,
1486 timeout_ns_deploy,
1487 )
1488 except Exception as e:
1489 stage[2] = "ERROR deploying at VIM"
1490 self.set_vnfr_at_error(db_vnfrs, str(e))
1491 self.logger.error(
1492 "Error deploying at VIM {}".format(e),
1493 exc_info=not isinstance(
1494 e,
1495 (
1496 ROclient.ROClientException,
1497 LcmException,
1498 DbException,
1499 NgRoException,
1500 ),
1501 ),
1502 )
1503 raise
1504
1505 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1506 """
1507 Wait for kdu to be up, get ip address
1508 :param logging_text: prefix use for logging
1509 :param nsr_id:
1510 :param vnfr_id:
1511 :param kdu_name:
1512 :return: IP address, K8s services
1513 """
1514
1515 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1516 nb_tries = 0
1517
1518 while nb_tries < 360:
1519 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1520 kdur = next(
1521 (
1522 x
1523 for x in get_iterable(db_vnfr, "kdur")
1524 if x.get("kdu-name") == kdu_name
1525 ),
1526 None,
1527 )
1528 if not kdur:
1529 raise LcmException(
1530 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1531 )
1532 if kdur.get("status"):
1533 if kdur["status"] in ("READY", "ENABLED"):
1534 return kdur.get("ip-address"), kdur.get("services")
1535 else:
1536 raise LcmException(
1537 "target KDU={} is in error state".format(kdu_name)
1538 )
1539
1540 await asyncio.sleep(10, loop=self.loop)
1541 nb_tries += 1
1542 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1543
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target VDU id; None means the VNF management VDU
        :param vdu_index: count-index of the target VDU (used with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on timeout, target in error state, or key-injection failure
        """
        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        target_vdu_id = None
        ro_retries = 0

        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address; once target_vdu_id is set the lookup phase is skipped
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # find the vdur that owns the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered ready; otherwise either legacy "status"
                # or NG-RO "vim_status" must be ACTIVE
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # NOTE(review): "ssh-ky" typo kept as-is — runtime log message
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    # ask NG-RO to run the key-injection action on the target vdur
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                # nothing to inject: the VM is up, return its ip
                break

        return ip_address
1669
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: nsr identity
        :param vca_deployed_list: _admin.deployed.VCA list of the nsr
        :param vca_index: index of this VCA within the list
        :raises LcmException: if a dependency is BROKEN or on timeout
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        timeout = 300
        # NOTE(review): "timeout" counts iterations of a 10-second sleep
        # (i.e. up to ~50 minutes), not seconds as the value suggests —
        # confirm intended units
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # only same-VNF VCAs are dependencies, unless this is an NS-level VCA
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # a dependency is still in progress: poll again
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1707
1708 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1709 vca_id = None
1710 if db_vnfr:
1711 vca_id = deep_get(db_vnfr, ("vca-id",))
1712 elif db_nsr:
1713 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1714 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1715 return vca_id
1716
1717 async def instantiate_N2VC(
1718 self,
1719 logging_text,
1720 vca_index,
1721 nsi_id,
1722 db_nsr,
1723 db_vnfr,
1724 vdu_id,
1725 kdu_name,
1726 vdu_index,
1727 kdu_index,
1728 config_descriptor,
1729 deploy_params,
1730 base_folder,
1731 nslcmop_id,
1732 stage,
1733 vca_type,
1734 vca_name,
1735 ee_config_descriptor,
1736 ):
1737 nsr_id = db_nsr["_id"]
1738 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1739 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1740 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1741 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1742 db_dict = {
1743 "collection": "nsrs",
1744 "filter": {"_id": nsr_id},
1745 "path": db_update_entry,
1746 }
1747 step = ""
1748 try:
1749 element_type = "NS"
1750 element_under_configuration = nsr_id
1751
1752 vnfr_id = None
1753 if db_vnfr:
1754 vnfr_id = db_vnfr["_id"]
1755 osm_config["osm"]["vnf_id"] = vnfr_id
1756
1757 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1758
1759 if vca_type == "native_charm":
1760 index_number = 0
1761 else:
1762 index_number = vdu_index or 0
1763
1764 if vnfr_id:
1765 element_type = "VNF"
1766 element_under_configuration = vnfr_id
1767 namespace += ".{}-{}".format(vnfr_id, index_number)
1768 if vdu_id:
1769 namespace += ".{}-{}".format(vdu_id, index_number)
1770 element_type = "VDU"
1771 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1772 osm_config["osm"]["vdu_id"] = vdu_id
1773 elif kdu_name:
1774 namespace += ".{}".format(kdu_name)
1775 element_type = "KDU"
1776 element_under_configuration = kdu_name
1777 osm_config["osm"]["kdu_name"] = kdu_name
1778
1779 # Get artifact path
1780 if base_folder["pkg-dir"]:
1781 artifact_path = "{}/{}/{}/{}".format(
1782 base_folder["folder"],
1783 base_folder["pkg-dir"],
1784 "charms"
1785 if vca_type
1786 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1787 else "helm-charts",
1788 vca_name,
1789 )
1790 else:
1791 artifact_path = "{}/Scripts/{}/{}/".format(
1792 base_folder["folder"],
1793 "charms"
1794 if vca_type
1795 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1796 else "helm-charts",
1797 vca_name,
1798 )
1799
1800 self.logger.debug("Artifact path > {}".format(artifact_path))
1801
1802 # get initial_config_primitive_list that applies to this element
1803 initial_config_primitive_list = config_descriptor.get(
1804 "initial-config-primitive"
1805 )
1806
1807 self.logger.debug(
1808 "Initial config primitive list > {}".format(
1809 initial_config_primitive_list
1810 )
1811 )
1812
1813 # add config if not present for NS charm
1814 ee_descriptor_id = ee_config_descriptor.get("id")
1815 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1816 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1817 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1818 )
1819
1820 self.logger.debug(
1821 "Initial config primitive list #2 > {}".format(
1822 initial_config_primitive_list
1823 )
1824 )
1825 # n2vc_redesign STEP 3.1
1826 # find old ee_id if exists
1827 ee_id = vca_deployed.get("ee_id")
1828
1829 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1830 # create or register execution environment in VCA
1831 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1832 self._write_configuration_status(
1833 nsr_id=nsr_id,
1834 vca_index=vca_index,
1835 status="CREATING",
1836 element_under_configuration=element_under_configuration,
1837 element_type=element_type,
1838 )
1839
1840 step = "create execution environment"
1841 self.logger.debug(logging_text + step)
1842
1843 ee_id = None
1844 credentials = None
1845 if vca_type == "k8s_proxy_charm":
1846 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1847 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1848 namespace=namespace,
1849 artifact_path=artifact_path,
1850 db_dict=db_dict,
1851 vca_id=vca_id,
1852 )
1853 elif vca_type == "helm" or vca_type == "helm-v3":
1854 ee_id, credentials = await self.vca_map[
1855 vca_type
1856 ].create_execution_environment(
1857 namespace=namespace,
1858 reuse_ee_id=ee_id,
1859 db_dict=db_dict,
1860 config=osm_config,
1861 artifact_path=artifact_path,
1862 chart_model=vca_name,
1863 vca_type=vca_type,
1864 )
1865 else:
1866 ee_id, credentials = await self.vca_map[
1867 vca_type
1868 ].create_execution_environment(
1869 namespace=namespace,
1870 reuse_ee_id=ee_id,
1871 db_dict=db_dict,
1872 vca_id=vca_id,
1873 )
1874
1875 elif vca_type == "native_charm":
1876 step = "Waiting to VM being up and getting IP address"
1877 self.logger.debug(logging_text + step)
1878 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1879 logging_text,
1880 nsr_id,
1881 vnfr_id,
1882 vdu_id,
1883 vdu_index,
1884 user=None,
1885 pub_key=None,
1886 )
1887 credentials = {"hostname": rw_mgmt_ip}
1888 # get username
1889 username = deep_get(
1890 config_descriptor, ("config-access", "ssh-access", "default-user")
1891 )
1892 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1893 # merged. Meanwhile let's get username from initial-config-primitive
1894 if not username and initial_config_primitive_list:
1895 for config_primitive in initial_config_primitive_list:
1896 for param in config_primitive.get("parameter", ()):
1897 if param["name"] == "ssh-username":
1898 username = param["value"]
1899 break
1900 if not username:
1901 raise LcmException(
1902 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1903 "'config-access.ssh-access.default-user'"
1904 )
1905 credentials["username"] = username
1906 # n2vc_redesign STEP 3.2
1907
1908 self._write_configuration_status(
1909 nsr_id=nsr_id,
1910 vca_index=vca_index,
1911 status="REGISTERING",
1912 element_under_configuration=element_under_configuration,
1913 element_type=element_type,
1914 )
1915
1916 step = "register execution environment {}".format(credentials)
1917 self.logger.debug(logging_text + step)
1918 ee_id = await self.vca_map[vca_type].register_execution_environment(
1919 credentials=credentials,
1920 namespace=namespace,
1921 db_dict=db_dict,
1922 vca_id=vca_id,
1923 )
1924
1925 # for compatibility with MON/POL modules, the need model and application name at database
1926 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1927 ee_id_parts = ee_id.split(".")
1928 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1929 if len(ee_id_parts) >= 2:
1930 model_name = ee_id_parts[0]
1931 application_name = ee_id_parts[1]
1932 db_nsr_update[db_update_entry + "model"] = model_name
1933 db_nsr_update[db_update_entry + "application"] = application_name
1934
1935 # n2vc_redesign STEP 3.3
1936 step = "Install configuration Software"
1937
1938 self._write_configuration_status(
1939 nsr_id=nsr_id,
1940 vca_index=vca_index,
1941 status="INSTALLING SW",
1942 element_under_configuration=element_under_configuration,
1943 element_type=element_type,
1944 other_update=db_nsr_update,
1945 )
1946
1947 # TODO check if already done
1948 self.logger.debug(logging_text + step)
1949 config = None
1950 if vca_type == "native_charm":
1951 config_primitive = next(
1952 (p for p in initial_config_primitive_list if p["name"] == "config"),
1953 None,
1954 )
1955 if config_primitive:
1956 config = self._map_primitive_params(
1957 config_primitive, {}, deploy_params
1958 )
1959 num_units = 1
1960 if vca_type == "lxc_proxy_charm":
1961 if element_type == "NS":
1962 num_units = db_nsr.get("config-units") or 1
1963 elif element_type == "VNF":
1964 num_units = db_vnfr.get("config-units") or 1
1965 elif element_type == "VDU":
1966 for v in db_vnfr["vdur"]:
1967 if vdu_id == v["vdu-id-ref"]:
1968 num_units = v.get("config-units") or 1
1969 break
1970 if vca_type != "k8s_proxy_charm":
1971 await self.vca_map[vca_type].install_configuration_sw(
1972 ee_id=ee_id,
1973 artifact_path=artifact_path,
1974 db_dict=db_dict,
1975 config=config,
1976 num_units=num_units,
1977 vca_id=vca_id,
1978 vca_type=vca_type,
1979 )
1980
1981 # write in db flag of configuration_sw already installed
1982 self.update_db_2(
1983 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1984 )
1985
1986 # add relations for this VCA (wait for other peers related with this VCA)
1987 is_relation_added = await self._add_vca_relations(
1988 logging_text=logging_text,
1989 nsr_id=nsr_id,
1990 vca_type=vca_type,
1991 vca_index=vca_index,
1992 )
1993
1994 if not is_relation_added:
1995 raise LcmException("Relations could not be added to VCA.")
1996
1997 # if SSH access is required, then get execution environment SSH public
1998 # if native charm we have waited already to VM be UP
1999 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2000 pub_key = None
2001 user = None
2002 # self.logger.debug("get ssh key block")
2003 if deep_get(
2004 config_descriptor, ("config-access", "ssh-access", "required")
2005 ):
2006 # self.logger.debug("ssh key needed")
2007 # Needed to inject a ssh key
2008 user = deep_get(
2009 config_descriptor,
2010 ("config-access", "ssh-access", "default-user"),
2011 )
2012 step = "Install configuration Software, getting public ssh key"
2013 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2014 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2015 )
2016
2017 step = "Insert public key into VM user={} ssh_key={}".format(
2018 user, pub_key
2019 )
2020 else:
2021 # self.logger.debug("no need to get ssh key")
2022 step = "Waiting to VM being up and getting IP address"
2023 self.logger.debug(logging_text + step)
2024
2025 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2026 rw_mgmt_ip = None
2027
2028 # n2vc_redesign STEP 5.1
2029 # wait for RO (ip-address) Insert pub_key into VM
2030 if vnfr_id:
2031 if kdu_name:
2032 rw_mgmt_ip, services = await self.wait_kdu_up(
2033 logging_text, nsr_id, vnfr_id, kdu_name
2034 )
2035 vnfd = self.db.get_one(
2036 "vnfds_revisions",
2037 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2038 )
2039 kdu = get_kdu(vnfd, kdu_name)
2040 kdu_services = [
2041 service["name"] for service in get_kdu_services(kdu)
2042 ]
2043 exposed_services = []
2044 for service in services:
2045 if any(s in service["name"] for s in kdu_services):
2046 exposed_services.append(service)
2047 await self.vca_map[vca_type].exec_primitive(
2048 ee_id=ee_id,
2049 primitive_name="config",
2050 params_dict={
2051 "osm-config": json.dumps(
2052 OsmConfigBuilder(
2053 k8s={"services": exposed_services}
2054 ).build()
2055 )
2056 },
2057 vca_id=vca_id,
2058 )
2059
2060 # This verification is needed in order to avoid trying to add a public key
2061 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2062 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2063 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2064 # or it is a KNF)
2065 elif db_vnfr.get("vdur"):
2066 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2067 logging_text,
2068 nsr_id,
2069 vnfr_id,
2070 vdu_id,
2071 vdu_index,
2072 user=user,
2073 pub_key=pub_key,
2074 )
2075
2076 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2077
2078 # store rw_mgmt_ip in deploy params for later replacement
2079 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2080
2081 # n2vc_redesign STEP 6 Execute initial config primitive
2082 step = "execute initial config primitive"
2083
2084 # wait for dependent primitives execution (NS -> VNF -> VDU)
2085 if initial_config_primitive_list:
2086 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2087
2088 # stage, in function of element type: vdu, kdu, vnf or ns
2089 my_vca = vca_deployed_list[vca_index]
2090 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2091 # VDU or KDU
2092 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2093 elif my_vca.get("member-vnf-index"):
2094 # VNF
2095 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2096 else:
2097 # NS
2098 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2099
2100 self._write_configuration_status(
2101 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2102 )
2103
2104 self._write_op_status(op_id=nslcmop_id, stage=stage)
2105
2106 check_if_terminated_needed = True
2107 for initial_config_primitive in initial_config_primitive_list:
2108 # adding information on the vca_deployed if it is a NS execution environment
2109 if not vca_deployed["member-vnf-index"]:
2110 deploy_params["ns_config_info"] = json.dumps(
2111 self._get_ns_config_info(nsr_id)
2112 )
2113 # TODO check if already done
2114 primitive_params_ = self._map_primitive_params(
2115 initial_config_primitive, {}, deploy_params
2116 )
2117
2118 step = "execute primitive '{}' params '{}'".format(
2119 initial_config_primitive["name"], primitive_params_
2120 )
2121 self.logger.debug(logging_text + step)
2122 await self.vca_map[vca_type].exec_primitive(
2123 ee_id=ee_id,
2124 primitive_name=initial_config_primitive["name"],
2125 params_dict=primitive_params_,
2126 db_dict=db_dict,
2127 vca_id=vca_id,
2128 vca_type=vca_type,
2129 )
2130 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2131 if check_if_terminated_needed:
2132 if config_descriptor.get("terminate-config-primitive"):
2133 self.update_db_2(
2134 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2135 )
2136 check_if_terminated_needed = False
2137
2138 # TODO register in database that primitive is done
2139
2140 # STEP 7 Configure metrics
2141 if vca_type == "helm" or vca_type == "helm-v3":
2142 # TODO: review for those cases where the helm chart is a reference and
2143 # is not part of the NF package
2144 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2145 ee_id=ee_id,
2146 artifact_path=artifact_path,
2147 ee_config_descriptor=ee_config_descriptor,
2148 vnfr_id=vnfr_id,
2149 nsr_id=nsr_id,
2150 target_ip=rw_mgmt_ip,
2151 element_type=element_type,
2152 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2153 vdu_id=vdu_id,
2154 vdu_index=vdu_index,
2155 kdu_name=kdu_name,
2156 kdu_index=kdu_index,
2157 )
2158 if prometheus_jobs:
2159 self.update_db_2(
2160 "nsrs",
2161 nsr_id,
2162 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2163 )
2164
2165 for job in prometheus_jobs:
2166 self.db.set_one(
2167 "prometheus_jobs",
2168 {"job_name": job["job_name"]},
2169 job,
2170 upsert=True,
2171 fail_on_empty=False,
2172 )
2173
2174 step = "instantiated at VCA"
2175 self.logger.debug(logging_text + step)
2176
2177 self._write_configuration_status(
2178 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2179 )
2180
2181 except Exception as e: # TODO not use Exception but N2VC exception
2182 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2183 if not isinstance(
2184 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2185 ):
2186 self.logger.error(
2187 "Exception while {} : {}".format(step, e), exc_info=True
2188 )
2189 self._write_configuration_status(
2190 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2191 )
2192 raise LcmException("{}. {}".format(step, e)) from e
2193
2194 def _write_ns_status(
2195 self,
2196 nsr_id: str,
2197 ns_state: str,
2198 current_operation: str,
2199 current_operation_id: str,
2200 error_description: str = None,
2201 error_detail: str = None,
2202 other_update: dict = None,
2203 ):
2204 """
2205 Update db_nsr fields.
2206 :param nsr_id:
2207 :param ns_state:
2208 :param current_operation:
2209 :param current_operation_id:
2210 :param error_description:
2211 :param error_detail:
2212 :param other_update: Other required changes at database if provided, will be cleared
2213 :return:
2214 """
2215 try:
2216 db_dict = other_update or {}
2217 db_dict[
2218 "_admin.nslcmop"
2219 ] = current_operation_id # for backward compatibility
2220 db_dict["_admin.current-operation"] = current_operation_id
2221 db_dict["_admin.operation-type"] = (
2222 current_operation if current_operation != "IDLE" else None
2223 )
2224 db_dict["currentOperation"] = current_operation
2225 db_dict["currentOperationID"] = current_operation_id
2226 db_dict["errorDescription"] = error_description
2227 db_dict["errorDetail"] = error_detail
2228
2229 if ns_state:
2230 db_dict["nsState"] = ns_state
2231 self.update_db_2("nsrs", nsr_id, db_dict)
2232 except DbException as e:
2233 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2234
2235 def _write_op_status(
2236 self,
2237 op_id: str,
2238 stage: list = None,
2239 error_message: str = None,
2240 queuePosition: int = 0,
2241 operation_state: str = None,
2242 other_update: dict = None,
2243 ):
2244 try:
2245 db_dict = other_update or {}
2246 db_dict["queuePosition"] = queuePosition
2247 if isinstance(stage, list):
2248 db_dict["stage"] = stage[0]
2249 db_dict["detailed-status"] = " ".join(stage)
2250 elif stage is not None:
2251 db_dict["stage"] = str(stage)
2252
2253 if error_message is not None:
2254 db_dict["errorMessage"] = error_message
2255 if operation_state is not None:
2256 db_dict["operationState"] = operation_state
2257 db_dict["statusEnteredTime"] = time()
2258 self.update_db_2("nslcmops", op_id, db_dict)
2259 except DbException as e:
2260 self.logger.warn(
2261 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2262 )
2263
2264 def _write_all_config_status(self, db_nsr: dict, status: str):
2265 try:
2266 nsr_id = db_nsr["_id"]
2267 # configurationStatus
2268 config_status = db_nsr.get("configurationStatus")
2269 if config_status:
2270 db_nsr_update = {
2271 "configurationStatus.{}.status".format(index): status
2272 for index, v in enumerate(config_status)
2273 if v
2274 }
2275 # update status
2276 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2277
2278 except DbException as e:
2279 self.logger.warn(
2280 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2281 )
2282
2283 def _write_configuration_status(
2284 self,
2285 nsr_id: str,
2286 vca_index: int,
2287 status: str = None,
2288 element_under_configuration: str = None,
2289 element_type: str = None,
2290 other_update: dict = None,
2291 ):
2292 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2293 # .format(vca_index, status))
2294
2295 try:
2296 db_path = "configurationStatus.{}.".format(vca_index)
2297 db_dict = other_update or {}
2298 if status:
2299 db_dict[db_path + "status"] = status
2300 if element_under_configuration:
2301 db_dict[
2302 db_path + "elementUnderConfiguration"
2303 ] = element_under_configuration
2304 if element_type:
2305 db_dict[db_path + "elementType"] = element_type
2306 self.update_db_2("nsrs", nsr_id, db_dict)
2307 except DbException as e:
2308 self.logger.warn(
2309 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2310 status, nsr_id, vca_index, e
2311 )
2312 )
2313
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and compute the placement (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        # placement is only delegated when operationParams requests the "PLA" engine
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            # ask PLA via kafka; the answer comes back through the database, not kafka
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            # poll the nslcmop record: the PLA answer may be written by any LCM worker (HA)
            db_poll_interval = 5
            # total budget: 10 polls of db_poll_interval seconds each
            wait = db_poll_interval * 10
            pla_result = None
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # apply the computed vim account to each vnfr, both at DB and in memory
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a decision or without a matching vnfr
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2362
2363 def update_nsrs_with_pla_result(self, params):
2364 try:
2365 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2366 self.update_db_2(
2367 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2368 )
2369 except Exception as e:
2370 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2371
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a NS: read descriptors, deploy KDUs, VMs (through RO) and
        execution environments (N2VC/helm), and launch Day-1 configuration.
        Progress, result and errors are persisted in the nsrs/nslcmops records
        and the final state is notified on the kafka bus.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        timeout_ns_deploy = self.timeout.ns_deploy

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"] # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams is stored as a JSON string; decode it
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so this membership test on
                # vnfd_id can never match — vnfds shared by several vnfrs seem to be
                # re-read and appended more than once; confirm intended behavior
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.public_key:
                n2vc_key_list.append(self.vca_config.public_key)

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently; completion is awaited in the finally block
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            # create namespace and certificate if any helm based EE is present in the NS
            if check_helm_ee_in_ns(db_vnfds):
                # TODO: create EE namespace
                # create TLS certificates
                await self.vca_map["helm-v3"].create_tls_certificate(
                    secret_name="ee-tls-{}".format(nsr_id),
                    dns_prefix="*",
                    nsr_id=nsr_id,
                    usage="server auth",
                )

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            # deploy charms at VNF level, then per-VDU, then per-KDU
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None
                kdu_index = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        kdu_index=kdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        kdu_index = None
                        # one charm deployment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                kdu_index=kdu_index,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdu_index, kdur = next(
                            x
                            for x in enumerate(db_vnfr["kdur"])
                            if x[1]["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            kdu_index=kdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                kdu_index = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            # collect all errors, wait for launched tasks and persist the final result
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            # notify the final operation state on the kafka bus (best effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2875
2876 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2877 if vnfd_id not in cached_vnfds:
2878 cached_vnfds[vnfd_id] = self.db.get_one(
2879 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2880 )
2881 return cached_vnfds[vnfd_id]
2882
2883 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2884 if vnf_profile_id not in cached_vnfrs:
2885 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2886 "vnfrs",
2887 {
2888 "member-vnf-index-ref": vnf_profile_id,
2889 "nsr-id-ref": nsr_id,
2890 },
2891 )
2892 return cached_vnfrs[vnf_profile_id]
2893
2894 def _is_deployed_vca_in_relation(
2895 self, vca: DeployedVCA, relation: Relation
2896 ) -> bool:
2897 found = False
2898 for endpoint in (relation.provider, relation.requirer):
2899 if endpoint["kdu-resource-profile-id"]:
2900 continue
2901 found = (
2902 vca.vnf_profile_id == endpoint.vnf_profile_id
2903 and vca.vdu_profile_id == endpoint.vdu_profile_id
2904 and vca.execution_environment_ref == endpoint.execution_environment_ref
2905 )
2906 if found:
2907 break
2908 return found
2909
2910 def _update_ee_relation_data_with_implicit_data(
2911 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2912 ):
2913 ee_relation_data = safe_get_ee_relation(
2914 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2915 )
2916 ee_relation_level = EELevel.get_level(ee_relation_data)
2917 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2918 "execution-environment-ref"
2919 ]:
2920 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2921 vnfd_id = vnf_profile["vnfd-id"]
2922 project = nsd["_admin"]["projects_read"][0]
2923 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2924 entity_id = (
2925 vnfd_id
2926 if ee_relation_level == EELevel.VNF
2927 else ee_relation_data["vdu-profile-id"]
2928 )
2929 ee = get_juju_ee_ref(db_vnfd, entity_id)
2930 if not ee:
2931 raise Exception(
2932 f"not execution environments found for ee_relation {ee_relation_data}"
2933 )
2934 ee_relation_data["execution-environment-ref"] = ee["id"]
2935 return ee_relation_data
2936
2937 def _get_ns_relations(
2938 self,
2939 nsr_id: str,
2940 nsd: Dict[str, Any],
2941 vca: DeployedVCA,
2942 cached_vnfds: Dict[str, Any],
2943 ) -> List[Relation]:
2944 relations = []
2945 db_ns_relations = get_ns_configuration_relation_list(nsd)
2946 for r in db_ns_relations:
2947 provider_dict = None
2948 requirer_dict = None
2949 if all(key in r for key in ("provider", "requirer")):
2950 provider_dict = r["provider"]
2951 requirer_dict = r["requirer"]
2952 elif "entities" in r:
2953 provider_id = r["entities"][0]["id"]
2954 provider_dict = {
2955 "nsr-id": nsr_id,
2956 "endpoint": r["entities"][0]["endpoint"],
2957 }
2958 if provider_id != nsd["id"]:
2959 provider_dict["vnf-profile-id"] = provider_id
2960 requirer_id = r["entities"][1]["id"]
2961 requirer_dict = {
2962 "nsr-id": nsr_id,
2963 "endpoint": r["entities"][1]["endpoint"],
2964 }
2965 if requirer_id != nsd["id"]:
2966 requirer_dict["vnf-profile-id"] = requirer_id
2967 else:
2968 raise Exception(
2969 "provider/requirer or entities must be included in the relation."
2970 )
2971 relation_provider = self._update_ee_relation_data_with_implicit_data(
2972 nsr_id, nsd, provider_dict, cached_vnfds
2973 )
2974 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2975 nsr_id, nsd, requirer_dict, cached_vnfds
2976 )
2977 provider = EERelation(relation_provider)
2978 requirer = EERelation(relation_requirer)
2979 relation = Relation(r["name"], provider, requirer)
2980 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2981 if vca_in_relation:
2982 relations.append(relation)
2983 return relations
2984
2985 def _get_vnf_relations(
2986 self,
2987 nsr_id: str,
2988 nsd: Dict[str, Any],
2989 vca: DeployedVCA,
2990 cached_vnfds: Dict[str, Any],
2991 ) -> List[Relation]:
2992 relations = []
2993 if vca.target_element == "ns":
2994 self.logger.debug("VCA is a NS charm, not a VNF.")
2995 return relations
2996 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2997 vnf_profile_id = vnf_profile["id"]
2998 vnfd_id = vnf_profile["vnfd-id"]
2999 project = nsd["_admin"]["projects_read"][0]
3000 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3001 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3002 for r in db_vnf_relations:
3003 provider_dict = None
3004 requirer_dict = None
3005 if all(key in r for key in ("provider", "requirer")):
3006 provider_dict = r["provider"]
3007 requirer_dict = r["requirer"]
3008 elif "entities" in r:
3009 provider_id = r["entities"][0]["id"]
3010 provider_dict = {
3011 "nsr-id": nsr_id,
3012 "vnf-profile-id": vnf_profile_id,
3013 "endpoint": r["entities"][0]["endpoint"],
3014 }
3015 if provider_id != vnfd_id:
3016 provider_dict["vdu-profile-id"] = provider_id
3017 requirer_id = r["entities"][1]["id"]
3018 requirer_dict = {
3019 "nsr-id": nsr_id,
3020 "vnf-profile-id": vnf_profile_id,
3021 "endpoint": r["entities"][1]["endpoint"],
3022 }
3023 if requirer_id != vnfd_id:
3024 requirer_dict["vdu-profile-id"] = requirer_id
3025 else:
3026 raise Exception(
3027 "provider/requirer or entities must be included in the relation."
3028 )
3029 relation_provider = self._update_ee_relation_data_with_implicit_data(
3030 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3031 )
3032 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3033 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3034 )
3035 provider = EERelation(relation_provider)
3036 requirer = EERelation(relation_requirer)
3037 relation = Relation(r["name"], provider, requirer)
3038 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3039 if vca_in_relation:
3040 relations.append(relation)
3041 return relations
3042
3043 def _get_kdu_resource_data(
3044 self,
3045 ee_relation: EERelation,
3046 db_nsr: Dict[str, Any],
3047 cached_vnfds: Dict[str, Any],
3048 ) -> DeployedK8sResource:
3049 nsd = get_nsd(db_nsr)
3050 vnf_profiles = get_vnf_profiles(nsd)
3051 vnfd_id = find_in_list(
3052 vnf_profiles,
3053 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3054 )["vnfd-id"]
3055 project = nsd["_admin"]["projects_read"][0]
3056 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3057 kdu_resource_profile = get_kdu_resource_profile(
3058 db_vnfd, ee_relation.kdu_resource_profile_id
3059 )
3060 kdu_name = kdu_resource_profile["kdu-name"]
3061 deployed_kdu, _ = get_deployed_kdu(
3062 db_nsr.get("_admin", ()).get("deployed", ()),
3063 kdu_name,
3064 ee_relation.vnf_profile_id,
3065 )
3066 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3067 return deployed_kdu
3068
3069 def _get_deployed_component(
3070 self,
3071 ee_relation: EERelation,
3072 db_nsr: Dict[str, Any],
3073 cached_vnfds: Dict[str, Any],
3074 ) -> DeployedComponent:
3075 nsr_id = db_nsr["_id"]
3076 deployed_component = None
3077 ee_level = EELevel.get_level(ee_relation)
3078 if ee_level == EELevel.NS:
3079 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3080 if vca:
3081 deployed_component = DeployedVCA(nsr_id, vca)
3082 elif ee_level == EELevel.VNF:
3083 vca = get_deployed_vca(
3084 db_nsr,
3085 {
3086 "vdu_id": None,
3087 "member-vnf-index": ee_relation.vnf_profile_id,
3088 "ee_descriptor_id": ee_relation.execution_environment_ref,
3089 },
3090 )
3091 if vca:
3092 deployed_component = DeployedVCA(nsr_id, vca)
3093 elif ee_level == EELevel.VDU:
3094 vca = get_deployed_vca(
3095 db_nsr,
3096 {
3097 "vdu_id": ee_relation.vdu_profile_id,
3098 "member-vnf-index": ee_relation.vnf_profile_id,
3099 "ee_descriptor_id": ee_relation.execution_environment_ref,
3100 },
3101 )
3102 if vca:
3103 deployed_component = DeployedVCA(nsr_id, vca)
3104 elif ee_level == EELevel.KDU:
3105 kdu_resource_data = self._get_kdu_resource_data(
3106 ee_relation, db_nsr, cached_vnfds
3107 )
3108 if kdu_resource_data:
3109 deployed_component = DeployedK8sResource(kdu_resource_data)
3110 return deployed_component
3111
3112 async def _add_relation(
3113 self,
3114 relation: Relation,
3115 vca_type: str,
3116 db_nsr: Dict[str, Any],
3117 cached_vnfds: Dict[str, Any],
3118 cached_vnfrs: Dict[str, Any],
3119 ) -> bool:
3120 deployed_provider = self._get_deployed_component(
3121 relation.provider, db_nsr, cached_vnfds
3122 )
3123 deployed_requirer = self._get_deployed_component(
3124 relation.requirer, db_nsr, cached_vnfds
3125 )
3126 if (
3127 deployed_provider
3128 and deployed_requirer
3129 and deployed_provider.config_sw_installed
3130 and deployed_requirer.config_sw_installed
3131 ):
3132 provider_db_vnfr = (
3133 self._get_vnfr(
3134 relation.provider.nsr_id,
3135 relation.provider.vnf_profile_id,
3136 cached_vnfrs,
3137 )
3138 if relation.provider.vnf_profile_id
3139 else None
3140 )
3141 requirer_db_vnfr = (
3142 self._get_vnfr(
3143 relation.requirer.nsr_id,
3144 relation.requirer.vnf_profile_id,
3145 cached_vnfrs,
3146 )
3147 if relation.requirer.vnf_profile_id
3148 else None
3149 )
3150 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3151 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3152 provider_relation_endpoint = RelationEndpoint(
3153 deployed_provider.ee_id,
3154 provider_vca_id,
3155 relation.provider.endpoint,
3156 )
3157 requirer_relation_endpoint = RelationEndpoint(
3158 deployed_requirer.ee_id,
3159 requirer_vca_id,
3160 relation.requirer.endpoint,
3161 )
3162 try:
3163 await self.vca_map[vca_type].add_relation(
3164 provider=provider_relation_endpoint,
3165 requirer=requirer_relation_endpoint,
3166 )
3167 except N2VCException as exception:
3168 self.logger.error(exception)
3169 raise LcmException(exception)
3170 return True
3171 return False
3172
3173 async def _add_vca_relations(
3174 self,
3175 logging_text,
3176 nsr_id,
3177 vca_type: str,
3178 vca_index: int,
3179 timeout: int = 3600,
3180 ) -> bool:
3181 # steps:
3182 # 1. find all relations for this VCA
3183 # 2. wait for other peers related
3184 # 3. add relations
3185
3186 try:
3187 # STEP 1: find all relations for this VCA
3188
3189 # read nsr record
3190 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3191 nsd = get_nsd(db_nsr)
3192
3193 # this VCA data
3194 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3195 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3196
3197 cached_vnfds = {}
3198 cached_vnfrs = {}
3199 relations = []
3200 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3201 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3202
3203 # if no relations, terminate
3204 if not relations:
3205 self.logger.debug(logging_text + " No relations")
3206 return True
3207
3208 self.logger.debug(logging_text + " adding relations {}".format(relations))
3209
3210 # add all relations
3211 start = time()
3212 while True:
3213 # check timeout
3214 now = time()
3215 if now - start >= timeout:
3216 self.logger.error(logging_text + " : timeout adding relations")
3217 return False
3218
3219 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3220 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3221
3222 # for each relation, find the VCA's related
3223 for relation in relations.copy():
3224 added = await self._add_relation(
3225 relation,
3226 vca_type,
3227 db_nsr,
3228 cached_vnfds,
3229 cached_vnfrs,
3230 )
3231 if added:
3232 relations.remove(relation)
3233
3234 if not relations:
3235 self.logger.debug("Relations added")
3236 break
3237 await asyncio.sleep(5.0)
3238
3239 return True
3240
3241 except Exception as e:
3242 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3243 return False
3244
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and run its initial primitives.

        Instantiates the KDU described by *k8s_instance_info* through the
        matching K8s connector (helm v2/v3 or juju-bundle), records progress
        under *nsr_db_path* in the "nsrs" collection, extracts the management
        service IP into the vnfr, and finally executes any
        initial-config-primitive not delegated to an execution environment.

        :param nsr_db_path: dotted db path ("_admin.deployed.K8s.<n>") for
            status updates of this deployment
        :param k8params: already-parsed additional params for the KDU
        :return: the kdu_instance name assigned to the deployment
        :raises: re-raises any failure after storing the error in the db
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honour an explicitly requested deployment name; otherwise let
            # the connector generate a unique instance name
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # only services flagged as mgmt-service in the KDU descriptor
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            # run initial primitives directly only when no juju execution
            # environment is declared for this KDU (otherwise the EE does it)
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3441
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the instantiation of every KDU present in the vnfrs.

        For each kdur in *db_vnfrs* this resolves the target K8s cluster
        (initializing helm-v3 on demand for backward compatibility),
        synchronizes the helm repos once per cluster, records the deployment
        entry under "_admin.deployed.K8s" in the nsr, and spawns an asyncio
        task running _install_kdu, registered in *task_instantiation_info*.

        :raises LcmException: when a cluster is missing/uninitialized or on
            any unexpected error while preparing the deployments.
        """
        # Launch kdus if present in the descriptor

        # cache of cluster-id -> internal uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # resolve (and cache) the connector-internal id of a K8s cluster
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3713
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment.

        For each execution environment (juju charm or helm chart) found in
        *descriptor_config*, find or create the corresponding entry in
        <nsrs>._admin.deployed.VCA and spawn an asyncio task running
        instantiate_N2VC, registering it in *task_instantiation_info*.
        Entries that are neither juju nor helm are skipped.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # proxy charm unless the descriptor says otherwise
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an existing VCA record matching this target; the
            # for/else creates a new record when none is found
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3877
3878 @staticmethod
3879 def _create_nslcmop(nsr_id, operation, params):
3880 """
3881 Creates a ns-lcm-opp content to be stored at database.
3882 :param nsr_id: internal id of the instance
3883 :param operation: instantiate, terminate, scale, action, ...
3884 :param params: user parameters for the operation
3885 :return: dictionary following SOL005 format
3886 """
3887 # Raise exception if invalid arguments
3888 if not (nsr_id and operation and params):
3889 raise LcmException(
3890 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3891 )
3892 now = time()
3893 _id = str(uuid4())
3894 nslcmop = {
3895 "id": _id,
3896 "_id": _id,
3897 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3898 "operationState": "PROCESSING",
3899 "statusEnteredTime": now,
3900 "nsInstanceId": nsr_id,
3901 "lcmOperationType": operation,
3902 "startTime": now,
3903 "isAutomaticInvocation": False,
3904 "operationParams": params,
3905 "isCancelPending": False,
3906 "links": {
3907 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3908 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3909 },
3910 }
3911 return nslcmop
3912
3913 def _format_additional_params(self, params):
3914 params = params or {}
3915 for key, value in params.items():
3916 if str(value).startswith("!!yaml "):
3917 params[key] = yaml.safe_load(value[7:])
3918 return params
3919
3920 def _get_terminate_primitive_params(self, seq, vnf_index):
3921 primitive = seq.get("name")
3922 primitive_params = {}
3923 params = {
3924 "member_vnf_index": vnf_index,
3925 "primitive": primitive,
3926 "primitive_params": primitive_params,
3927 }
3928 desc_params = {}
3929 return self._map_primitive_params(seq, params, desc_params)
3930
3931 # sub-operations
3932
3933 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3934 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3935 if op.get("operationState") == "COMPLETED":
3936 # b. Skip sub-operation
3937 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3938 return self.SUBOPERATION_STATUS_SKIP
3939 else:
3940 # c. retry executing sub-operation
3941 # The sub-operation exists, and operationState != 'COMPLETED'
3942 # Update operationState = 'PROCESSING' to indicate a retry.
3943 operationState = "PROCESSING"
3944 detailed_status = "In progress"
3945 self._update_suboperation_status(
3946 db_nslcmop, op_index, operationState, detailed_status
3947 )
3948 # Return the sub-operation index
3949 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3950 # with arguments extracted from the sub-operation
3951 return op_index
3952
3953 # Find a sub-operation where all keys in a matching dictionary must match
3954 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3955 def _find_suboperation(self, db_nslcmop, match):
3956 if db_nslcmop and match:
3957 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3958 for i, op in enumerate(op_list):
3959 if all(op.get(k) == match[k] for k in match):
3960 return i
3961 return self.SUBOPERATION_STATUS_NOT_FOUND
3962
3963 # Update status for a sub-operation given its index
3964 def _update_suboperation_status(
3965 self, db_nslcmop, op_index, operationState, detailed_status
3966 ):
3967 # Update DB for HA tasks
3968 q_filter = {"_id": db_nslcmop["_id"]}
3969 update_dict = {
3970 "_admin.operations.{}.operationState".format(op_index): operationState,
3971 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3972 }
3973 self.db.set_one(
3974 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3975 )
3976
3977 # Add sub-operation, return the index of the added sub-operation
3978 # Optionally, set operationState, detailed-status, and operationType
3979 # Status and type are currently set for 'scale' sub-operations:
3980 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3981 # 'detailed-status' : status message
3982 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3983 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3984 def _add_suboperation(
3985 self,
3986 db_nslcmop,
3987 vnf_index,
3988 vdu_id,
3989 vdu_count_index,
3990 vdu_name,
3991 primitive,
3992 mapped_primitive_params,
3993 operationState=None,
3994 detailed_status=None,
3995 operationType=None,
3996 RO_nsr_id=None,
3997 RO_scaling_info=None,
3998 ):
3999 if not db_nslcmop:
4000 return self.SUBOPERATION_STATUS_NOT_FOUND
4001 # Get the "_admin.operations" list, if it exists
4002 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4003 op_list = db_nslcmop_admin.get("operations")
4004 # Create or append to the "_admin.operations" list
4005 new_op = {
4006 "member_vnf_index": vnf_index,
4007 "vdu_id": vdu_id,
4008 "vdu_count_index": vdu_count_index,
4009 "primitive": primitive,
4010 "primitive_params": mapped_primitive_params,
4011 }
4012 if operationState:
4013 new_op["operationState"] = operationState
4014 if detailed_status:
4015 new_op["detailed-status"] = detailed_status
4016 if operationType:
4017 new_op["lcmOperationType"] = operationType
4018 if RO_nsr_id:
4019 new_op["RO_nsr_id"] = RO_nsr_id
4020 if RO_scaling_info:
4021 new_op["RO_scaling_info"] = RO_scaling_info
4022 if not op_list:
4023 # No existing operations, create key 'operations' with current operation as first list element
4024 db_nslcmop_admin.update({"operations": [new_op]})
4025 op_list = db_nslcmop_admin.get("operations")
4026 else:
4027 # Existing operations, append operation to list
4028 op_list.append(new_op)
4029
4030 db_nslcmop_update = {"_admin.operations": op_list}
4031 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4032 op_index = len(op_list) - 1
4033 return op_index
4034
4035 # Helper methods for scale() sub-operations
4036
4037 # pre-scale/post-scale:
4038 # Check for 3 different cases:
4039 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4040 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4041 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4042 def _check_or_add_scale_suboperation(
4043 self,
4044 db_nslcmop,
4045 vnf_index,
4046 vnf_config_primitive,
4047 primitive_params,
4048 operationType,
4049 RO_nsr_id=None,
4050 RO_scaling_info=None,
4051 ):
4052 # Find this sub-operation
4053 if RO_nsr_id and RO_scaling_info:
4054 operationType = "SCALE-RO"
4055 match = {
4056 "member_vnf_index": vnf_index,
4057 "RO_nsr_id": RO_nsr_id,
4058 "RO_scaling_info": RO_scaling_info,
4059 }
4060 else:
4061 match = {
4062 "member_vnf_index": vnf_index,
4063 "primitive": vnf_config_primitive,
4064 "primitive_params": primitive_params,
4065 "lcmOperationType": operationType,
4066 }
4067 op_index = self._find_suboperation(db_nslcmop, match)
4068 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4069 # a. New sub-operation
4070 # The sub-operation does not exist, add it.
4071 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4072 # The following parameters are set to None for all kind of scaling:
4073 vdu_id = None
4074 vdu_count_index = None
4075 vdu_name = None
4076 if RO_nsr_id and RO_scaling_info:
4077 vnf_config_primitive = None
4078 primitive_params = None
4079 else:
4080 RO_nsr_id = None
4081 RO_scaling_info = None
4082 # Initial status for sub-operation
4083 operationState = "PROCESSING"
4084 detailed_status = "In progress"
4085 # Add sub-operation for pre/post-scaling (zero or more operations)
4086 self._add_suboperation(
4087 db_nslcmop,
4088 vnf_index,
4089 vdu_id,
4090 vdu_count_index,
4091 vdu_name,
4092 vnf_config_primitive,
4093 primitive_params,
4094 operationState,
4095 detailed_status,
4096 operationType,
4097 RO_nsr_id,
4098 RO_scaling_info,
4099 )
4100 return self.SUBOPERATION_STATUS_NEW
4101 else:
4102 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4103 # or op_index (operationState != 'COMPLETED')
4104 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4105
4106 # Function to return execution_environment id
4107
4108 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4109 # TODO vdu_index_count
4110 for vca in vca_deployed_list:
4111 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4112 return vca["ee_id"]
4113
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database record; terminate sub-operations are appended to it
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy it here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) identifier, when a non-default VCA is used
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type for backward compatibility - proxy charm
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            # helm/native charms need their EE destroyed individually here
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4219
4220 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4221 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4222 namespace = "." + db_nsr["_id"]
4223 try:
4224 await self.n2vc.delete_namespace(
4225 namespace=namespace,
4226 total_timeout=self.timeout.charm_delete,
4227 vca_id=vca_id,
4228 )
4229 except N2VCNotFound: # already deleted. Skip
4230 pass
4231 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4232
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a Network Service.

        Runs in three stages: (1) prepare the task and read records from the
        database, (2) execute the VCA terminate primitives, (3) delete all
        execution environments, KDUs and the VIM deployment at once.
        Progress and the final result are written to the nsr/nslcmop records
        and notified through kafka.

        :param nsr_id: nsr database _id
        :param nslcmop_id: nslcmop database _id of this terminate operation
        :return: None (outcome is persisted to the database)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy of the deployed info; nothing to do if never instantiated
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each vnfd the first time it is read
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching the VCA level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                # NOTE(review): this "exc" shadows the outer variable of the
                # same name; harmless here since the outer one was consumed
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify NBI so it can autoremove the NS record if requested
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4562
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait until all given tasks finish, gathering errors and updating progress.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping each asyncio task to a human-readable description
        :param timeout: overall timeout in seconds for the whole set of tasks
        :param stage: [stage, step, VIM-status] list; stage[1] is updated in place with progress/errors
        :param nslcmop_id: operation id used to persist the operation status
        :param nsr_id: when provided, the error summary is also written to this nsr record
        :return: list of detailed error strings (empty if all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # expected exception types are logged without traceback;
                    # anything else is unexpected and logged with full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4639
4640 @staticmethod
4641 def _map_primitive_params(primitive_desc, params, instantiation_params):
4642 """
4643 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4644 The default-value is used. If it is between < > it look for a value at instantiation_params
4645 :param primitive_desc: portion of VNFD/NSD that describes primitive
4646 :param params: Params provided by user
4647 :param instantiation_params: Instantiation params provided by user
4648 :return: a dictionary with the calculated params
4649 """
4650 calculated_params = {}
4651 for parameter in primitive_desc.get("parameter", ()):
4652 param_name = parameter["name"]
4653 if param_name in params:
4654 calculated_params[param_name] = params[param_name]
4655 elif "default-value" in parameter or "value" in parameter:
4656 if "value" in parameter:
4657 calculated_params[param_name] = parameter["value"]
4658 else:
4659 calculated_params[param_name] = parameter["default-value"]
4660 if (
4661 isinstance(calculated_params[param_name], str)
4662 and calculated_params[param_name].startswith("<")
4663 and calculated_params[param_name].endswith(">")
4664 ):
4665 if calculated_params[param_name][1:-1] in instantiation_params:
4666 calculated_params[param_name] = instantiation_params[
4667 calculated_params[param_name][1:-1]
4668 ]
4669 else:
4670 raise LcmException(
4671 "Parameter {} needed to execute primitive {} not provided".format(
4672 calculated_params[param_name], primitive_desc["name"]
4673 )
4674 )
4675 else:
4676 raise LcmException(
4677 "Parameter {} needed to execute primitive {} not provided".format(
4678 param_name, primitive_desc["name"]
4679 )
4680 )
4681
4682 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4683 calculated_params[param_name] = yaml.safe_dump(
4684 calculated_params[param_name], default_flow_style=True, width=256
4685 )
4686 elif isinstance(calculated_params[param_name], str) and calculated_params[
4687 param_name
4688 ].startswith("!!yaml "):
4689 calculated_params[param_name] = calculated_params[param_name][7:]
4690 if parameter.get("data-type") == "INTEGER":
4691 try:
4692 calculated_params[param_name] = int(calculated_params[param_name])
4693 except ValueError: # error converting string to int
4694 raise LcmException(
4695 "Parameter {} of primitive {} must be integer".format(
4696 param_name, primitive_desc["name"]
4697 )
4698 )
4699 elif parameter.get("data-type") == "BOOLEAN":
4700 calculated_params[param_name] = not (
4701 (str(calculated_params[param_name])).lower() == "false"
4702 )
4703
4704 # add always ns_config_info if primitive name is config
4705 if primitive_desc["name"] == "config":
4706 if "ns_config_info" in instantiation_params:
4707 calculated_params["ns_config_info"] = instantiation_params[
4708 "ns_config_info"
4709 ]
4710 return calculated_params
4711
4712 def _look_for_deployed_vca(
4713 self,
4714 deployed_vca,
4715 member_vnf_index,
4716 vdu_id,
4717 vdu_count_index,
4718 kdu_name=None,
4719 ee_descriptor_id=None,
4720 ):
4721 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4722 for vca in deployed_vca:
4723 if not vca:
4724 continue
4725 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4726 continue
4727 if (
4728 vdu_count_index is not None
4729 and vdu_count_index != vca["vdu_count_index"]
4730 ):
4731 continue
4732 if kdu_name and kdu_name != vca["kdu_name"]:
4733 continue
4734 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4735 continue
4736 break
4737 else:
4738 # vca_deployed not found
4739 raise LcmException(
4740 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4741 " is not deployed".format(
4742 member_vnf_index,
4743 vdu_id,
4744 vdu_count_index,
4745 kdu_name,
4746 ee_descriptor_id,
4747 )
4748 )
4749 # get ee_id
4750 ee_id = vca.get("ee_id")
4751 vca_type = vca.get(
4752 "type", "lxc_proxy_charm"
4753 ) # default value for backward compatibility - proxy charm
4754 if not ee_id:
4755 raise LcmException(
4756 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4757 "execution environment".format(
4758 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4759 )
4760 )
4761 return ee_id, vca_type
4762
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive in an execution environment, with optional retries.

        :param ee_id: execution environment id
        :param primitive: primitive (action) name; params of the special
            "config" primitive are wrapped as {"params": ...}
        :param primitive_params: dict of params for the primitive
        :param retries: number of additional attempts after a failed one
        :param retries_interval: seconds to wait between attempts
        :param timeout: per-attempt timeout (defaults to self.timeout.primitive)
        :param vca_type: key of self.vca_map (defaults to "lxc_proxy_charm")
        :param db_dict: database info where to write the primitive status
        :param vca_id: VCA (juju controller) identifier, when applicable
        :return: tuple (state, detail): ("COMPLETED", output) on success,
            ("FAILED", message) when all attempts fail, or ("FAIL", message)
            on unexpected errors
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout.progress_primitive,
                            total_timeout=self.timeout.primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout.primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        # NOTE(review): the "loop" kwarg is deprecated/removed
                        # in modern asyncio — confirm against the runtime used
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            # NOTE(review): "FAIL" differs from the "FAILED" state returned
            # above — confirm callers only test for "COMPLETED"/result_ok
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4823
4824 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4825 """
4826 Updating the vca_status with latest juju information in nsrs record
4827 :param: nsr_id: Id of the nsr
4828 :param: nslcmop_id: Id of the nslcmop
4829 :return: None
4830 """
4831
4832 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4833 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4834 vca_id = self.get_vca_id({}, db_nsr)
4835 if db_nsr["_admin"]["deployed"]["K8s"]:
4836 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4837 cluster_uuid, kdu_instance, cluster_type = (
4838 k8s["k8scluster-uuid"],
4839 k8s["kdu-instance"],
4840 k8s["k8scluster-type"],
4841 )
4842 await self._on_update_k8s_db(
4843 cluster_uuid=cluster_uuid,
4844 kdu_instance=kdu_instance,
4845 filter={"_id": nsr_id},
4846 vca_id=vca_id,
4847 cluster_type=cluster_type,
4848 )
4849 else:
4850 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4851 table, filter = "nsrs", {"_id": nsr_id}
4852 path = "_admin.deployed.VCA.{}.".format(vca_index)
4853 await self._on_update_n2vc_db(table, filter, path, {})
4854
4855 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4856 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4857
4858 async def action(self, nsr_id, nslcmop_id):
4859 # Try to lock HA task here
4860 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4861 if not task_is_locked_by_me:
4862 return
4863
4864 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4865 self.logger.debug(logging_text + "Enter")
4866 # get all needed from database
4867 db_nsr = None
4868 db_nslcmop = None
4869 db_nsr_update = {}
4870 db_nslcmop_update = {}
4871 nslcmop_operation_state = None
4872 error_description_nslcmop = None
4873 exc = None
4874 step = ""
4875 try:
4876 # wait for any previous tasks in process
4877 step = "Waiting for previous operations to terminate"
4878 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4879
4880 self._write_ns_status(
4881 nsr_id=nsr_id,
4882 ns_state=None,
4883 current_operation="RUNNING ACTION",
4884 current_operation_id=nslcmop_id,
4885 )
4886
4887 step = "Getting information from database"
4888 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4889 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4890 if db_nslcmop["operationParams"].get("primitive_params"):
4891 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4892 db_nslcmop["operationParams"]["primitive_params"]
4893 )
4894
4895 nsr_deployed = db_nsr["_admin"].get("deployed")
4896 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4897 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4898 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4899 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4900 primitive = db_nslcmop["operationParams"]["primitive"]
4901 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4902 timeout_ns_action = db_nslcmop["operationParams"].get(
4903 "timeout_ns_action", self.timeout.primitive
4904 )
4905
4906 if vnf_index:
4907 step = "Getting vnfr from database"
4908 db_vnfr = self.db.get_one(
4909 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4910 )
4911 if db_vnfr.get("kdur"):
4912 kdur_list = []
4913 for kdur in db_vnfr["kdur"]:
4914 if kdur.get("additionalParams"):
4915 kdur["additionalParams"] = json.loads(
4916 kdur["additionalParams"]
4917 )
4918 kdur_list.append(kdur)
4919 db_vnfr["kdur"] = kdur_list
4920 step = "Getting vnfd from database"
4921 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4922
4923 # Sync filesystem before running a primitive
4924 self.fs.sync(db_vnfr["vnfd-id"])
4925 else:
4926 step = "Getting nsd from database"
4927 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4928
4929 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4930 # for backward compatibility
4931 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4932 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4933 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4934 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4935
4936 # look for primitive
4937 config_primitive_desc = descriptor_configuration = None
4938 if vdu_id:
4939 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4940 elif kdu_name:
4941 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4942 elif vnf_index:
4943 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4944 else:
4945 descriptor_configuration = db_nsd.get("ns-configuration")
4946
4947 if descriptor_configuration and descriptor_configuration.get(
4948 "config-primitive"
4949 ):
4950 for config_primitive in descriptor_configuration["config-primitive"]:
4951 if config_primitive["name"] == primitive:
4952 config_primitive_desc = config_primitive
4953 break
4954
4955 if not config_primitive_desc:
4956 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4957 raise LcmException(
4958 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4959 primitive
4960 )
4961 )
4962 primitive_name = primitive
4963 ee_descriptor_id = None
4964 else:
4965 primitive_name = config_primitive_desc.get(
4966 "execution-environment-primitive", primitive
4967 )
4968 ee_descriptor_id = config_primitive_desc.get(
4969 "execution-environment-ref"
4970 )
4971
4972 if vnf_index:
4973 if vdu_id:
4974 vdur = next(
4975 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4976 )
4977 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4978 elif kdu_name:
4979 kdur = next(
4980 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4981 )
4982 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4983 else:
4984 desc_params = parse_yaml_strings(
4985 db_vnfr.get("additionalParamsForVnf")
4986 )
4987 else:
4988 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4989 if kdu_name and get_configuration(db_vnfd, kdu_name):
4990 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4991 actions = set()
4992 for primitive in kdu_configuration.get("initial-config-primitive", []):
4993 actions.add(primitive["name"])
4994 for primitive in kdu_configuration.get("config-primitive", []):
4995 actions.add(primitive["name"])
4996 kdu = find_in_list(
4997 nsr_deployed["K8s"],
4998 lambda kdu: kdu_name == kdu["kdu-name"]
4999 and kdu["member-vnf-index"] == vnf_index,
5000 )
5001 kdu_action = (
5002 True
5003 if primitive_name in actions
5004 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5005 else False
5006 )
5007
5008 # TODO check if ns is in a proper status
5009 if kdu_name and (
5010 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5011 ):
5012 # kdur and desc_params already set from before
5013 if primitive_params:
5014 desc_params.update(primitive_params)
5015 # TODO Check if we will need something at vnf level
5016 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5017 if (
5018 kdu_name == kdu["kdu-name"]
5019 and kdu["member-vnf-index"] == vnf_index
5020 ):
5021 break
5022 else:
5023 raise LcmException(
5024 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5025 )
5026
5027 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5028 msg = "unknown k8scluster-type '{}'".format(
5029 kdu.get("k8scluster-type")
5030 )
5031 raise LcmException(msg)
5032
5033 db_dict = {
5034 "collection": "nsrs",
5035 "filter": {"_id": nsr_id},
5036 "path": "_admin.deployed.K8s.{}".format(index),
5037 }
5038 self.logger.debug(
5039 logging_text
5040 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5041 )
5042 step = "Executing kdu {}".format(primitive_name)
5043 if primitive_name == "upgrade":
5044 if desc_params.get("kdu_model"):
5045 kdu_model = desc_params.get("kdu_model")
5046 del desc_params["kdu_model"]
5047 else:
5048 kdu_model = kdu.get("kdu-model")
5049 parts = kdu_model.split(sep=":")
5050 if len(parts) == 2:
5051 kdu_model = parts[0]
5052 if desc_params.get("kdu_atomic_upgrade"):
5053 atomic_upgrade = desc_params.get(
5054 "kdu_atomic_upgrade"
5055 ).lower() in ("yes", "true", "1")
5056 del desc_params["kdu_atomic_upgrade"]
5057 else:
5058 atomic_upgrade = True
5059
5060 detailed_status = await asyncio.wait_for(
5061 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5062 cluster_uuid=kdu.get("k8scluster-uuid"),
5063 kdu_instance=kdu.get("kdu-instance"),
5064 atomic=atomic_upgrade,
5065 kdu_model=kdu_model,
5066 params=desc_params,
5067 db_dict=db_dict,
5068 timeout=timeout_ns_action,
5069 ),
5070 timeout=timeout_ns_action + 10,
5071 )
5072 self.logger.debug(
5073 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5074 )
5075 elif primitive_name == "rollback":
5076 detailed_status = await asyncio.wait_for(
5077 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5078 cluster_uuid=kdu.get("k8scluster-uuid"),
5079 kdu_instance=kdu.get("kdu-instance"),
5080 db_dict=db_dict,
5081 ),
5082 timeout=timeout_ns_action,
5083 )
5084 elif primitive_name == "status":
5085 detailed_status = await asyncio.wait_for(
5086 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5087 cluster_uuid=kdu.get("k8scluster-uuid"),
5088 kdu_instance=kdu.get("kdu-instance"),
5089 vca_id=vca_id,
5090 ),
5091 timeout=timeout_ns_action,
5092 )
5093 else:
5094 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5095 kdu["kdu-name"], nsr_id
5096 )
5097 params = self._map_primitive_params(
5098 config_primitive_desc, primitive_params, desc_params
5099 )
5100
5101 detailed_status = await asyncio.wait_for(
5102 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5103 cluster_uuid=kdu.get("k8scluster-uuid"),
5104 kdu_instance=kdu_instance,
5105 primitive_name=primitive_name,
5106 params=params,
5107 db_dict=db_dict,
5108 timeout=timeout_ns_action,
5109 vca_id=vca_id,
5110 ),
5111 timeout=timeout_ns_action,
5112 )
5113
5114 if detailed_status:
5115 nslcmop_operation_state = "COMPLETED"
5116 else:
5117 detailed_status = ""
5118 nslcmop_operation_state = "FAILED"
5119 else:
5120 ee_id, vca_type = self._look_for_deployed_vca(
5121 nsr_deployed["VCA"],
5122 member_vnf_index=vnf_index,
5123 vdu_id=vdu_id,
5124 vdu_count_index=vdu_count_index,
5125 ee_descriptor_id=ee_descriptor_id,
5126 )
5127 for vca_index, vca_deployed in enumerate(
5128 db_nsr["_admin"]["deployed"]["VCA"]
5129 ):
5130 if vca_deployed.get("member-vnf-index") == vnf_index:
5131 db_dict = {
5132 "collection": "nsrs",
5133 "filter": {"_id": nsr_id},
5134 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5135 }
5136 break
5137 (
5138 nslcmop_operation_state,
5139 detailed_status,
5140 ) = await self._ns_execute_primitive(
5141 ee_id,
5142 primitive=primitive_name,
5143 primitive_params=self._map_primitive_params(
5144 config_primitive_desc, primitive_params, desc_params
5145 ),
5146 timeout=timeout_ns_action,
5147 vca_type=vca_type,
5148 db_dict=db_dict,
5149 vca_id=vca_id,
5150 )
5151
5152 db_nslcmop_update["detailed-status"] = detailed_status
5153 error_description_nslcmop = (
5154 detailed_status if nslcmop_operation_state == "FAILED" else ""
5155 )
5156 self.logger.debug(
5157 logging_text
5158 + "Done with result {} {}".format(
5159 nslcmop_operation_state, detailed_status
5160 )
5161 )
5162 return # database update is called inside finally
5163
5164 except (DbException, LcmException, N2VCException, K8sException) as e:
5165 self.logger.error(logging_text + "Exit Exception {}".format(e))
5166 exc = e
5167 except asyncio.CancelledError:
5168 self.logger.error(
5169 logging_text + "Cancelled Exception while '{}'".format(step)
5170 )
5171 exc = "Operation was cancelled"
5172 except asyncio.TimeoutError:
5173 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5174 exc = "Timeout"
5175 except Exception as e:
5176 exc = traceback.format_exc()
5177 self.logger.critical(
5178 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5179 exc_info=True,
5180 )
5181 finally:
5182 if exc:
5183 db_nslcmop_update[
5184 "detailed-status"
5185 ] = (
5186 detailed_status
5187 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5188 nslcmop_operation_state = "FAILED"
5189 if db_nsr:
5190 self._write_ns_status(
5191 nsr_id=nsr_id,
5192 ns_state=db_nsr[
5193 "nsState"
5194 ], # TODO check if degraded. For the moment use previous status
5195 current_operation="IDLE",
5196 current_operation_id=None,
5197 # error_description=error_description_nsr,
5198 # error_detail=error_detail,
5199 other_update=db_nsr_update,
5200 )
5201
5202 self._write_op_status(
5203 op_id=nslcmop_id,
5204 stage="",
5205 error_message=error_description_nslcmop,
5206 operation_state=nslcmop_operation_state,
5207 other_update=db_nslcmop_update,
5208 )
5209
5210 if nslcmop_operation_state:
5211 try:
5212 await self.msg.aiowrite(
5213 "ns",
5214 "actioned",
5215 {
5216 "nsr_id": nsr_id,
5217 "nslcmop_id": nslcmop_id,
5218 "operationState": nslcmop_operation_state,
5219 },
5220 loop=self.loop,
5221 )
5222 except Exception as e:
5223 self.logger.error(
5224 logging_text + "kafka_write notification Exception {}".format(e)
5225 )
5226 self.logger.debug(logging_text + "Exit")
5227 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5228 return nslcmop_operation_state, detailed_status
5229
5230 async def terminate_vdus(
5231 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5232 ):
5233 """This method terminates VDUs
5234
5235 Args:
5236 db_vnfr: VNF instance record
5237 member_vnf_index: VNF index to identify the VDUs to be removed
5238 db_nsr: NS instance record
5239 update_db_nslcmops: Nslcmop update record
5240 """
5241 vca_scaling_info = []
5242 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5243 scaling_info["scaling_direction"] = "IN"
5244 scaling_info["vdu-delete"] = {}
5245 scaling_info["kdu-delete"] = {}
5246 db_vdur = db_vnfr.get("vdur")
5247 vdur_list = copy(db_vdur)
5248 count_index = 0
5249 for index, vdu in enumerate(vdur_list):
5250 vca_scaling_info.append(
5251 {
5252 "osm_vdu_id": vdu["vdu-id-ref"],
5253 "member-vnf-index": member_vnf_index,
5254 "type": "delete",
5255 "vdu_index": count_index,
5256 }
5257 )
5258 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5259 scaling_info["vdu"].append(
5260 {
5261 "name": vdu.get("name") or vdu.get("vdu-name"),
5262 "vdu_id": vdu["vdu-id-ref"],
5263 "interface": [],
5264 }
5265 )
5266 for interface in vdu["interfaces"]:
5267 scaling_info["vdu"][index]["interface"].append(
5268 {
5269 "name": interface["name"],
5270 "ip_address": interface["ip-address"],
5271 "mac_address": interface.get("mac-address"),
5272 }
5273 )
5274 self.logger.info("NS update scaling info{}".format(scaling_info))
5275 stage[2] = "Terminating VDUs"
5276 if scaling_info.get("vdu-delete"):
5277 # scale_process = "RO"
5278 if self.ro_config.ng:
5279 await self._scale_ng_ro(
5280 logging_text,
5281 db_nsr,
5282 update_db_nslcmops,
5283 db_vnfr,
5284 scaling_info,
5285 stage,
5286 )
5287
5288 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5289 """This method is to Remove VNF instances from NS.
5290
5291 Args:
5292 nsr_id: NS instance id
5293 nslcmop_id: nslcmop id of update
5294 vnf_instance_id: id of the VNF instance to be removed
5295
5296 Returns:
5297 result: (str, str) COMPLETED/FAILED, details
5298 """
5299 try:
5300 db_nsr_update = {}
5301 logging_text = "Task ns={} update ".format(nsr_id)
5302 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5303 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5304 if check_vnfr_count > 1:
5305 stage = ["", "", ""]
5306 step = "Getting nslcmop from database"
5307 self.logger.debug(
5308 step + " after having waited for previous tasks to be completed"
5309 )
5310 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5311 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5312 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5313 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5314 """ db_vnfr = self.db.get_one(
5315 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5316
5317 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5318 await self.terminate_vdus(
5319 db_vnfr,
5320 member_vnf_index,
5321 db_nsr,
5322 update_db_nslcmops,
5323 stage,
5324 logging_text,
5325 )
5326
5327 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5328 constituent_vnfr.remove(db_vnfr.get("_id"))
5329 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5330 "constituent-vnfr-ref"
5331 )
5332 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5333 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5334 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5335 return "COMPLETED", "Done"
5336 else:
5337 step = "Terminate VNF Failed with"
5338 raise LcmException(
5339 "{} Cannot terminate the last VNF in this NS.".format(
5340 vnf_instance_id
5341 )
5342 )
5343 except (LcmException, asyncio.CancelledError):
5344 raise
5345 except Exception as e:
5346 self.logger.debug("Error removing VNF {}".format(e))
5347 return "FAILED", "Error removing VNF {}".format(e)
5348
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """Update and redeploy a VNF instance.

        Terminates the VNF's existing VDUs, refreshes the VNF record
        (revision, connection points, VDU records taken from the operation
        parameters) and instantiates the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor (its _admin.revision is written to the vnfr)
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index is never incremented, so every VDU is
            # registered with index 0 in vdu-create — confirm this is intended.
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the vnfr connection points from the descriptor's
            # external connection-point definitions (ext-cpd).
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # The new VDU records are supplied by the operation parameters.
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # Re-read the vnfr so subsequent steps work on the refreshed record.
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            # Scale-out descriptor: re-create every VDU from the descriptor.
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is populated but not passed to
                # _scale_ng_ro or used afterwards — verify whether the parsed
                # cloud-init is expected to be consumed elsewhere.
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5474
5475 async def _ns_charm_upgrade(
5476 self,
5477 ee_id,
5478 charm_id,
5479 charm_type,
5480 path,
5481 timeout: float = None,
5482 ) -> (str, str):
5483 """This method upgrade charms in VNF instances
5484
5485 Args:
5486 ee_id: Execution environment id
5487 path: Local path to the charm
5488 charm_id: charm-id
5489 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5490 timeout: (Float) Timeout for the ns update operation
5491
5492 Returns:
5493 result: (str, str) COMPLETED/FAILED, details
5494 """
5495 try:
5496 charm_type = charm_type or "lxc_proxy_charm"
5497 output = await self.vca_map[charm_type].upgrade_charm(
5498 ee_id=ee_id,
5499 path=path,
5500 charm_id=charm_id,
5501 charm_type=charm_type,
5502 timeout=timeout or self.timeout.ns_update,
5503 )
5504
5505 if output:
5506 return "COMPLETED", output
5507
5508 except (LcmException, asyncio.CancelledError):
5509 raise
5510
5511 except Exception as e:
5512 self.logger.debug("Error upgrading charm {}".format(path))
5513
5514 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5515
5516 async def update(self, nsr_id, nslcmop_id):
5517 """Update NS according to different update types
5518
5519 This method performs upgrade of VNF instances then updates the revision
5520 number in VNF record
5521
5522 Args:
5523 nsr_id: Network service will be updated
5524 nslcmop_id: ns lcm operation id
5525
5526 Returns:
5527 It may raise DbException, LcmException, N2VCException, K8sException
5528
5529 """
5530 # Try to lock HA task here
5531 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5532 if not task_is_locked_by_me:
5533 return
5534
5535 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5536 self.logger.debug(logging_text + "Enter")
5537
5538 # Set the required variables to be filled up later
5539 db_nsr = None
5540 db_nslcmop_update = {}
5541 vnfr_update = {}
5542 nslcmop_operation_state = None
5543 db_nsr_update = {}
5544 error_description_nslcmop = ""
5545 exc = None
5546 change_type = "updated"
5547 detailed_status = ""
5548 member_vnf_index = None
5549
5550 try:
5551 # wait for any previous tasks in process
5552 step = "Waiting for previous operations to terminate"
5553 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5554 self._write_ns_status(
5555 nsr_id=nsr_id,
5556 ns_state=None,
5557 current_operation="UPDATING",
5558 current_operation_id=nslcmop_id,
5559 )
5560
5561 step = "Getting nslcmop from database"
5562 db_nslcmop = self.db.get_one(
5563 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5564 )
5565 update_type = db_nslcmop["operationParams"]["updateType"]
5566
5567 step = "Getting nsr from database"
5568 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5569 old_operational_status = db_nsr["operational-status"]
5570 db_nsr_update["operational-status"] = "updating"
5571 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5572 nsr_deployed = db_nsr["_admin"].get("deployed")
5573
5574 if update_type == "CHANGE_VNFPKG":
5575 # Get the input parameters given through update request
5576 vnf_instance_id = db_nslcmop["operationParams"][
5577 "changeVnfPackageData"
5578 ].get("vnfInstanceId")
5579
5580 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5581 "vnfdId"
5582 )
5583 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5584
5585 step = "Getting vnfr from database"
5586 db_vnfr = self.db.get_one(
5587 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5588 )
5589
5590 step = "Getting vnfds from database"
5591 # Latest VNFD
5592 latest_vnfd = self.db.get_one(
5593 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5594 )
5595 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5596
5597 # Current VNFD
5598 current_vnf_revision = db_vnfr.get("revision", 1)
5599 current_vnfd = self.db.get_one(
5600 "vnfds_revisions",
5601 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5602 fail_on_empty=False,
5603 )
5604 # Charm artifact paths will be filled up later
5605 (
5606 current_charm_artifact_path,
5607 target_charm_artifact_path,
5608 charm_artifact_paths,
5609 helm_artifacts,
5610 ) = ([], [], [], [])
5611
5612 step = "Checking if revision has changed in VNFD"
5613 if current_vnf_revision != latest_vnfd_revision:
5614 change_type = "policy_updated"
5615
5616 # There is new revision of VNFD, update operation is required
5617 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5618 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5619
5620 step = "Removing the VNFD packages if they exist in the local path"
5621 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5622 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5623
5624 step = "Get the VNFD packages from FSMongo"
5625 self.fs.sync(from_path=latest_vnfd_path)
5626 self.fs.sync(from_path=current_vnfd_path)
5627
5628 step = (
5629 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5630 )
5631 current_base_folder = current_vnfd["_admin"]["storage"]
5632 latest_base_folder = latest_vnfd["_admin"]["storage"]
5633
5634 for vca_index, vca_deployed in enumerate(
5635 get_iterable(nsr_deployed, "VCA")
5636 ):
5637 vnf_index = db_vnfr.get("member-vnf-index-ref")
5638
5639 # Getting charm-id and charm-type
5640 if vca_deployed.get("member-vnf-index") == vnf_index:
5641 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5642 vca_type = vca_deployed.get("type")
5643 vdu_count_index = vca_deployed.get("vdu_count_index")
5644
5645 # Getting ee-id
5646 ee_id = vca_deployed.get("ee_id")
5647
5648 step = "Getting descriptor config"
5649 if current_vnfd.get("kdu"):
5650 search_key = "kdu_name"
5651 else:
5652 search_key = "vnfd_id"
5653
5654 entity_id = vca_deployed.get(search_key)
5655
5656 descriptor_config = get_configuration(
5657 current_vnfd, entity_id
5658 )
5659
5660 if "execution-environment-list" in descriptor_config:
5661 ee_list = descriptor_config.get(
5662 "execution-environment-list", []
5663 )
5664 else:
5665 ee_list = []
5666
5667 # There could be several charm used in the same VNF
5668 for ee_item in ee_list:
5669 if ee_item.get("juju"):
5670 step = "Getting charm name"
5671 charm_name = ee_item["juju"].get("charm")
5672
5673 step = "Setting Charm artifact paths"
5674 current_charm_artifact_path.append(
5675 get_charm_artifact_path(
5676 current_base_folder,
5677 charm_name,
5678 vca_type,
5679 current_vnf_revision,
5680 )
5681 )
5682 target_charm_artifact_path.append(
5683 get_charm_artifact_path(
5684 latest_base_folder,
5685 charm_name,
5686 vca_type,
5687 latest_vnfd_revision,
5688 )
5689 )
5690 elif ee_item.get("helm-chart"):
5691 # add chart to list and all parameters
5692 step = "Getting helm chart name"
5693 chart_name = ee_item.get("helm-chart")
5694 if (
5695 ee_item.get("helm-version")
5696 and ee_item.get("helm-version") == "v2"
5697 ):
5698 vca_type = "helm"
5699 else:
5700 vca_type = "helm-v3"
5701 step = "Setting Helm chart artifact paths"
5702
5703 helm_artifacts.append(
5704 {
5705 "current_artifact_path": get_charm_artifact_path(
5706 current_base_folder,
5707 chart_name,
5708 vca_type,
5709 current_vnf_revision,
5710 ),
5711 "target_artifact_path": get_charm_artifact_path(
5712 latest_base_folder,
5713 chart_name,
5714 vca_type,
5715 latest_vnfd_revision,
5716 ),
5717 "ee_id": ee_id,
5718 "vca_index": vca_index,
5719 "vdu_index": vdu_count_index,
5720 }
5721 )
5722
5723 charm_artifact_paths = zip(
5724 current_charm_artifact_path, target_charm_artifact_path
5725 )
5726
5727 step = "Checking if software version has changed in VNFD"
5728 if find_software_version(current_vnfd) != find_software_version(
5729 latest_vnfd
5730 ):
5731 step = "Checking if existing VNF has charm"
5732 for current_charm_path, target_charm_path in list(
5733 charm_artifact_paths
5734 ):
5735 if current_charm_path:
5736 raise LcmException(
5737 "Software version change is not supported as VNF instance {} has charm.".format(
5738 vnf_instance_id
5739 )
5740 )
5741
5742 # There is no change in the charm package, then redeploy the VNF
5743 # based on new descriptor
5744 step = "Redeploying VNF"
5745 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5746 (result, detailed_status) = await self._ns_redeploy_vnf(
5747 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5748 )
5749 if result == "FAILED":
5750 nslcmop_operation_state = result
5751 error_description_nslcmop = detailed_status
5752 db_nslcmop_update["detailed-status"] = detailed_status
5753 self.logger.debug(
5754 logging_text
5755 + " step {} Done with result {} {}".format(
5756 step, nslcmop_operation_state, detailed_status
5757 )
5758 )
5759
5760 else:
5761 step = "Checking if any charm package has changed or not"
5762 for current_charm_path, target_charm_path in list(
5763 charm_artifact_paths
5764 ):
5765 if (
5766 current_charm_path
5767 and target_charm_path
5768 and self.check_charm_hash_changed(
5769 current_charm_path, target_charm_path
5770 )
5771 ):
5772 step = "Checking whether VNF uses juju bundle"
5773 if check_juju_bundle_existence(current_vnfd):
5774 raise LcmException(
5775 "Charm upgrade is not supported for the instance which"
5776 " uses juju-bundle: {}".format(
5777 check_juju_bundle_existence(current_vnfd)
5778 )
5779 )
5780
5781 step = "Upgrading Charm"
5782 (
5783 result,
5784 detailed_status,
5785 ) = await self._ns_charm_upgrade(
5786 ee_id=ee_id,
5787 charm_id=vca_id,
5788 charm_type=vca_type,
5789 path=self.fs.path + target_charm_path,
5790 timeout=timeout_seconds,
5791 )
5792
5793 if result == "FAILED":
5794 nslcmop_operation_state = result
5795 error_description_nslcmop = detailed_status
5796
5797 db_nslcmop_update["detailed-status"] = detailed_status
5798 self.logger.debug(
5799 logging_text
5800 + " step {} Done with result {} {}".format(
5801 step, nslcmop_operation_state, detailed_status
5802 )
5803 )
5804
5805 step = "Updating policies"
5806 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5807 result = "COMPLETED"
5808 detailed_status = "Done"
5809 db_nslcmop_update["detailed-status"] = "Done"
5810
5811 # helm base EE
5812 for item in helm_artifacts:
5813 if not (
5814 item["current_artifact_path"]
5815 and item["target_artifact_path"]
5816 and self.check_charm_hash_changed(
5817 item["current_artifact_path"],
5818 item["target_artifact_path"],
5819 )
5820 ):
5821 continue
5822 db_update_entry = "_admin.deployed.VCA.{}.".format(
5823 item["vca_index"]
5824 )
5825 vnfr_id = db_vnfr["_id"]
5826 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
5827 db_dict = {
5828 "collection": "nsrs",
5829 "filter": {"_id": nsr_id},
5830 "path": db_update_entry,
5831 }
5832 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
5833 await self.vca_map[vca_type].upgrade_execution_environment(
5834 namespace=namespace,
5835 helm_id=helm_id,
5836 db_dict=db_dict,
5837 config=osm_config,
5838 artifact_path=item["target_artifact_path"],
5839 vca_type=vca_type,
5840 )
5841 vnf_id = db_vnfr.get("vnfd-ref")
5842 config_descriptor = get_configuration(latest_vnfd, vnf_id)
5843 self.logger.debug("get ssh key block")
5844 rw_mgmt_ip = None
5845 if deep_get(
5846 config_descriptor,
5847 ("config-access", "ssh-access", "required"),
5848 ):
5849 # Needed to inject a ssh key
5850 user = deep_get(
5851 config_descriptor,
5852 ("config-access", "ssh-access", "default-user"),
5853 )
5854 step = (
5855 "Install configuration Software, getting public ssh key"
5856 )
5857 pub_key = await self.vca_map[
5858 vca_type
5859 ].get_ee_ssh_public__key(
5860 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
5861 )
5862
5863 step = (
5864 "Insert public key into VM user={} ssh_key={}".format(
5865 user, pub_key
5866 )
5867 )
5868 self.logger.debug(logging_text + step)
5869
5870 # wait for RO (ip-address) Insert pub_key into VM
5871 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
5872 logging_text,
5873 nsr_id,
5874 vnfr_id,
5875 None,
5876 item["vdu_index"],
5877 user=user,
5878 pub_key=pub_key,
5879 )
5880
5881 initial_config_primitive_list = config_descriptor.get(
5882 "initial-config-primitive"
5883 )
5884 config_primitive = next(
5885 (
5886 p
5887 for p in initial_config_primitive_list
5888 if p["name"] == "config"
5889 ),
5890 None,
5891 )
5892 if not config_primitive:
5893 continue
5894
5895 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5896 if rw_mgmt_ip:
5897 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
5898 if db_vnfr.get("additionalParamsForVnf"):
5899 deploy_params.update(
5900 parse_yaml_strings(
5901 db_vnfr["additionalParamsForVnf"].copy()
5902 )
5903 )
5904 primitive_params_ = self._map_primitive_params(
5905 config_primitive, {}, deploy_params
5906 )
5907
5908 step = "execute primitive '{}' params '{}'".format(
5909 config_primitive["name"], primitive_params_
5910 )
5911 self.logger.debug(logging_text + step)
5912 await self.vca_map[vca_type].exec_primitive(
5913 ee_id=ee_id,
5914 primitive_name=config_primitive["name"],
5915 params_dict=primitive_params_,
5916 db_dict=db_dict,
5917 vca_id=vca_id,
5918 vca_type=vca_type,
5919 )
5920
5921 step = "Updating policies"
5922 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5923 detailed_status = "Done"
5924 db_nslcmop_update["detailed-status"] = "Done"
5925
5926 # If nslcmop_operation_state is None, so any operation is not failed.
5927 if not nslcmop_operation_state:
5928 nslcmop_operation_state = "COMPLETED"
5929
5930 # If update CHANGE_VNFPKG nslcmop_operation is successful
5931 # vnf revision need to be updated
5932 vnfr_update["revision"] = latest_vnfd_revision
5933 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5934
5935 self.logger.debug(
5936 logging_text
5937 + " task Done with result {} {}".format(
5938 nslcmop_operation_state, detailed_status
5939 )
5940 )
5941 elif update_type == "REMOVE_VNF":
5942 # This part is included in https://osm.etsi.org/gerrit/11876
5943 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5944 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5945 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5946 step = "Removing VNF"
5947 (result, detailed_status) = await self.remove_vnf(
5948 nsr_id, nslcmop_id, vnf_instance_id
5949 )
5950 if result == "FAILED":
5951 nslcmop_operation_state = result
5952 error_description_nslcmop = detailed_status
5953 db_nslcmop_update["detailed-status"] = detailed_status
5954 change_type = "vnf_terminated"
5955 if not nslcmop_operation_state:
5956 nslcmop_operation_state = "COMPLETED"
5957 self.logger.debug(
5958 logging_text
5959 + " task Done with result {} {}".format(
5960 nslcmop_operation_state, detailed_status
5961 )
5962 )
5963
5964 elif update_type == "OPERATE_VNF":
5965 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
5966 "vnfInstanceId"
5967 ]
5968 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
5969 "changeStateTo"
5970 ]
5971 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
5972 "additionalParam"
5973 ]
5974 (result, detailed_status) = await self.rebuild_start_stop(
5975 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5976 )
5977 if result == "FAILED":
5978 nslcmop_operation_state = result
5979 error_description_nslcmop = detailed_status
5980 db_nslcmop_update["detailed-status"] = detailed_status
5981 if not nslcmop_operation_state:
5982 nslcmop_operation_state = "COMPLETED"
5983 self.logger.debug(
5984 logging_text
5985 + " task Done with result {} {}".format(
5986 nslcmop_operation_state, detailed_status
5987 )
5988 )
5989
5990 # If nslcmop_operation_state is None, so any operation is not failed.
5991 # All operations are executed in overall.
5992 if not nslcmop_operation_state:
5993 nslcmop_operation_state = "COMPLETED"
5994 db_nsr_update["operational-status"] = old_operational_status
5995
5996 except (DbException, LcmException, N2VCException, K8sException) as e:
5997 self.logger.error(logging_text + "Exit Exception {}".format(e))
5998 exc = e
5999 except asyncio.CancelledError:
6000 self.logger.error(
6001 logging_text + "Cancelled Exception while '{}'".format(step)
6002 )
6003 exc = "Operation was cancelled"
6004 except asyncio.TimeoutError:
6005 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6006 exc = "Timeout"
6007 except Exception as e:
6008 exc = traceback.format_exc()
6009 self.logger.critical(
6010 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6011 exc_info=True,
6012 )
6013 finally:
6014 if exc:
6015 db_nslcmop_update[
6016 "detailed-status"
6017 ] = (
6018 detailed_status
6019 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6020 nslcmop_operation_state = "FAILED"
6021 db_nsr_update["operational-status"] = old_operational_status
6022 if db_nsr:
6023 self._write_ns_status(
6024 nsr_id=nsr_id,
6025 ns_state=db_nsr["nsState"],
6026 current_operation="IDLE",
6027 current_operation_id=None,
6028 other_update=db_nsr_update,
6029 )
6030
6031 self._write_op_status(
6032 op_id=nslcmop_id,
6033 stage="",
6034 error_message=error_description_nslcmop,
6035 operation_state=nslcmop_operation_state,
6036 other_update=db_nslcmop_update,
6037 )
6038
6039 if nslcmop_operation_state:
6040 try:
6041 msg = {
6042 "nsr_id": nsr_id,
6043 "nslcmop_id": nslcmop_id,
6044 "operationState": nslcmop_operation_state,
6045 }
6046 if (
6047 change_type in ("vnf_terminated", "policy_updated")
6048 and member_vnf_index
6049 ):
6050 msg.update({"vnf_member_index": member_vnf_index})
6051 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6052 except Exception as e:
6053 self.logger.error(
6054 logging_text + "kafka_write notification Exception {}".format(e)
6055 )
6056 self.logger.debug(logging_text + "Exit")
6057 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6058 return nslcmop_operation_state, detailed_status
6059
6060 async def scale(self, nsr_id, nslcmop_id):
6061 # Try to lock HA task here
6062 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6063 if not task_is_locked_by_me:
6064 return
6065
6066 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6067 stage = ["", "", ""]
6068 tasks_dict_info = {}
6069 # ^ stage, step, VIM progress
6070 self.logger.debug(logging_text + "Enter")
6071 # get all needed from database
6072 db_nsr = None
6073 db_nslcmop_update = {}
6074 db_nsr_update = {}
6075 exc = None
6076 # in case of error, indicates what part of scale was failed to put nsr at error status
6077 scale_process = None
6078 old_operational_status = ""
6079 old_config_status = ""
6080 nsi_id = None
6081 try:
6082 # wait for any previous tasks in process
6083 step = "Waiting for previous operations to terminate"
6084 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6085 self._write_ns_status(
6086 nsr_id=nsr_id,
6087 ns_state=None,
6088 current_operation="SCALING",
6089 current_operation_id=nslcmop_id,
6090 )
6091
6092 step = "Getting nslcmop from database"
6093 self.logger.debug(
6094 step + " after having waited for previous tasks to be completed"
6095 )
6096 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6097
6098 step = "Getting nsr from database"
6099 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6100 old_operational_status = db_nsr["operational-status"]
6101 old_config_status = db_nsr["config-status"]
6102
6103 step = "Parsing scaling parameters"
6104 db_nsr_update["operational-status"] = "scaling"
6105 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6106 nsr_deployed = db_nsr["_admin"].get("deployed")
6107
6108 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6109 "scaleByStepData"
6110 ]["member-vnf-index"]
6111 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6112 "scaleByStepData"
6113 ]["scaling-group-descriptor"]
6114 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6115 # for backward compatibility
6116 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6117 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6118 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6119 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6120
6121 step = "Getting vnfr from database"
6122 db_vnfr = self.db.get_one(
6123 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6124 )
6125
6126 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6127
6128 step = "Getting vnfd from database"
6129 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6130
6131 base_folder = db_vnfd["_admin"]["storage"]
6132
6133 step = "Getting scaling-group-descriptor"
6134 scaling_descriptor = find_in_list(
6135 get_scaling_aspect(db_vnfd),
6136 lambda scale_desc: scale_desc["name"] == scaling_group,
6137 )
6138 if not scaling_descriptor:
6139 raise LcmException(
6140 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6141 "at vnfd:scaling-group-descriptor".format(scaling_group)
6142 )
6143
6144 step = "Sending scale order to VIM"
6145 # TODO check if ns is in a proper status
6146 nb_scale_op = 0
6147 if not db_nsr["_admin"].get("scaling-group"):
6148 self.update_db_2(
6149 "nsrs",
6150 nsr_id,
6151 {
6152 "_admin.scaling-group": [
6153 {"name": scaling_group, "nb-scale-op": 0}
6154 ]
6155 },
6156 )
6157 admin_scale_index = 0
6158 else:
6159 for admin_scale_index, admin_scale_info in enumerate(
6160 db_nsr["_admin"]["scaling-group"]
6161 ):
6162 if admin_scale_info["name"] == scaling_group:
6163 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6164 break
6165 else: # not found, set index one plus last element and add new entry with the name
6166 admin_scale_index += 1
6167 db_nsr_update[
6168 "_admin.scaling-group.{}.name".format(admin_scale_index)
6169 ] = scaling_group
6170
6171 vca_scaling_info = []
6172 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6173 if scaling_type == "SCALE_OUT":
6174 if "aspect-delta-details" not in scaling_descriptor:
6175 raise LcmException(
6176 "Aspect delta details not fount in scaling descriptor {}".format(
6177 scaling_descriptor["name"]
6178 )
6179 )
6180 # count if max-instance-count is reached
6181 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6182
6183 scaling_info["scaling_direction"] = "OUT"
6184 scaling_info["vdu-create"] = {}
6185 scaling_info["kdu-create"] = {}
6186 for delta in deltas:
6187 for vdu_delta in delta.get("vdu-delta", {}):
6188 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6189 # vdu_index also provides the number of instance of the targeted vdu
6190 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6191 cloud_init_text = self._get_vdu_cloud_init_content(
6192 vdud, db_vnfd
6193 )
6194 if cloud_init_text:
6195 additional_params = (
6196 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6197 or {}
6198 )
6199 cloud_init_list = []
6200
6201 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6202 max_instance_count = 10
6203 if vdu_profile and "max-number-of-instances" in vdu_profile:
6204 max_instance_count = vdu_profile.get(
6205 "max-number-of-instances", 10
6206 )
6207
6208 default_instance_num = get_number_of_instances(
6209 db_vnfd, vdud["id"]
6210 )
6211 instances_number = vdu_delta.get("number-of-instances", 1)
6212 nb_scale_op += instances_number
6213
6214 new_instance_count = nb_scale_op + default_instance_num
6215 # Control if new count is over max and vdu count is less than max.
6216 # Then assign new instance count
6217 if new_instance_count > max_instance_count > vdu_count:
6218 instances_number = new_instance_count - max_instance_count
6219 else:
6220 instances_number = instances_number
6221
6222 if new_instance_count > max_instance_count:
6223 raise LcmException(
6224 "reached the limit of {} (max-instance-count) "
6225 "scaling-out operations for the "
6226 "scaling-group-descriptor '{}'".format(
6227 nb_scale_op, scaling_group
6228 )
6229 )
6230 for x in range(vdu_delta.get("number-of-instances", 1)):
6231 if cloud_init_text:
6232 # TODO Information of its own ip is not available because db_vnfr is not updated.
6233 additional_params["OSM"] = get_osm_params(
6234 db_vnfr, vdu_delta["id"], vdu_index + x
6235 )
6236 cloud_init_list.append(
6237 self._parse_cloud_init(
6238 cloud_init_text,
6239 additional_params,
6240 db_vnfd["id"],
6241 vdud["id"],
6242 )
6243 )
6244 vca_scaling_info.append(
6245 {
6246 "osm_vdu_id": vdu_delta["id"],
6247 "member-vnf-index": vnf_index,
6248 "type": "create",
6249 "vdu_index": vdu_index + x,
6250 }
6251 )
6252 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6253 for kdu_delta in delta.get("kdu-resource-delta", {}):
6254 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6255 kdu_name = kdu_profile["kdu-name"]
6256 resource_name = kdu_profile.get("resource-name", "")
6257
6258 # Might have different kdus in the same delta
6259 # Should have list for each kdu
6260 if not scaling_info["kdu-create"].get(kdu_name, None):
6261 scaling_info["kdu-create"][kdu_name] = []
6262
6263 kdur = get_kdur(db_vnfr, kdu_name)
6264 if kdur.get("helm-chart"):
6265 k8s_cluster_type = "helm-chart-v3"
6266 self.logger.debug("kdur: {}".format(kdur))
6267 if (
6268 kdur.get("helm-version")
6269 and kdur.get("helm-version") == "v2"
6270 ):
6271 k8s_cluster_type = "helm-chart"
6272 elif kdur.get("juju-bundle"):
6273 k8s_cluster_type = "juju-bundle"
6274 else:
6275 raise LcmException(
6276 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6277 "juju-bundle. Maybe an old NBI version is running".format(
6278 db_vnfr["member-vnf-index-ref"], kdu_name
6279 )
6280 )
6281
6282 max_instance_count = 10
6283 if kdu_profile and "max-number-of-instances" in kdu_profile:
6284 max_instance_count = kdu_profile.get(
6285 "max-number-of-instances", 10
6286 )
6287
6288 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6289 deployed_kdu, _ = get_deployed_kdu(
6290 nsr_deployed, kdu_name, vnf_index
6291 )
6292 if deployed_kdu is None:
6293 raise LcmException(
6294 "KDU '{}' for vnf '{}' not deployed".format(
6295 kdu_name, vnf_index
6296 )
6297 )
6298 kdu_instance = deployed_kdu.get("kdu-instance")
6299 instance_num = await self.k8scluster_map[
6300 k8s_cluster_type
6301 ].get_scale_count(
6302 resource_name,
6303 kdu_instance,
6304 vca_id=vca_id,
6305 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6306 kdu_model=deployed_kdu.get("kdu-model"),
6307 )
6308 kdu_replica_count = instance_num + kdu_delta.get(
6309 "number-of-instances", 1
6310 )
6311
6312 # Control if new count is over max and instance_num is less than max.
6313 # Then assign max instance number to kdu replica count
6314 if kdu_replica_count > max_instance_count > instance_num:
6315 kdu_replica_count = max_instance_count
6316 if kdu_replica_count > max_instance_count:
6317 raise LcmException(
6318 "reached the limit of {} (max-instance-count) "
6319 "scaling-out operations for the "
6320 "scaling-group-descriptor '{}'".format(
6321 instance_num, scaling_group
6322 )
6323 )
6324
6325 for x in range(kdu_delta.get("number-of-instances", 1)):
6326 vca_scaling_info.append(
6327 {
6328 "osm_kdu_id": kdu_name,
6329 "member-vnf-index": vnf_index,
6330 "type": "create",
6331 "kdu_index": instance_num + x - 1,
6332 }
6333 )
6334 scaling_info["kdu-create"][kdu_name].append(
6335 {
6336 "member-vnf-index": vnf_index,
6337 "type": "create",
6338 "k8s-cluster-type": k8s_cluster_type,
6339 "resource-name": resource_name,
6340 "scale": kdu_replica_count,
6341 }
6342 )
6343 elif scaling_type == "SCALE_IN":
6344 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6345
6346 scaling_info["scaling_direction"] = "IN"
6347 scaling_info["vdu-delete"] = {}
6348 scaling_info["kdu-delete"] = {}
6349
6350 for delta in deltas:
6351 for vdu_delta in delta.get("vdu-delta", {}):
6352 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6353 min_instance_count = 0
6354 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6355 if vdu_profile and "min-number-of-instances" in vdu_profile:
6356 min_instance_count = vdu_profile["min-number-of-instances"]
6357
6358 default_instance_num = get_number_of_instances(
6359 db_vnfd, vdu_delta["id"]
6360 )
6361 instance_num = vdu_delta.get("number-of-instances", 1)
6362 nb_scale_op -= instance_num
6363
6364 new_instance_count = nb_scale_op + default_instance_num
6365
6366 if new_instance_count < min_instance_count < vdu_count:
6367 instances_number = min_instance_count - new_instance_count
6368 else:
6369 instances_number = instance_num
6370
6371 if new_instance_count < min_instance_count:
6372 raise LcmException(
6373 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6374 "scaling-group-descriptor '{}'".format(
6375 nb_scale_op, scaling_group
6376 )
6377 )
6378 for x in range(vdu_delta.get("number-of-instances", 1)):
6379 vca_scaling_info.append(
6380 {
6381 "osm_vdu_id": vdu_delta["id"],
6382 "member-vnf-index": vnf_index,
6383 "type": "delete",
6384 "vdu_index": vdu_index - 1 - x,
6385 }
6386 )
6387 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6388 for kdu_delta in delta.get("kdu-resource-delta", {}):
6389 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6390 kdu_name = kdu_profile["kdu-name"]
6391 resource_name = kdu_profile.get("resource-name", "")
6392
6393 if not scaling_info["kdu-delete"].get(kdu_name, None):
6394 scaling_info["kdu-delete"][kdu_name] = []
6395
6396 kdur = get_kdur(db_vnfr, kdu_name)
6397 if kdur.get("helm-chart"):
6398 k8s_cluster_type = "helm-chart-v3"
6399 self.logger.debug("kdur: {}".format(kdur))
6400 if (
6401 kdur.get("helm-version")
6402 and kdur.get("helm-version") == "v2"
6403 ):
6404 k8s_cluster_type = "helm-chart"
6405 elif kdur.get("juju-bundle"):
6406 k8s_cluster_type = "juju-bundle"
6407 else:
6408 raise LcmException(
6409 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6410 "juju-bundle. Maybe an old NBI version is running".format(
6411 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6412 )
6413 )
6414
6415 min_instance_count = 0
6416 if kdu_profile and "min-number-of-instances" in kdu_profile:
6417 min_instance_count = kdu_profile["min-number-of-instances"]
6418
6419 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6420 deployed_kdu, _ = get_deployed_kdu(
6421 nsr_deployed, kdu_name, vnf_index
6422 )
6423 if deployed_kdu is None:
6424 raise LcmException(
6425 "KDU '{}' for vnf '{}' not deployed".format(
6426 kdu_name, vnf_index
6427 )
6428 )
6429 kdu_instance = deployed_kdu.get("kdu-instance")
6430 instance_num = await self.k8scluster_map[
6431 k8s_cluster_type
6432 ].get_scale_count(
6433 resource_name,
6434 kdu_instance,
6435 vca_id=vca_id,
6436 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6437 kdu_model=deployed_kdu.get("kdu-model"),
6438 )
6439 kdu_replica_count = instance_num - kdu_delta.get(
6440 "number-of-instances", 1
6441 )
6442
6443 if kdu_replica_count < min_instance_count < instance_num:
6444 kdu_replica_count = min_instance_count
6445 if kdu_replica_count < min_instance_count:
6446 raise LcmException(
6447 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6448 "scaling-group-descriptor '{}'".format(
6449 instance_num, scaling_group
6450 )
6451 )
6452
6453 for x in range(kdu_delta.get("number-of-instances", 1)):
6454 vca_scaling_info.append(
6455 {
6456 "osm_kdu_id": kdu_name,
6457 "member-vnf-index": vnf_index,
6458 "type": "delete",
6459 "kdu_index": instance_num - x - 1,
6460 }
6461 )
6462 scaling_info["kdu-delete"][kdu_name].append(
6463 {
6464 "member-vnf-index": vnf_index,
6465 "type": "delete",
6466 "k8s-cluster-type": k8s_cluster_type,
6467 "resource-name": resource_name,
6468 "scale": kdu_replica_count,
6469 }
6470 )
6471
6472 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6473 vdu_delete = copy(scaling_info.get("vdu-delete"))
6474 if scaling_info["scaling_direction"] == "IN":
6475 for vdur in reversed(db_vnfr["vdur"]):
6476 if vdu_delete.get(vdur["vdu-id-ref"]):
6477 vdu_delete[vdur["vdu-id-ref"]] -= 1
6478 scaling_info["vdu"].append(
6479 {
6480 "name": vdur.get("name") or vdur.get("vdu-name"),
6481 "vdu_id": vdur["vdu-id-ref"],
6482 "interface": [],
6483 }
6484 )
6485 for interface in vdur["interfaces"]:
6486 scaling_info["vdu"][-1]["interface"].append(
6487 {
6488 "name": interface["name"],
6489 "ip_address": interface["ip-address"],
6490 "mac_address": interface.get("mac-address"),
6491 }
6492 )
6493 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6494
6495 # PRE-SCALE BEGIN
6496 step = "Executing pre-scale vnf-config-primitive"
6497 if scaling_descriptor.get("scaling-config-action"):
6498 for scaling_config_action in scaling_descriptor[
6499 "scaling-config-action"
6500 ]:
6501 if (
6502 scaling_config_action.get("trigger") == "pre-scale-in"
6503 and scaling_type == "SCALE_IN"
6504 ) or (
6505 scaling_config_action.get("trigger") == "pre-scale-out"
6506 and scaling_type == "SCALE_OUT"
6507 ):
6508 vnf_config_primitive = scaling_config_action[
6509 "vnf-config-primitive-name-ref"
6510 ]
6511 step = db_nslcmop_update[
6512 "detailed-status"
6513 ] = "executing pre-scale scaling-config-action '{}'".format(
6514 vnf_config_primitive
6515 )
6516
6517 # look for primitive
6518 for config_primitive in (
6519 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6520 ).get("config-primitive", ()):
6521 if config_primitive["name"] == vnf_config_primitive:
6522 break
6523 else:
6524 raise LcmException(
6525 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6526 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6527 "primitive".format(scaling_group, vnf_config_primitive)
6528 )
6529
6530 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6531 if db_vnfr.get("additionalParamsForVnf"):
6532 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6533
6534 scale_process = "VCA"
6535 db_nsr_update["config-status"] = "configuring pre-scaling"
6536 primitive_params = self._map_primitive_params(
6537 config_primitive, {}, vnfr_params
6538 )
6539
6540 # Pre-scale retry check: Check if this sub-operation has been executed before
6541 op_index = self._check_or_add_scale_suboperation(
6542 db_nslcmop,
6543 vnf_index,
6544 vnf_config_primitive,
6545 primitive_params,
6546 "PRE-SCALE",
6547 )
6548 if op_index == self.SUBOPERATION_STATUS_SKIP:
6549 # Skip sub-operation
6550 result = "COMPLETED"
6551 result_detail = "Done"
6552 self.logger.debug(
6553 logging_text
6554 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6555 vnf_config_primitive, result, result_detail
6556 )
6557 )
6558 else:
6559 if op_index == self.SUBOPERATION_STATUS_NEW:
6560 # New sub-operation: Get index of this sub-operation
6561 op_index = (
6562 len(db_nslcmop.get("_admin", {}).get("operations"))
6563 - 1
6564 )
6565 self.logger.debug(
6566 logging_text
6567 + "vnf_config_primitive={} New sub-operation".format(
6568 vnf_config_primitive
6569 )
6570 )
6571 else:
6572 # retry: Get registered params for this existing sub-operation
6573 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6574 op_index
6575 ]
6576 vnf_index = op.get("member_vnf_index")
6577 vnf_config_primitive = op.get("primitive")
6578 primitive_params = op.get("primitive_params")
6579 self.logger.debug(
6580 logging_text
6581 + "vnf_config_primitive={} Sub-operation retry".format(
6582 vnf_config_primitive
6583 )
6584 )
6585 # Execute the primitive, either with new (first-time) or registered (reintent) args
6586 ee_descriptor_id = config_primitive.get(
6587 "execution-environment-ref"
6588 )
6589 primitive_name = config_primitive.get(
6590 "execution-environment-primitive", vnf_config_primitive
6591 )
6592 ee_id, vca_type = self._look_for_deployed_vca(
6593 nsr_deployed["VCA"],
6594 member_vnf_index=vnf_index,
6595 vdu_id=None,
6596 vdu_count_index=None,
6597 ee_descriptor_id=ee_descriptor_id,
6598 )
6599 result, result_detail = await self._ns_execute_primitive(
6600 ee_id,
6601 primitive_name,
6602 primitive_params,
6603 vca_type=vca_type,
6604 vca_id=vca_id,
6605 )
6606 self.logger.debug(
6607 logging_text
6608 + "vnf_config_primitive={} Done with result {} {}".format(
6609 vnf_config_primitive, result, result_detail
6610 )
6611 )
6612 # Update operationState = COMPLETED | FAILED
6613 self._update_suboperation_status(
6614 db_nslcmop, op_index, result, result_detail
6615 )
6616
6617 if result == "FAILED":
6618 raise LcmException(result_detail)
6619 db_nsr_update["config-status"] = old_config_status
6620 scale_process = None
6621 # PRE-SCALE END
6622
6623 db_nsr_update[
6624 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6625 ] = nb_scale_op
6626 db_nsr_update[
6627 "_admin.scaling-group.{}.time".format(admin_scale_index)
6628 ] = time()
6629
6630 # SCALE-IN VCA - BEGIN
6631 if vca_scaling_info:
6632 step = db_nslcmop_update[
6633 "detailed-status"
6634 ] = "Deleting the execution environments"
6635 scale_process = "VCA"
6636 for vca_info in vca_scaling_info:
6637 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6638 member_vnf_index = str(vca_info["member-vnf-index"])
6639 self.logger.debug(
6640 logging_text + "vdu info: {}".format(vca_info)
6641 )
6642 if vca_info.get("osm_vdu_id"):
6643 vdu_id = vca_info["osm_vdu_id"]
6644 vdu_index = int(vca_info["vdu_index"])
6645 stage[
6646 1
6647 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6648 member_vnf_index, vdu_id, vdu_index
6649 )
6650 stage[2] = step = "Scaling in VCA"
6651 self._write_op_status(op_id=nslcmop_id, stage=stage)
6652 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6653 config_update = db_nsr["configurationStatus"]
6654 for vca_index, vca in enumerate(vca_update):
6655 if (
6656 (vca or vca.get("ee_id"))
6657 and vca["member-vnf-index"] == member_vnf_index
6658 and vca["vdu_count_index"] == vdu_index
6659 ):
6660 if vca.get("vdu_id"):
6661 config_descriptor = get_configuration(
6662 db_vnfd, vca.get("vdu_id")
6663 )
6664 elif vca.get("kdu_name"):
6665 config_descriptor = get_configuration(
6666 db_vnfd, vca.get("kdu_name")
6667 )
6668 else:
6669 config_descriptor = get_configuration(
6670 db_vnfd, db_vnfd["id"]
6671 )
6672 operation_params = (
6673 db_nslcmop.get("operationParams") or {}
6674 )
6675 exec_terminate_primitives = not operation_params.get(
6676 "skip_terminate_primitives"
6677 ) and vca.get("needed_terminate")
6678 task = asyncio.ensure_future(
6679 asyncio.wait_for(
6680 self.destroy_N2VC(
6681 logging_text,
6682 db_nslcmop,
6683 vca,
6684 config_descriptor,
6685 vca_index,
6686 destroy_ee=True,
6687 exec_primitives=exec_terminate_primitives,
6688 scaling_in=True,
6689 vca_id=vca_id,
6690 ),
6691 timeout=self.timeout.charm_delete,
6692 )
6693 )
6694 tasks_dict_info[task] = "Terminating VCA {}".format(
6695 vca.get("ee_id")
6696 )
6697 del vca_update[vca_index]
6698 del config_update[vca_index]
6699 # wait for pending tasks of terminate primitives
6700 if tasks_dict_info:
6701 self.logger.debug(
6702 logging_text
6703 + "Waiting for tasks {}".format(
6704 list(tasks_dict_info.keys())
6705 )
6706 )
6707 error_list = await self._wait_for_tasks(
6708 logging_text,
6709 tasks_dict_info,
6710 min(
6711 self.timeout.charm_delete, self.timeout.ns_terminate
6712 ),
6713 stage,
6714 nslcmop_id,
6715 )
6716 tasks_dict_info.clear()
6717 if error_list:
6718 raise LcmException("; ".join(error_list))
6719
6720 db_vca_and_config_update = {
6721 "_admin.deployed.VCA": vca_update,
6722 "configurationStatus": config_update,
6723 }
6724 self.update_db_2(
6725 "nsrs", db_nsr["_id"], db_vca_and_config_update
6726 )
6727 scale_process = None
6728 # SCALE-IN VCA - END
6729
6730 # SCALE RO - BEGIN
6731 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6732 scale_process = "RO"
6733 if self.ro_config.ng:
6734 await self._scale_ng_ro(
6735 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6736 )
6737 scaling_info.pop("vdu-create", None)
6738 scaling_info.pop("vdu-delete", None)
6739
6740 scale_process = None
6741 # SCALE RO - END
6742
6743 # SCALE KDU - BEGIN
6744 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6745 scale_process = "KDU"
6746 await self._scale_kdu(
6747 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6748 )
6749 scaling_info.pop("kdu-create", None)
6750 scaling_info.pop("kdu-delete", None)
6751
6752 scale_process = None
6753 # SCALE KDU - END
6754
6755 if db_nsr_update:
6756 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6757
6758 # SCALE-UP VCA - BEGIN
6759 if vca_scaling_info:
6760 step = db_nslcmop_update[
6761 "detailed-status"
6762 ] = "Creating new execution environments"
6763 scale_process = "VCA"
6764 for vca_info in vca_scaling_info:
6765 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6766 member_vnf_index = str(vca_info["member-vnf-index"])
6767 self.logger.debug(
6768 logging_text + "vdu info: {}".format(vca_info)
6769 )
6770 vnfd_id = db_vnfr["vnfd-ref"]
6771 if vca_info.get("osm_vdu_id"):
6772 vdu_index = int(vca_info["vdu_index"])
6773 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6774 if db_vnfr.get("additionalParamsForVnf"):
6775 deploy_params.update(
6776 parse_yaml_strings(
6777 db_vnfr["additionalParamsForVnf"].copy()
6778 )
6779 )
6780 descriptor_config = get_configuration(
6781 db_vnfd, db_vnfd["id"]
6782 )
6783 if descriptor_config:
6784 vdu_id = None
6785 vdu_name = None
6786 kdu_name = None
6787 kdu_index = None
6788 self._deploy_n2vc(
6789 logging_text=logging_text
6790 + "member_vnf_index={} ".format(member_vnf_index),
6791 db_nsr=db_nsr,
6792 db_vnfr=db_vnfr,
6793 nslcmop_id=nslcmop_id,
6794 nsr_id=nsr_id,
6795 nsi_id=nsi_id,
6796 vnfd_id=vnfd_id,
6797 vdu_id=vdu_id,
6798 kdu_name=kdu_name,
6799 kdu_index=kdu_index,
6800 member_vnf_index=member_vnf_index,
6801 vdu_index=vdu_index,
6802 vdu_name=vdu_name,
6803 deploy_params=deploy_params,
6804 descriptor_config=descriptor_config,
6805 base_folder=base_folder,
6806 task_instantiation_info=tasks_dict_info,
6807 stage=stage,
6808 )
6809 vdu_id = vca_info["osm_vdu_id"]
6810 vdur = find_in_list(
6811 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6812 )
6813 descriptor_config = get_configuration(db_vnfd, vdu_id)
6814 if vdur.get("additionalParams"):
6815 deploy_params_vdu = parse_yaml_strings(
6816 vdur["additionalParams"]
6817 )
6818 else:
6819 deploy_params_vdu = deploy_params
6820 deploy_params_vdu["OSM"] = get_osm_params(
6821 db_vnfr, vdu_id, vdu_count_index=vdu_index
6822 )
6823 if descriptor_config:
6824 vdu_name = None
6825 kdu_name = None
6826 kdu_index = None
6827 stage[
6828 1
6829 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6830 member_vnf_index, vdu_id, vdu_index
6831 )
6832 stage[2] = step = "Scaling out VCA"
6833 self._write_op_status(op_id=nslcmop_id, stage=stage)
6834 self._deploy_n2vc(
6835 logging_text=logging_text
6836 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6837 member_vnf_index, vdu_id, vdu_index
6838 ),
6839 db_nsr=db_nsr,
6840 db_vnfr=db_vnfr,
6841 nslcmop_id=nslcmop_id,
6842 nsr_id=nsr_id,
6843 nsi_id=nsi_id,
6844 vnfd_id=vnfd_id,
6845 vdu_id=vdu_id,
6846 kdu_name=kdu_name,
6847 member_vnf_index=member_vnf_index,
6848 vdu_index=vdu_index,
6849 kdu_index=kdu_index,
6850 vdu_name=vdu_name,
6851 deploy_params=deploy_params_vdu,
6852 descriptor_config=descriptor_config,
6853 base_folder=base_folder,
6854 task_instantiation_info=tasks_dict_info,
6855 stage=stage,
6856 )
6857 # SCALE-UP VCA - END
6858 scale_process = None
6859
6860 # POST-SCALE BEGIN
6861 # execute primitive service POST-SCALING
6862 step = "Executing post-scale vnf-config-primitive"
6863 if scaling_descriptor.get("scaling-config-action"):
6864 for scaling_config_action in scaling_descriptor[
6865 "scaling-config-action"
6866 ]:
6867 if (
6868 scaling_config_action.get("trigger") == "post-scale-in"
6869 and scaling_type == "SCALE_IN"
6870 ) or (
6871 scaling_config_action.get("trigger") == "post-scale-out"
6872 and scaling_type == "SCALE_OUT"
6873 ):
6874 vnf_config_primitive = scaling_config_action[
6875 "vnf-config-primitive-name-ref"
6876 ]
6877 step = db_nslcmop_update[
6878 "detailed-status"
6879 ] = "executing post-scale scaling-config-action '{}'".format(
6880 vnf_config_primitive
6881 )
6882
6883 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6884 if db_vnfr.get("additionalParamsForVnf"):
6885 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6886
6887 # look for primitive
6888 for config_primitive in (
6889 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6890 ).get("config-primitive", ()):
6891 if config_primitive["name"] == vnf_config_primitive:
6892 break
6893 else:
6894 raise LcmException(
6895 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6896 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6897 "config-primitive".format(
6898 scaling_group, vnf_config_primitive
6899 )
6900 )
6901 scale_process = "VCA"
6902 db_nsr_update["config-status"] = "configuring post-scaling"
6903 primitive_params = self._map_primitive_params(
6904 config_primitive, {}, vnfr_params
6905 )
6906
6907 # Post-scale retry check: Check if this sub-operation has been executed before
6908 op_index = self._check_or_add_scale_suboperation(
6909 db_nslcmop,
6910 vnf_index,
6911 vnf_config_primitive,
6912 primitive_params,
6913 "POST-SCALE",
6914 )
6915 if op_index == self.SUBOPERATION_STATUS_SKIP:
6916 # Skip sub-operation
6917 result = "COMPLETED"
6918 result_detail = "Done"
6919 self.logger.debug(
6920 logging_text
6921 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6922 vnf_config_primitive, result, result_detail
6923 )
6924 )
6925 else:
6926 if op_index == self.SUBOPERATION_STATUS_NEW:
6927 # New sub-operation: Get index of this sub-operation
6928 op_index = (
6929 len(db_nslcmop.get("_admin", {}).get("operations"))
6930 - 1
6931 )
6932 self.logger.debug(
6933 logging_text
6934 + "vnf_config_primitive={} New sub-operation".format(
6935 vnf_config_primitive
6936 )
6937 )
6938 else:
6939 # retry: Get registered params for this existing sub-operation
6940 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6941 op_index
6942 ]
6943 vnf_index = op.get("member_vnf_index")
6944 vnf_config_primitive = op.get("primitive")
6945 primitive_params = op.get("primitive_params")
6946 self.logger.debug(
6947 logging_text
6948 + "vnf_config_primitive={} Sub-operation retry".format(
6949 vnf_config_primitive
6950 )
6951 )
6952 # Execute the primitive, either with new (first-time) or registered (reintent) args
6953 ee_descriptor_id = config_primitive.get(
6954 "execution-environment-ref"
6955 )
6956 primitive_name = config_primitive.get(
6957 "execution-environment-primitive", vnf_config_primitive
6958 )
6959 ee_id, vca_type = self._look_for_deployed_vca(
6960 nsr_deployed["VCA"],
6961 member_vnf_index=vnf_index,
6962 vdu_id=None,
6963 vdu_count_index=None,
6964 ee_descriptor_id=ee_descriptor_id,
6965 )
6966 result, result_detail = await self._ns_execute_primitive(
6967 ee_id,
6968 primitive_name,
6969 primitive_params,
6970 vca_type=vca_type,
6971 vca_id=vca_id,
6972 )
6973 self.logger.debug(
6974 logging_text
6975 + "vnf_config_primitive={} Done with result {} {}".format(
6976 vnf_config_primitive, result, result_detail
6977 )
6978 )
6979 # Update operationState = COMPLETED | FAILED
6980 self._update_suboperation_status(
6981 db_nslcmop, op_index, result, result_detail
6982 )
6983
6984 if result == "FAILED":
6985 raise LcmException(result_detail)
6986 db_nsr_update["config-status"] = old_config_status
6987 scale_process = None
6988 # POST-SCALE END
6989
6990 db_nsr_update[
6991 "detailed-status"
6992 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6993 db_nsr_update["operational-status"] = (
6994 "running"
6995 if old_operational_status == "failed"
6996 else old_operational_status
6997 )
6998 db_nsr_update["config-status"] = old_config_status
6999 return
7000 except (
7001 ROclient.ROClientException,
7002 DbException,
7003 LcmException,
7004 NgRoException,
7005 ) as e:
7006 self.logger.error(logging_text + "Exit Exception {}".format(e))
7007 exc = e
7008 except asyncio.CancelledError:
7009 self.logger.error(
7010 logging_text + "Cancelled Exception while '{}'".format(step)
7011 )
7012 exc = "Operation was cancelled"
7013 except Exception as e:
7014 exc = traceback.format_exc()
7015 self.logger.critical(
7016 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7017 exc_info=True,
7018 )
7019 finally:
7020 self._write_ns_status(
7021 nsr_id=nsr_id,
7022 ns_state=None,
7023 current_operation="IDLE",
7024 current_operation_id=None,
7025 )
7026 if tasks_dict_info:
7027 stage[1] = "Waiting for instantiate pending tasks."
7028 self.logger.debug(logging_text + stage[1])
7029 exc = await self._wait_for_tasks(
7030 logging_text,
7031 tasks_dict_info,
7032 self.timeout.ns_deploy,
7033 stage,
7034 nslcmop_id,
7035 nsr_id=nsr_id,
7036 )
7037 if exc:
7038 db_nslcmop_update[
7039 "detailed-status"
7040 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7041 nslcmop_operation_state = "FAILED"
7042 if db_nsr:
7043 db_nsr_update["operational-status"] = old_operational_status
7044 db_nsr_update["config-status"] = old_config_status
7045 db_nsr_update["detailed-status"] = ""
7046 if scale_process:
7047 if "VCA" in scale_process:
7048 db_nsr_update["config-status"] = "failed"
7049 if "RO" in scale_process:
7050 db_nsr_update["operational-status"] = "failed"
7051 db_nsr_update[
7052 "detailed-status"
7053 ] = "FAILED scaling nslcmop={} {}: {}".format(
7054 nslcmop_id, step, exc
7055 )
7056 else:
7057 error_description_nslcmop = None
7058 nslcmop_operation_state = "COMPLETED"
7059 db_nslcmop_update["detailed-status"] = "Done"
7060
7061 self._write_op_status(
7062 op_id=nslcmop_id,
7063 stage="",
7064 error_message=error_description_nslcmop,
7065 operation_state=nslcmop_operation_state,
7066 other_update=db_nslcmop_update,
7067 )
7068 if db_nsr:
7069 self._write_ns_status(
7070 nsr_id=nsr_id,
7071 ns_state=None,
7072 current_operation="IDLE",
7073 current_operation_id=None,
7074 other_update=db_nsr_update,
7075 )
7076
7077 if nslcmop_operation_state:
7078 try:
7079 msg = {
7080 "nsr_id": nsr_id,
7081 "nslcmop_id": nslcmop_id,
7082 "operationState": nslcmop_operation_state,
7083 }
7084 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7085 except Exception as e:
7086 self.logger.error(
7087 logging_text + "kafka_write notification Exception {}".format(e)
7088 )
7089 self.logger.debug(logging_text + "Exit")
7090 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7091
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDU applications of a NS.

        For every KDU referenced in scaling_info, run the applicable
        terminate/initial config primitives (only when the KDU has no juju
        execution environment) and ask the matching K8s connector to scale
        the target resource.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id (used to build the status db path)
        :param nsr_deployed: nsr _admin.deployed content with the K8s deployments
        :param db_vnfd: VNFD of the VNF owning the KDUs
        :param vca_id: VCA id passed through to the K8s connector
        :param scaling_info: dict with "kdu-create" or "kdu-delete" scaling data
        """
        # NOTE(review): when both "kdu-create" and "kdu-delete" are present only
        # "kdu-create" is processed — confirm callers never send both at once
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # operation status is written under the deployed K8s entry in the nsr
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # terminate primitives only run when there is no juju EE for the KDU
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a hung connector call
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # actual scale request, executed for both create and delete cases
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # initial primitives only run when there is no juju EE for the KDU
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7201
7202 async def _scale_ng_ro(
7203 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7204 ):
7205 nsr_id = db_nslcmop["nsInstanceId"]
7206 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7207 db_vnfrs = {}
7208
7209 # read from db: vnfd's for every vnf
7210 db_vnfds = []
7211
7212 # for each vnf in ns, read vnfd
7213 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7214 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7215 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7216 # if we haven't this vnfd, read it from db
7217 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7218 # read from db
7219 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7220 db_vnfds.append(vnfd)
7221 n2vc_key = self.n2vc.get_public_key()
7222 n2vc_key_list = [n2vc_key]
7223 self.scale_vnfr(
7224 db_vnfr,
7225 vdu_scaling_info.get("vdu-create"),
7226 vdu_scaling_info.get("vdu-delete"),
7227 mark_delete=True,
7228 )
7229 # db_vnfr has been updated, update db_vnfrs to use it
7230 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7231 await self._instantiate_ng_ro(
7232 logging_text,
7233 nsr_id,
7234 db_nsd,
7235 db_nsr,
7236 db_nslcmop,
7237 db_vnfrs,
7238 db_vnfds,
7239 n2vc_key_list,
7240 stage=stage,
7241 start_deploy=time(),
7242 timeout_ns_deploy=self.timeout.ns_deploy,
7243 )
7244 if vdu_scaling_info.get("vdu-delete"):
7245 self.scale_vnfr(
7246 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7247 )
7248
7249 async def extract_prometheus_scrape_jobs(
7250 self,
7251 ee_id: str,
7252 artifact_path: str,
7253 ee_config_descriptor: dict,
7254 vnfr_id: str,
7255 nsr_id: str,
7256 target_ip: str,
7257 element_type: str,
7258 vnf_member_index: str = "",
7259 vdu_id: str = "",
7260 vdu_index: int = None,
7261 kdu_name: str = "",
7262 kdu_index: int = None,
7263 ) -> dict:
7264 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7265 This method will wait until the corresponding VDU or KDU is fully instantiated
7266
7267 Args:
7268 ee_id (str): Execution Environment ID
7269 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7270 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7271 vnfr_id (str): VNFR ID where this EE applies
7272 nsr_id (str): NSR ID where this EE applies
7273 target_ip (str): VDU/KDU instance IP address
7274 element_type (str): NS or VNF or VDU or KDU
7275 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7276 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7277 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7278 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7279 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7280
7281 Raises:
7282 LcmException: When the VDU or KDU instance was not found in an hour
7283
7284 Returns:
7285 _type_: Prometheus jobs
7286 """
7287 # default the vdur and kdur names to an empty string, to avoid any later
7288 # problem with Prometheus when the element type is not VDU or KDU
7289 vdur_name = ""
7290 kdur_name = ""
7291
7292 # look if exist a file called 'prometheus*.j2' and
7293 artifact_content = self.fs.dir_ls(artifact_path)
7294 job_file = next(
7295 (
7296 f
7297 for f in artifact_content
7298 if f.startswith("prometheus") and f.endswith(".j2")
7299 ),
7300 None,
7301 )
7302 if not job_file:
7303 return
7304 with self.fs.file_open((artifact_path, job_file), "r") as f:
7305 job_data = f.read()
7306
7307 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7308 if element_type in ("VDU", "KDU"):
7309 for _ in range(360):
7310 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7311 if vdu_id and vdu_index is not None:
7312 vdur = next(
7313 (
7314 x
7315 for x in get_iterable(db_vnfr, "vdur")
7316 if (
7317 x.get("vdu-id-ref") == vdu_id
7318 and x.get("count-index") == vdu_index
7319 )
7320 ),
7321 {},
7322 )
7323 if vdur.get("name"):
7324 vdur_name = vdur.get("name")
7325 break
7326 if kdu_name and kdu_index is not None:
7327 kdur = next(
7328 (
7329 x
7330 for x in get_iterable(db_vnfr, "kdur")
7331 if (
7332 x.get("kdu-name") == kdu_name
7333 and x.get("count-index") == kdu_index
7334 )
7335 ),
7336 {},
7337 )
7338 if kdur.get("name"):
7339 kdur_name = kdur.get("name")
7340 break
7341
7342 await asyncio.sleep(10, loop=self.loop)
7343 else:
7344 if vdu_id and vdu_index is not None:
7345 raise LcmException(
7346 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7347 )
7348 if kdu_name and kdu_index is not None:
7349 raise LcmException(
7350 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7351 )
7352
7353 # TODO get_service
7354 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7355 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7356 host_port = "80"
7357 vnfr_id = vnfr_id.replace("-", "")
7358 variables = {
7359 "JOB_NAME": vnfr_id,
7360 "TARGET_IP": target_ip,
7361 "EXPORTER_POD_IP": host_name,
7362 "EXPORTER_POD_PORT": host_port,
7363 "NSR_ID": nsr_id,
7364 "VNF_MEMBER_INDEX": vnf_member_index,
7365 "VDUR_NAME": vdur_name,
7366 "KDUR_NAME": kdur_name,
7367 "ELEMENT_TYPE": element_type,
7368 }
7369 job_list = parse_job(job_data, variables)
7370 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7371 for job in job_list:
7372 if (
7373 not isinstance(job.get("job_name"), str)
7374 or vnfr_id not in job["job_name"]
7375 ):
7376 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7377 job["nsr_id"] = nsr_id
7378 job["vnfr_id"] = vnfr_id
7379 return job_list
7380
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild a single VDU instance through RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: operation id
        :param vnf_id: _id of the VNFR that owns the target VDU
        :param additional_param: dict with "vdu_id" and "count-index" selecting the VDU
        :param operation_type: RO operate action (e.g. start/stop/rebuild)
        :return: tuple ("COMPLETED"|"FAILED", detail text)
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # all records of the requested VDU, then the one with the requested count-index
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info identifies the target VIM
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO reports completion of the action
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # reached only when an exception occurred: exc was set by a handler above
        return "FAILED", "Error in operate VNF {}".format(exc)
7467
7468 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7469 """
7470 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7471
7472 :param: vim_account_id: VIM Account ID
7473
7474 :return: (cloud_name, cloud_credential)
7475 """
7476 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7477 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7478
7479 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7480 """
7481 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7482
7483 :param: vim_account_id: VIM Account ID
7484
7485 :return: (cloud_name, cloud_credential)
7486 """
7487 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7488 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7489
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # RO target is just the operation parameters plus nothing else
            target = {}
            target.update(migrate_params)
            # RO starts the migration and returns an action id to wait on
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                # NOTE(review): db_nsr_update is filled here but never persisted
                # anywhere in this method (no update_db_2/_write_ns_status uses
                # it) — confirm whether the nsr detailed-status should be written
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is always empty, even on failure —
            # compare with heal(), which passes the error description; confirm
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7592
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS: re-create the damaged VDUs at RO/VIM and re-deploy the
        execution environments (N2VC) that apply to them.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # keep previous statuses so they can be restored on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # No explicit VDU list: build one entry per existing VDU record
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-index is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance stays None when no vdur
                            # matches; the .get below would then raise — confirm a
                            # matching record is guaranteed at this point
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the tasks registered by _heal_n2vc before reporting status
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the statuses recorded before the operation started
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify subscribers through kafka; best effort, errors only logged
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7878
    async def heal_RO(
        self,
        logging_text,
        nsr_id,
        db_nslcmop,
        stage,
    ):
        """
        Heal at RO
        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
        :return: None or exception
        """

        def get_vim_account(vim_account_id):
            # cache VIM account records to avoid repeated db reads
            # NOTE(review): defined but apparently never called in this method — confirm
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        try:
            start_heal = time()
            ns_params = db_nslcmop.get("operationParams")
            # an operation-level timeout overrides the configured default
            if ns_params and ns_params.get("timeout_ns_heal"):
                timeout_ns_heal = ns_params["timeout_ns_heal"]
            else:
                timeout_ns_heal = self.timeout.ns_heal

            db_vims = {}

            nslcmop_id = db_nslcmop["_id"]
            # the nslcmop id doubles as the RO action id
            target = {
                "action_id": nslcmop_id,
            }
            self.logger.warning(
                "db_nslcmop={} and timeout_ns_heal={}".format(
                    db_nslcmop, timeout_ns_heal
                )
            )
            target.update(db_nslcmop.get("operationParams", {}))

            self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
            desc = await self.RO.recreate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_heal,
                timeout_ns_heal,
                stage,
                operation="healing",
            )

            # Updating NSR
            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "running",
                "detailed-status": " ".join(stage),
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
            self.logger.debug(
                logging_text + "ns healed at RO. RO_id={}".format(action_id)
            )

        except Exception as e:
            stage[2] = "ERROR healing at VIM"
            # self.set_vnfr_at_error(db_vnfrs, str(e))
            # expected client/db errors are logged without traceback; anything
            # else gets full exc_info, then the exception is re-raised to the caller
            self.logger.error(
                "Error healing at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise
7966
7967 def _heal_n2vc(
7968 self,
7969 logging_text,
7970 db_nsr,
7971 db_vnfr,
7972 nslcmop_id,
7973 nsr_id,
7974 nsi_id,
7975 vnfd_id,
7976 vdu_id,
7977 kdu_name,
7978 member_vnf_index,
7979 vdu_index,
7980 vdu_name,
7981 deploy_params,
7982 descriptor_config,
7983 base_folder,
7984 task_instantiation_info,
7985 stage,
7986 ):
7987 # launch instantiate_N2VC in a asyncio task and register task object
7988 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
7989 # if not found, create one entry and update database
7990 # fill db_nsr._admin.deployed.VCA.<index>
7991
7992 self.logger.debug(
7993 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
7994 )
7995
7996 charm_name = ""
7997 get_charm_name = False
7998 if "execution-environment-list" in descriptor_config:
7999 ee_list = descriptor_config.get("execution-environment-list", [])
8000 elif "juju" in descriptor_config:
8001 ee_list = [descriptor_config] # ns charms
8002 if "execution-environment-list" not in descriptor_config:
8003 # charm name is only required for ns charms
8004 get_charm_name = True
8005 else: # other types as script are not supported
8006 ee_list = []
8007
8008 for ee_item in ee_list:
8009 self.logger.debug(
8010 logging_text
8011 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8012 ee_item.get("juju"), ee_item.get("helm-chart")
8013 )
8014 )
8015 ee_descriptor_id = ee_item.get("id")
8016 if ee_item.get("juju"):
8017 vca_name = ee_item["juju"].get("charm")
8018 if get_charm_name:
8019 charm_name = self.find_charm_name(db_nsr, str(vca_name))
8020 vca_type = (
8021 "lxc_proxy_charm"
8022 if ee_item["juju"].get("charm") is not None
8023 else "native_charm"
8024 )
8025 if ee_item["juju"].get("cloud") == "k8s":
8026 vca_type = "k8s_proxy_charm"
8027 elif ee_item["juju"].get("proxy") is False:
8028 vca_type = "native_charm"
8029 elif ee_item.get("helm-chart"):
8030 vca_name = ee_item["helm-chart"]
8031 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
8032 vca_type = "helm"
8033 else:
8034 vca_type = "helm-v3"
8035 else:
8036 self.logger.debug(
8037 logging_text + "skipping non juju neither charm configuration"
8038 )
8039 continue
8040
8041 vca_index = -1
8042 for vca_index, vca_deployed in enumerate(
8043 db_nsr["_admin"]["deployed"]["VCA"]
8044 ):
8045 if not vca_deployed:
8046 continue
8047 if (
8048 vca_deployed.get("member-vnf-index") == member_vnf_index
8049 and vca_deployed.get("vdu_id") == vdu_id
8050 and vca_deployed.get("kdu_name") == kdu_name
8051 and vca_deployed.get("vdu_count_index", 0) == vdu_index
8052 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
8053 ):
8054 break
8055 else:
8056 # not found, create one.
8057 target = (
8058 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
8059 )
8060 if vdu_id:
8061 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
8062 elif kdu_name:
8063 target += "/kdu/{}".format(kdu_name)
8064 vca_deployed = {
8065 "target_element": target,
8066 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8067 "member-vnf-index": member_vnf_index,
8068 "vdu_id": vdu_id,
8069 "kdu_name": kdu_name,
8070 "vdu_count_index": vdu_index,
8071 "operational-status": "init", # TODO revise
8072 "detailed-status": "", # TODO revise
8073 "step": "initial-deploy", # TODO revise
8074 "vnfd_id": vnfd_id,
8075 "vdu_name": vdu_name,
8076 "type": vca_type,
8077 "ee_descriptor_id": ee_descriptor_id,
8078 "charm_name": charm_name,
8079 }
8080 vca_index += 1
8081
8082 # create VCA and configurationStatus in db
8083 db_dict = {
8084 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
8085 "configurationStatus.{}".format(vca_index): dict(),
8086 }
8087 self.update_db_2("nsrs", nsr_id, db_dict)
8088
8089 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
8090
8091 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
8092 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
8093 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
8094
8095 # Launch task
8096 task_n2vc = asyncio.ensure_future(
8097 self.heal_N2VC(
8098 logging_text=logging_text,
8099 vca_index=vca_index,
8100 nsi_id=nsi_id,
8101 db_nsr=db_nsr,
8102 db_vnfr=db_vnfr,
8103 vdu_id=vdu_id,
8104 kdu_name=kdu_name,
8105 vdu_index=vdu_index,
8106 deploy_params=deploy_params,
8107 config_descriptor=descriptor_config,
8108 base_folder=base_folder,
8109 nslcmop_id=nslcmop_id,
8110 stage=stage,
8111 vca_type=vca_type,
8112 vca_name=vca_name,
8113 ee_config_descriptor=ee_item,
8114 )
8115 )
8116 self.lcm_tasks.register(
8117 "ns",
8118 nsr_id,
8119 nslcmop_id,
8120 "instantiate_N2VC-{}".format(vca_index),
8121 task_n2vc,
8122 )
8123 task_instantiation_info[
8124 task_n2vc
8125 ] = self.task_name_deploy_vca + " {}.{}".format(
8126 member_vnf_index or "", vdu_id or ""
8127 )
8128
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal the execution environment of one VCA and optionally re-run Day-1.

        Counterpart of the instantiation N2VC flow for healing: for
        ``native_charm`` VCAs it waits for the VM to come up, re-registers the
        execution environment and re-installs the configuration software; for
        proxy/helm VCAs it waits for RO to finish healing and re-inserts the
        SSH public key into the VM. The Day-1 initial config primitives are
        re-executed only when the operation parameter ``run-day1`` is set in
        ``deploy_params``.

        :param vca_index: index of this VCA inside _admin.deployed.VCA
        :param config_descriptor: descriptor section holding config-access and
            initial-config-primitive information
        :param stage: 3-element list of stage strings, mutated in place
        :raises LcmException: wrapping any failure, after writing a BROKEN
            configuration status to the NS record
        """
        nsr_id = db_nsr["_id"]
        # dot-terminated path prefix used for all partial updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current sub-operation for error reporting below
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, they need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the "config" primitive parameters (if declared) are passed at install time
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # log with traceback only for unexpected exception types
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8537
8538 async def _wait_heal_ro(
8539 self,
8540 nsr_id,
8541 timeout=600,
8542 ):
8543 start_time = time()
8544 while time() <= start_time + timeout:
8545 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8546 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8547 "operational-status"
8548 ]
8549 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8550 if operational_status_ro != "healing":
8551 break
8552 await asyncio.sleep(15, loop=self.loop)
8553 else: # timeout_ns_deploy
8554 raise NgRoException("Timeout waiting ns to deploy")
8555
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation; nothing to do here
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # the RO target is built directly from the operation parameters
            operationParams = db_nslcmop.get("operationParams")
            target = {}
            target.update(operationParams)
            # delegate the actual scaling to RO and wait for its completion
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            # expected failure types: log without traceback
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # always return NS to IDLE and record the operation outcome
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                # NOTE(review): db_nsr_update is filled here but never written
                # to the "nsrs" collection in this method — confirm whether an
                # update_db_2("nsrs", ...) call is intentionally omitted
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # notify result through kafka so NBI/clients can react
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    # best-effort notification: failure to publish must not fail the operation
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")