Bug 2210 NS instantiation fails in basic12
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm_conn import K8sHelmConnector
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import randint
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
131 class NsLcm(LcmBase):
132 SUBOPERATION_STATUS_NOT_FOUND = -1
133 SUBOPERATION_STATUS_NEW = -2
134 SUBOPERATION_STATUS_SKIP = -3
135 task_name_deploy_vca = "Deploying VCA"
136
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus client used to publish/consume LCM events
        :param lcm_tasks: registry of running LCM tasks
        :param config: LcmCfg holding timeout, RO and VCA sub-configurations
        :param loop: asyncio event loop shared with all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # db/fs handles come from process-wide singletons created elsewhere
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector; it pushes status changes back into the nsrs
        # record through the _on_update_n2vc_db callback
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # K8s connectors: helm v2, helm v3 and juju bundles; helm connectors do
        # not report status updates (on_update_db=None)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # kdu deployment type -> k8s connector; note plain "chart" maps to helm v3
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # VCA type -> connector used to deploy/operate the execution environment
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # operation type -> RO status-polling coroutine; healing polls the
        # dedicated recreate_status endpoint
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
224
225 @staticmethod
226 def increment_ip_mac(ip_mac, vm_index=1):
227 if not isinstance(ip_mac, str):
228 return ip_mac
229 try:
230 # try with ipv4 look for last dot
231 i = ip_mac.rfind(".")
232 if i > 0:
233 i += 1
234 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
235 # try with ipv6 or mac look for last colon. Operate in hex
236 i = ip_mac.rfind(":")
237 if i > 0:
238 i += 1
239 # format in hex, len can be 2 for mac or 4 for ipv6
240 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
241 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
242 )
243 except Exception:
244 pass
245 return None
246
247 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
248
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """
        N2VC callback: refresh vcaStatus and nsState of an NSR after juju
        reports a change.

        :param table: source table of the notification (not used by this method)
        :param filter: db filter; only its "_id" (the nsr id) is used
        :param path: dotted path of the changed element; its last component is
            taken as the VCA index
        :param updated_data: changed data (status is re-read from n2vc instead)
        :param vca_id: optional VCA id forwarded to n2vc.get_status
        :return: None; errors are logged, except cancellation/timeout which re-raise
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # VCA index is the last dotted component of path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so these assignments raise KeyError, which is swallowed
                # by the except below — confirm whether a flat
                # "configurationStatus.{index}.status" key was intended
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
366
367 async def _on_update_k8s_db(
368 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
369 ):
370 """
371 Updating vca status in NSR record
372 :param cluster_uuid: UUID of a k8s cluster
373 :param kdu_instance: The unique name of the KDU instance
374 :param filter: To get nsr_id
375 :cluster_type: The cluster type (juju, k8s)
376 :return: none
377 """
378
379 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
380 # .format(cluster_uuid, kdu_instance, filter))
381
382 nsr_id = filter.get("_id")
383 try:
384 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
385 cluster_uuid=cluster_uuid,
386 kdu_instance=kdu_instance,
387 yaml_format=False,
388 complete_status=True,
389 vca_id=vca_id,
390 )
391
392 # vcaStatus
393 db_dict = dict()
394 db_dict["vcaStatus"] = {nsr_id: vca_status}
395
396 self.logger.debug(
397 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
398 )
399
400 # write to database
401 self.update_db_2("nsrs", nsr_id, db_dict)
402 except (asyncio.CancelledError, asyncio.TimeoutError):
403 raise
404 except Exception as e:
405 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
406
407 @staticmethod
408 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
409 try:
410 env = Environment(
411 undefined=StrictUndefined,
412 autoescape=select_autoescape(default_for_string=True, default=True),
413 )
414 template = env.from_string(cloud_init_text)
415 return template.render(additional_params or {})
416 except UndefinedError as e:
417 raise LcmException(
418 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
419 "file, must be provided in the instantiation parameters inside the "
420 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
421 )
422 except (TemplateError, TemplateNotFound) as e:
423 raise LcmException(
424 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
425 vnfd_id, vdu_id, e
426 )
427 )
428
429 def _get_vdu_cloud_init_content(self, vdu, vnfd):
430 cloud_init_content = cloud_init_file = None
431 try:
432 if vdu.get("cloud-init-file"):
433 base_folder = vnfd["_admin"]["storage"]
434 if base_folder["pkg-dir"]:
435 cloud_init_file = "{}/{}/cloud_init/{}".format(
436 base_folder["folder"],
437 base_folder["pkg-dir"],
438 vdu["cloud-init-file"],
439 )
440 else:
441 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
442 base_folder["folder"],
443 vdu["cloud-init-file"],
444 )
445 with self.fs.file_open(cloud_init_file, "r") as ci_file:
446 cloud_init_content = ci_file.read()
447 elif vdu.get("cloud-init"):
448 cloud_init_content = vdu["cloud-init"]
449
450 return cloud_init_content
451 except FsException as e:
452 raise LcmException(
453 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
454 vnfd["id"], vdu["id"], cloud_init_file, e
455 )
456 )
457
458 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
459 vdur = next(
460 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
461 )
462 additional_params = vdur.get("additionalParams")
463 return parse_yaml_strings(additional_params)
464
465 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
466 """
467 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
468 :param vnfd: input vnfd
469 :param new_id: overrides vnf id if provided
470 :param additionalParams: Instantiation params for VNFs provided
471 :param nsrId: Id of the NSR
472 :return: copy of vnfd
473 """
474 vnfd_RO = deepcopy(vnfd)
475 # remove unused by RO configuration, monitoring, scaling and internal keys
476 vnfd_RO.pop("_id", None)
477 vnfd_RO.pop("_admin", None)
478 vnfd_RO.pop("monitoring-param", None)
479 vnfd_RO.pop("scaling-group-descriptor", None)
480 vnfd_RO.pop("kdu", None)
481 vnfd_RO.pop("k8s-cluster", None)
482 if new_id:
483 vnfd_RO["id"] = new_id
484
485 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
486 for vdu in get_iterable(vnfd_RO, "vdu"):
487 vdu.pop("cloud-init-file", None)
488 vdu.pop("cloud-init", None)
489 return vnfd_RO
490
491 @staticmethod
492 def ip_profile_2_RO(ip_profile):
493 RO_ip_profile = deepcopy(ip_profile)
494 if "dns-server" in RO_ip_profile:
495 if isinstance(RO_ip_profile["dns-server"], list):
496 RO_ip_profile["dns-address"] = []
497 for ds in RO_ip_profile.pop("dns-server"):
498 RO_ip_profile["dns-address"].append(ds["address"])
499 else:
500 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
501 if RO_ip_profile.get("ip-version") == "ipv4":
502 RO_ip_profile["ip-version"] = "IPv4"
503 if RO_ip_profile.get("ip-version") == "ipv6":
504 RO_ip_profile["ip-version"] = "IPv6"
505 if "dhcp-params" in RO_ip_profile:
506 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
507 return RO_ip_profile
508
509 def _get_ro_vim_id_for_vim_account(self, vim_account):
510 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
511 if db_vim["_admin"]["operationalState"] != "ENABLED":
512 raise LcmException(
513 "VIM={} is not available. operationalState={}".format(
514 vim_account, db_vim["_admin"]["operationalState"]
515 )
516 )
517 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
518 return RO_vim_id
519
520 def get_ro_wim_id_for_wim_account(self, wim_account):
521 if isinstance(wim_account, str):
522 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
523 if db_wim["_admin"]["operationalState"] != "ENABLED":
524 raise LcmException(
525 "WIM={} is not available. operationalState={}".format(
526 wim_account, db_wim["_admin"]["operationalState"]
527 )
528 )
529 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
530 return RO_wim_id
531 else:
532 return wim_account
533
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """
        Apply a scaling operation to a vnfr record in the database and refresh
        the in-memory db_vnfr to match.

        :param db_vnfr: vnfr record; its "vdur" list is re-read at the end
        :param vdu_create: dict vdu-id -> number of replicas to add
        :param vdu_delete: dict vdu-id -> number of replicas to remove
        :param mark_delete: when True, mark vdurs as DELETING instead of
            pulling them from the record
        :return: None
        :raises LcmException: scaling out with neither a vdur nor a saved template
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the newest existing vdur of this vdu-id
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db (scale-from-0 case):
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each replica is a fresh copy of the base vdur, reset to
                    # BUILD state with a new _id and bumped count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM re-assigns them
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # flag the newest vdu_count replicas as DELETING in-place
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # assemble the push_list: new replicas and/or the scale-to-0 template
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
645
646 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
647 """
648 Updates database nsr with the RO info for the created vld
649 :param ns_update_nsr: dictionary to be filled with the updated info
650 :param db_nsr: content of db_nsr. This is also modified
651 :param nsr_desc_RO: nsr descriptor from RO
652 :return: Nothing, LcmException is raised on errors
653 """
654
655 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
656 for net_RO in get_iterable(nsr_desc_RO, "nets"):
657 if vld["id"] != net_RO.get("ns_net_osm_id"):
658 continue
659 vld["vim-id"] = net_RO.get("vim_net_id")
660 vld["name"] = net_RO.get("vim_name")
661 vld["status"] = net_RO.get("status")
662 vld["status-detailed"] = net_RO.get("error_msg")
663 ns_update_nsr["vld.{}".format(vld_index)] = vld
664 break
665 else:
666 raise LcmException(
667 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
668 )
669
670 def set_vnfr_at_error(self, db_vnfrs, error_text):
671 try:
672 for db_vnfr in db_vnfrs.values():
673 vnfr_update = {"status": "ERROR"}
674 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
675 if "status" not in vdur:
676 vdur["status"] = "ERROR"
677 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
678 if error_text:
679 vdur["status-detailed"] = str(error_text)
680 vnfr_update[
681 "vdur.{}.status-detailed".format(vdu_index)
682 ] = "ERROR"
683 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
684 except DbException as e:
685 self.logger.error("Cannot update vnf. {}".format(e))
686
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry with this member index; the for/else below
            # raises when RO reported no entry for it
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses ';'-separated; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO; nothing to copy for them
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        # walk past earlier replicas of the same vdu until the
                        # RO entry for this vdur's count-index is reached
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        # copy the VIM view of this VM into the vdur
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # match each vnfr interface with its RO counterpart by
                        # internal name; the for/else raises when one is missing
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # copy the VIM view of each vnf-internal network into the vld
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
783
784 def _get_ns_config_info(self, nsr_id):
785 """
786 Generates a mapping between vnf,vdu elements and the N2VC id
787 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
788 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
789 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
790 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
791 """
792 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
793 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
794 mapping = {}
795 ns_config_info = {"osm-config-mapping": mapping}
796 for vca in vca_deployed_list:
797 if not vca["member-vnf-index"]:
798 continue
799 if not vca["vdu_id"]:
800 mapping[vca["member-vnf-index"]] = vca["application"]
801 else:
802 mapping[
803 "{}.{}.{}".format(
804 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
805 )
806 ] = vca["application"]
807 return ns_config_info
808
809 async def _instantiate_ng_ro(
810 self,
811 logging_text,
812 nsr_id,
813 nsd,
814 db_nsr,
815 db_nslcmop,
816 db_vnfrs,
817 db_vnfds,
818 n2vc_key_list,
819 stage,
820 start_deploy,
821 timeout_ns_deploy,
822 ):
823
824 db_vims = {}
825
826 def get_vim_account(vim_account_id):
827 nonlocal db_vims
828 if vim_account_id in db_vims:
829 return db_vims[vim_account_id]
830 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
831 db_vims[vim_account_id] = db_vim
832 return db_vim
833
834 # modify target_vld info with instantiation parameters
835 def parse_vld_instantiation_params(
836 target_vim, target_vld, vld_params, target_sdn
837 ):
838 if vld_params.get("ip-profile"):
839 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
840 "ip-profile"
841 ]
842 if vld_params.get("provider-network"):
843 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
844 "provider-network"
845 ]
846 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
847 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
848 "provider-network"
849 ]["sdn-ports"]
850
851 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
852 # if wim_account_id is specified in vld_params, validate if it is feasible.
853 wim_account_id, db_wim = select_feasible_wim_account(
854 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
855 )
856
857 if wim_account_id:
858 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
859 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
860 # update vld_params with correct WIM account Id
861 vld_params["wimAccountId"] = wim_account_id
862
863 target_wim = "wim:{}".format(wim_account_id)
864 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
865 sdn_ports = get_sdn_ports(vld_params, db_wim)
866 if len(sdn_ports) > 0:
867 target_vld["vim_info"][target_wim] = target_wim_attrs
868 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
869
870 self.logger.debug(
871 "Target VLD with WIM data: {:s}".format(str(target_vld))
872 )
873
874 for param in ("vim-network-name", "vim-network-id"):
875 if vld_params.get(param):
876 if isinstance(vld_params[param], dict):
877 for vim, vim_net in vld_params[param].items():
878 other_target_vim = "vim:" + vim
879 populate_dict(
880 target_vld["vim_info"],
881 (other_target_vim, param.replace("-", "_")),
882 vim_net,
883 )
884 else: # isinstance str
885 target_vld["vim_info"][target_vim][
886 param.replace("-", "_")
887 ] = vld_params[param]
888 if vld_params.get("common_id"):
889 target_vld["common_id"] = vld_params.get("common_id")
890
891 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
892 def update_ns_vld_target(target, ns_params):
893 for vnf_params in ns_params.get("vnf", ()):
894 if vnf_params.get("vimAccountId"):
895 target_vnf = next(
896 (
897 vnfr
898 for vnfr in db_vnfrs.values()
899 if vnf_params["member-vnf-index"]
900 == vnfr["member-vnf-index-ref"]
901 ),
902 None,
903 )
904 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
905 if not vdur:
906 return
907 for a_index, a_vld in enumerate(target["ns"]["vld"]):
908 target_vld = find_in_list(
909 get_iterable(vdur, "interfaces"),
910 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
911 )
912
913 vld_params = find_in_list(
914 get_iterable(ns_params, "vld"),
915 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
916 )
917 if target_vld:
918
919 if vnf_params.get("vimAccountId") not in a_vld.get(
920 "vim_info", {}
921 ):
922 target_vim_network_list = [
923 v for _, v in a_vld.get("vim_info").items()
924 ]
925 target_vim_network_name = next(
926 (
927 item.get("vim_network_name", "")
928 for item in target_vim_network_list
929 ),
930 "",
931 )
932
933 target["ns"]["vld"][a_index].get("vim_info").update(
934 {
935 "vim:{}".format(vnf_params["vimAccountId"]): {
936 "vim_network_name": target_vim_network_name,
937 }
938 }
939 )
940
941 if vld_params:
942 for param in ("vim-network-name", "vim-network-id"):
943 if vld_params.get(param) and isinstance(
944 vld_params[param], dict
945 ):
946 for vim, vim_net in vld_params[
947 param
948 ].items():
949 other_target_vim = "vim:" + vim
950 populate_dict(
951 target["ns"]["vld"][a_index].get(
952 "vim_info"
953 ),
954 (
955 other_target_vim,
956 param.replace("-", "_"),
957 ),
958 vim_net,
959 )
960
961 nslcmop_id = db_nslcmop["_id"]
962 target = {
963 "name": db_nsr["name"],
964 "ns": {"vld": []},
965 "vnf": [],
966 "image": deepcopy(db_nsr["image"]),
967 "flavor": deepcopy(db_nsr["flavor"]),
968 "action_id": nslcmop_id,
969 "cloud_init_content": {},
970 }
971 for image in target["image"]:
972 image["vim_info"] = {}
973 for flavor in target["flavor"]:
974 flavor["vim_info"] = {}
975 if db_nsr.get("affinity-or-anti-affinity-group"):
976 target["affinity-or-anti-affinity-group"] = deepcopy(
977 db_nsr["affinity-or-anti-affinity-group"]
978 )
979 for affinity_or_anti_affinity_group in target[
980 "affinity-or-anti-affinity-group"
981 ]:
982 affinity_or_anti_affinity_group["vim_info"] = {}
983
984 if db_nslcmop.get("lcmOperationType") != "instantiate":
985 # get parameters of instantiation:
986 db_nslcmop_instantiate = self.db.get_list(
987 "nslcmops",
988 {
989 "nsInstanceId": db_nslcmop["nsInstanceId"],
990 "lcmOperationType": "instantiate",
991 },
992 )[-1]
993 ns_params = db_nslcmop_instantiate.get("operationParams")
994 else:
995 ns_params = db_nslcmop.get("operationParams")
996 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
997 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
998
999 cp2target = {}
1000 for vld_index, vld in enumerate(db_nsr.get("vld")):
1001 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1002 target_vld = {
1003 "id": vld["id"],
1004 "name": vld["name"],
1005 "mgmt-network": vld.get("mgmt-network", False),
1006 "type": vld.get("type"),
1007 "vim_info": {
1008 target_vim: {
1009 "vim_network_name": vld.get("vim-network-name"),
1010 "vim_account_id": ns_params["vimAccountId"],
1011 }
1012 },
1013 }
1014 # check if this network needs SDN assist
1015 if vld.get("pci-interfaces"):
1016 db_vim = get_vim_account(ns_params["vimAccountId"])
1017 if vim_config := db_vim.get("config"):
1018 if sdnc_id := vim_config.get("sdn-controller"):
1019 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1020 target_sdn = "sdn:{}".format(sdnc_id)
1021 target_vld["vim_info"][target_sdn] = {
1022 "sdn": True,
1023 "target_vim": target_vim,
1024 "vlds": [sdn_vld],
1025 "type": vld.get("type"),
1026 }
1027
1028 nsd_vnf_profiles = get_vnf_profiles(nsd)
1029 for nsd_vnf_profile in nsd_vnf_profiles:
1030 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1031 if cp["virtual-link-profile-id"] == vld["id"]:
1032 cp2target[
1033 "member_vnf:{}.{}".format(
1034 cp["constituent-cpd-id"][0][
1035 "constituent-base-element-id"
1036 ],
1037 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1038 )
1039 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1040
1041 # check at nsd descriptor, if there is an ip-profile
1042 vld_params = {}
1043 nsd_vlp = find_in_list(
1044 get_virtual_link_profiles(nsd),
1045 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1046 == vld["id"],
1047 )
1048 if (
1049 nsd_vlp
1050 and nsd_vlp.get("virtual-link-protocol-data")
1051 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1052 ):
1053 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1054 "l3-protocol-data"
1055 ]
1056 ip_profile_dest_data = {}
1057 if "ip-version" in ip_profile_source_data:
1058 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1059 "ip-version"
1060 ]
1061 if "cidr" in ip_profile_source_data:
1062 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1063 "cidr"
1064 ]
1065 if "gateway-ip" in ip_profile_source_data:
1066 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1067 "gateway-ip"
1068 ]
1069 if "dhcp-enabled" in ip_profile_source_data:
1070 ip_profile_dest_data["dhcp-params"] = {
1071 "enabled": ip_profile_source_data["dhcp-enabled"]
1072 }
1073 vld_params["ip-profile"] = ip_profile_dest_data
1074
1075 # update vld_params with instantiation params
1076 vld_instantiation_params = find_in_list(
1077 get_iterable(ns_params, "vld"),
1078 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1079 )
1080 if vld_instantiation_params:
1081 vld_params.update(vld_instantiation_params)
1082 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1083 target["ns"]["vld"].append(target_vld)
1084 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1085 update_ns_vld_target(target, ns_params)
1086
1087 for vnfr in db_vnfrs.values():
1088 vnfd = find_in_list(
1089 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1090 )
1091 vnf_params = find_in_list(
1092 get_iterable(ns_params, "vnf"),
1093 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1094 )
1095 target_vnf = deepcopy(vnfr)
1096 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1097 for vld in target_vnf.get("vld", ()):
1098 # check if connected to a ns.vld, to fill target'
1099 vnf_cp = find_in_list(
1100 vnfd.get("int-virtual-link-desc", ()),
1101 lambda cpd: cpd.get("id") == vld["id"],
1102 )
1103 if vnf_cp:
1104 ns_cp = "member_vnf:{}.{}".format(
1105 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1106 )
1107 if cp2target.get(ns_cp):
1108 vld["target"] = cp2target[ns_cp]
1109
1110 vld["vim_info"] = {
1111 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1112 }
1113 # check if this network needs SDN assist
1114 target_sdn = None
1115 if vld.get("pci-interfaces"):
1116 db_vim = get_vim_account(vnfr["vim-account-id"])
1117 sdnc_id = db_vim["config"].get("sdn-controller")
1118 if sdnc_id:
1119 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1120 target_sdn = "sdn:{}".format(sdnc_id)
1121 vld["vim_info"][target_sdn] = {
1122 "sdn": True,
1123 "target_vim": target_vim,
1124 "vlds": [sdn_vld],
1125 "type": vld.get("type"),
1126 }
1127
1128 # check at vnfd descriptor, if there is an ip-profile
1129 vld_params = {}
1130 vnfd_vlp = find_in_list(
1131 get_virtual_link_profiles(vnfd),
1132 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1133 )
1134 if (
1135 vnfd_vlp
1136 and vnfd_vlp.get("virtual-link-protocol-data")
1137 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1138 ):
1139 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1140 "l3-protocol-data"
1141 ]
1142 ip_profile_dest_data = {}
1143 if "ip-version" in ip_profile_source_data:
1144 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1145 "ip-version"
1146 ]
1147 if "cidr" in ip_profile_source_data:
1148 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1149 "cidr"
1150 ]
1151 if "gateway-ip" in ip_profile_source_data:
1152 ip_profile_dest_data[
1153 "gateway-address"
1154 ] = ip_profile_source_data["gateway-ip"]
1155 if "dhcp-enabled" in ip_profile_source_data:
1156 ip_profile_dest_data["dhcp-params"] = {
1157 "enabled": ip_profile_source_data["dhcp-enabled"]
1158 }
1159
1160 vld_params["ip-profile"] = ip_profile_dest_data
1161 # update vld_params with instantiation params
1162 if vnf_params:
1163 vld_instantiation_params = find_in_list(
1164 get_iterable(vnf_params, "internal-vld"),
1165 lambda i_vld: i_vld["name"] == vld["id"],
1166 )
1167 if vld_instantiation_params:
1168 vld_params.update(vld_instantiation_params)
1169 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1170
1171 vdur_list = []
1172 for vdur in target_vnf.get("vdur", ()):
1173 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1174 continue # This vdu must not be created
1175 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1176
1177 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1178
1179 if ssh_keys_all:
1180 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1181 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1182 if (
1183 vdu_configuration
1184 and vdu_configuration.get("config-access")
1185 and vdu_configuration.get("config-access").get("ssh-access")
1186 ):
1187 vdur["ssh-keys"] = ssh_keys_all
1188 vdur["ssh-access-required"] = vdu_configuration[
1189 "config-access"
1190 ]["ssh-access"]["required"]
1191 elif (
1192 vnf_configuration
1193 and vnf_configuration.get("config-access")
1194 and vnf_configuration.get("config-access").get("ssh-access")
1195 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1196 ):
1197 vdur["ssh-keys"] = ssh_keys_all
1198 vdur["ssh-access-required"] = vnf_configuration[
1199 "config-access"
1200 ]["ssh-access"]["required"]
1201 elif ssh_keys_instantiation and find_in_list(
1202 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1203 ):
1204 vdur["ssh-keys"] = ssh_keys_instantiation
1205
1206 self.logger.debug("NS > vdur > {}".format(vdur))
1207
1208 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1209 # cloud-init
1210 if vdud.get("cloud-init-file"):
1211 vdur["cloud-init"] = "{}:file:{}".format(
1212 vnfd["_id"], vdud.get("cloud-init-file")
1213 )
1214 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1215 if vdur["cloud-init"] not in target["cloud_init_content"]:
1216 base_folder = vnfd["_admin"]["storage"]
1217 if base_folder["pkg-dir"]:
1218 cloud_init_file = "{}/{}/cloud_init/{}".format(
1219 base_folder["folder"],
1220 base_folder["pkg-dir"],
1221 vdud.get("cloud-init-file"),
1222 )
1223 else:
1224 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1225 base_folder["folder"],
1226 vdud.get("cloud-init-file"),
1227 )
1228 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1229 target["cloud_init_content"][
1230 vdur["cloud-init"]
1231 ] = ci_file.read()
1232 elif vdud.get("cloud-init"):
1233 vdur["cloud-init"] = "{}:vdu:{}".format(
1234 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1235 )
1236 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1237 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1238 "cloud-init"
1239 ]
1240 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1241 deploy_params_vdu = self._format_additional_params(
1242 vdur.get("additionalParams") or {}
1243 )
1244 deploy_params_vdu["OSM"] = get_osm_params(
1245 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1246 )
1247 vdur["additionalParams"] = deploy_params_vdu
1248
1249 # flavor
1250 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1251 if target_vim not in ns_flavor["vim_info"]:
1252 ns_flavor["vim_info"][target_vim] = {}
1253
1254 # deal with images
1255 # in case alternative images are provided we must check if they should be applied
1256 # for the vim_type, modify the vim_type taking into account
1257 ns_image_id = int(vdur["ns-image-id"])
1258 if vdur.get("alt-image-ids"):
1259 db_vim = get_vim_account(vnfr["vim-account-id"])
1260 vim_type = db_vim["vim_type"]
1261 for alt_image_id in vdur.get("alt-image-ids"):
1262 ns_alt_image = target["image"][int(alt_image_id)]
1263 if vim_type == ns_alt_image.get("vim-type"):
1264 # must use alternative image
1265 self.logger.debug(
1266 "use alternative image id: {}".format(alt_image_id)
1267 )
1268 ns_image_id = alt_image_id
1269 vdur["ns-image-id"] = ns_image_id
1270 break
1271 ns_image = target["image"][int(ns_image_id)]
1272 if target_vim not in ns_image["vim_info"]:
1273 ns_image["vim_info"][target_vim] = {}
1274
1275 # Affinity groups
1276 if vdur.get("affinity-or-anti-affinity-group-id"):
1277 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1278 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1279 if target_vim not in ns_ags["vim_info"]:
1280 ns_ags["vim_info"][target_vim] = {}
1281
1282 vdur["vim_info"] = {target_vim: {}}
1283 # instantiation parameters
1284 if vnf_params:
1285 vdu_instantiation_params = find_in_list(
1286 get_iterable(vnf_params, "vdu"),
1287 lambda i_vdu: i_vdu["id"] == vdud["id"],
1288 )
1289 if vdu_instantiation_params:
1290 # Parse the vdu_volumes from the instantiation params
1291 vdu_volumes = get_volumes_from_instantiation_params(
1292 vdu_instantiation_params, vdud
1293 )
1294 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1295 vdur_list.append(vdur)
1296 target_vnf["vdur"] = vdur_list
1297 target["vnf"].append(target_vnf)
1298
1299 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1300 desc = await self.RO.deploy(nsr_id, target)
1301 self.logger.debug("RO return > {}".format(desc))
1302 action_id = desc["action_id"]
1303 await self._wait_ng_ro(
1304 nsr_id,
1305 action_id,
1306 nslcmop_id,
1307 start_deploy,
1308 timeout_ns_deploy,
1309 stage,
1310 operation="instantiation",
1311 )
1312
1313 # Updating NSR
1314 db_nsr_update = {
1315 "_admin.deployed.RO.operational-status": "running",
1316 "detailed-status": " ".join(stage),
1317 }
1318 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1319 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1320 self._write_op_status(nslcmop_id, stage)
1321 self.logger.debug(
1322 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1323 )
1324 return
1325
1326 async def _wait_ng_ro(
1327 self,
1328 nsr_id,
1329 action_id,
1330 nslcmop_id=None,
1331 start_time=None,
1332 timeout=600,
1333 stage=None,
1334 operation=None,
1335 ):
1336 detailed_status_old = None
1337 db_nsr_update = {}
1338 start_time = start_time or time()
1339 while time() <= start_time + timeout:
1340 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1341 self.logger.debug("Wait NG RO > {}".format(desc_status))
1342 if desc_status["status"] == "FAILED":
1343 raise NgRoException(desc_status["details"])
1344 elif desc_status["status"] == "BUILD":
1345 if stage:
1346 stage[2] = "VIM: ({})".format(desc_status["details"])
1347 elif desc_status["status"] == "DONE":
1348 if stage:
1349 stage[2] = "Deployed at VIM"
1350 break
1351 else:
1352 assert False, "ROclient.check_ns_status returns unknown {}".format(
1353 desc_status["status"]
1354 )
1355 if stage and nslcmop_id and stage[2] != detailed_status_old:
1356 detailed_status_old = stage[2]
1357 db_nsr_update["detailed-status"] = " ".join(stage)
1358 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1359 self._write_op_status(nslcmop_id, stage)
1360 await asyncio.sleep(15, loop=self.loop)
1361 else: # timeout_ns_deploy
1362 raise NgRoException("Timeout waiting ns to deploy")
1363
    async def _terminate_ng_ro(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """Terminate an NS at NG-RO.

        Sends an empty deploy target to NG-RO (which removes every deployed
        element of the ns), waits for the action to finish, then deletes the
        nsr record at RO. Errors are collected in failed_detail instead of
        aborting immediately, so the nsr/operation status is always written
        to the database before raising.

        :param logging_text: prefix to use at logging
        :param nsr_deployed: content of nsr _admin.deployed (not referenced here;
            kept for signature compatibility with callers)
        :param nsr_id: nsr identity
        :param nslcmop_id: ns operation identity
        :param stage: 3-item status list; item 2 is overwritten with the result
        :raises LcmException: if any deletion step at RO failed
        """
        db_nsr_update = {}
        failed_detail = []
        action_id = None
        start_deploy = time()
        try:
            # an empty target (no vld/vnf/image/flavor) makes NG-RO delete
            # everything deployed for this ns
            target = {
                "ns": {"vld": []},
                "vnf": [],
                "image": [],
                "flavor": [],
                "action_id": nslcmop_id,
            }
            desc = await self.RO.deploy(nsr_id, target)
            action_id = desc["action_id"]
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
            self.logger.debug(
                logging_text
                + "ns terminate action at RO. action_id={}".format(action_id)
            )

            # wait until done
            delete_timeout = 20 * 60  # 20 minutes
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                delete_timeout,
                stage,
                operation="termination",
            )
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            # delete all nsr
            await self.RO.delete(nsr_id)
        except NgRoException as e:
            if e.http_code == 404:  # not found
                # already gone at RO: treat as success and clear the reference
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                self.logger.debug(
                    logging_text + "RO_action_id={} already deleted".format(action_id)
                )
            elif e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_action_id={} delete conflict: {}".format(action_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text
                    + "RO_action_id={} delete error: {}".format(action_id, e)
                )
        except Exception as e:
            # non-RO failure (e.g. timeout raised by _wait_ng_ro)
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
            )

        # always persist the final state, whether deletion succeeded or not
        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
        return
1437
1438 async def instantiate_RO(
1439 self,
1440 logging_text,
1441 nsr_id,
1442 nsd,
1443 db_nsr,
1444 db_nslcmop,
1445 db_vnfrs,
1446 db_vnfds,
1447 n2vc_key_list,
1448 stage,
1449 ):
1450 """
1451 Instantiate at RO
1452 :param logging_text: preffix text to use at logging
1453 :param nsr_id: nsr identity
1454 :param nsd: database content of ns descriptor
1455 :param db_nsr: database content of ns record
1456 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1457 :param db_vnfrs:
1458 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1459 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1460 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1461 :return: None or exception
1462 """
1463 try:
1464 start_deploy = time()
1465 ns_params = db_nslcmop.get("operationParams")
1466 if ns_params and ns_params.get("timeout_ns_deploy"):
1467 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1468 else:
1469 timeout_ns_deploy = self.timeout.ns_deploy
1470
1471 # Check for and optionally request placement optimization. Database will be updated if placement activated
1472 stage[2] = "Waiting for Placement."
1473 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1474 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1475 for vnfr in db_vnfrs.values():
1476 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1477 break
1478 else:
1479 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1480
1481 return await self._instantiate_ng_ro(
1482 logging_text,
1483 nsr_id,
1484 nsd,
1485 db_nsr,
1486 db_nslcmop,
1487 db_vnfrs,
1488 db_vnfds,
1489 n2vc_key_list,
1490 stage,
1491 start_deploy,
1492 timeout_ns_deploy,
1493 )
1494 except Exception as e:
1495 stage[2] = "ERROR deploying at VIM"
1496 self.set_vnfr_at_error(db_vnfrs, str(e))
1497 self.logger.error(
1498 "Error deploying at VIM {}".format(e),
1499 exc_info=not isinstance(
1500 e,
1501 (
1502 ROclient.ROClientException,
1503 LcmException,
1504 DbException,
1505 NgRoException,
1506 ),
1507 ),
1508 )
1509 raise
1510
1511 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1512 """
1513 Wait for kdu to be up, get ip address
1514 :param logging_text: prefix use for logging
1515 :param nsr_id:
1516 :param vnfr_id:
1517 :param kdu_name:
1518 :return: IP address, K8s services
1519 """
1520
1521 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1522 nb_tries = 0
1523
1524 while nb_tries < 360:
1525 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1526 kdur = next(
1527 (
1528 x
1529 for x in get_iterable(db_vnfr, "kdur")
1530 if x.get("kdu-name") == kdu_name
1531 ),
1532 None,
1533 )
1534 if not kdur:
1535 raise LcmException(
1536 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1537 )
1538 if kdur.get("status"):
1539 if kdur["status"] in ("READY", "ENABLED"):
1540 return kdur.get("ip-address"), kdur.get("services")
1541 else:
1542 raise LcmException(
1543 "target KDU={} is in error state".format(kdu_name)
1544 )
1545
1546 await asyncio.sleep(10, loop=self.loop)
1547 nb_tries += 1
1548 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1549
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target vdu; None means the VNF mgmt ip is used instead
        :param vdu_index: count-index of the target vdu (only used when vdu_id is set)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on retry exhaustion, VNF/VM error state, vdu not
            found, or failure injecting the key at RO
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        target_vdu_id = None
        ro_retries = 0

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour (360 iterations x 10s sleep)
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address; once target_vdu_id is set, this phase is skipped
            # on later iterations and only the key injection below is retried
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        # ip not assigned yet; retry on next iteration
                        continue
                    # pick the vdur that owns the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up by definition; otherwise accept either
                # the legacy vdur status or the NG-RO vim_status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                # VM not ACTIVE yet; keep polling
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # keys cannot be injected into physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    # ask RO to run the inject_ssh_key action on the target vdur
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                # no key to inject: the ip address is all that was needed
                break

        return ip_address
1676
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: nsr identity
        :param vca_deployed_list: list of deployed VCAs from _admin.deployed.VCA
        :param vca_index: index of this VCA inside vca_deployed_list
        :raises LcmException: if a dependency is BROKEN or the wait times out

        NOTE(review): timeout counts iterations, not seconds — with the 10 s
        sleep below, 300 iterations is ~50 minutes; confirm this is intended.
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        timeout = 300
        while timeout >= 0:
            # refresh configuration status from the database on each pass
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # a VCA is a dependency when this VCA is NS-level (no
                # member-vnf-index) or when both belong to the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still configuring: leave the for loop and sleep/retry
                        break
            else:
                # for/else: loop completed without break, so every dependency
                # is READY (or skipped); no dependencies pending, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1714
1715 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1716 vca_id = None
1717 if db_vnfr:
1718 vca_id = deep_get(db_vnfr, ("vca-id",))
1719 elif db_nsr:
1720 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1721 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1722 return vca_id
1723
1724 async def instantiate_N2VC(
1725 self,
1726 logging_text,
1727 vca_index,
1728 nsi_id,
1729 db_nsr,
1730 db_vnfr,
1731 vdu_id,
1732 kdu_name,
1733 vdu_index,
1734 kdu_index,
1735 config_descriptor,
1736 deploy_params,
1737 base_folder,
1738 nslcmop_id,
1739 stage,
1740 vca_type,
1741 vca_name,
1742 ee_config_descriptor,
1743 ):
1744 nsr_id = db_nsr["_id"]
1745 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1746 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1747 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1748 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1749 db_dict = {
1750 "collection": "nsrs",
1751 "filter": {"_id": nsr_id},
1752 "path": db_update_entry,
1753 }
1754 step = ""
1755 try:
1756
1757 element_type = "NS"
1758 element_under_configuration = nsr_id
1759
1760 vnfr_id = None
1761 if db_vnfr:
1762 vnfr_id = db_vnfr["_id"]
1763 osm_config["osm"]["vnf_id"] = vnfr_id
1764
1765 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1766
1767 if vca_type == "native_charm":
1768 index_number = 0
1769 else:
1770 index_number = vdu_index or 0
1771
1772 if vnfr_id:
1773 element_type = "VNF"
1774 element_under_configuration = vnfr_id
1775 namespace += ".{}-{}".format(vnfr_id, index_number)
1776 if vdu_id:
1777 namespace += ".{}-{}".format(vdu_id, index_number)
1778 element_type = "VDU"
1779 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1780 osm_config["osm"]["vdu_id"] = vdu_id
1781 elif kdu_name:
1782 namespace += ".{}".format(kdu_name)
1783 element_type = "KDU"
1784 element_under_configuration = kdu_name
1785 osm_config["osm"]["kdu_name"] = kdu_name
1786
1787 # Get artifact path
1788 if base_folder["pkg-dir"]:
1789 artifact_path = "{}/{}/{}/{}".format(
1790 base_folder["folder"],
1791 base_folder["pkg-dir"],
1792 "charms"
1793 if vca_type
1794 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1795 else "helm-charts",
1796 vca_name,
1797 )
1798 else:
1799 artifact_path = "{}/Scripts/{}/{}/".format(
1800 base_folder["folder"],
1801 "charms"
1802 if vca_type
1803 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1804 else "helm-charts",
1805 vca_name,
1806 )
1807
1808 self.logger.debug("Artifact path > {}".format(artifact_path))
1809
1810 # get initial_config_primitive_list that applies to this element
1811 initial_config_primitive_list = config_descriptor.get(
1812 "initial-config-primitive"
1813 )
1814
1815 self.logger.debug(
1816 "Initial config primitive list > {}".format(
1817 initial_config_primitive_list
1818 )
1819 )
1820
1821 # add config if not present for NS charm
1822 ee_descriptor_id = ee_config_descriptor.get("id")
1823 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1824 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1825 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1826 )
1827
1828 self.logger.debug(
1829 "Initial config primitive list #2 > {}".format(
1830 initial_config_primitive_list
1831 )
1832 )
1833 # n2vc_redesign STEP 3.1
1834 # find old ee_id if exists
1835 ee_id = vca_deployed.get("ee_id")
1836
1837 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1838 # create or register execution environment in VCA
1839 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1840
1841 self._write_configuration_status(
1842 nsr_id=nsr_id,
1843 vca_index=vca_index,
1844 status="CREATING",
1845 element_under_configuration=element_under_configuration,
1846 element_type=element_type,
1847 )
1848
1849 step = "create execution environment"
1850 self.logger.debug(logging_text + step)
1851
1852 ee_id = None
1853 credentials = None
1854 if vca_type == "k8s_proxy_charm":
1855 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1856 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1857 namespace=namespace,
1858 artifact_path=artifact_path,
1859 db_dict=db_dict,
1860 vca_id=vca_id,
1861 )
1862 elif vca_type == "helm" or vca_type == "helm-v3":
1863 ee_id, credentials = await self.vca_map[
1864 vca_type
1865 ].create_execution_environment(
1866 namespace=namespace,
1867 reuse_ee_id=ee_id,
1868 db_dict=db_dict,
1869 config=osm_config,
1870 artifact_path=artifact_path,
1871 chart_model=vca_name,
1872 vca_type=vca_type,
1873 )
1874 else:
1875 ee_id, credentials = await self.vca_map[
1876 vca_type
1877 ].create_execution_environment(
1878 namespace=namespace,
1879 reuse_ee_id=ee_id,
1880 db_dict=db_dict,
1881 vca_id=vca_id,
1882 )
1883
1884 elif vca_type == "native_charm":
1885 step = "Waiting to VM being up and getting IP address"
1886 self.logger.debug(logging_text + step)
1887 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1888 logging_text,
1889 nsr_id,
1890 vnfr_id,
1891 vdu_id,
1892 vdu_index,
1893 user=None,
1894 pub_key=None,
1895 )
1896 credentials = {"hostname": rw_mgmt_ip}
1897 # get username
1898 username = deep_get(
1899 config_descriptor, ("config-access", "ssh-access", "default-user")
1900 )
1901 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1902 # merged. Meanwhile let's get username from initial-config-primitive
1903 if not username and initial_config_primitive_list:
1904 for config_primitive in initial_config_primitive_list:
1905 for param in config_primitive.get("parameter", ()):
1906 if param["name"] == "ssh-username":
1907 username = param["value"]
1908 break
1909 if not username:
1910 raise LcmException(
1911 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1912 "'config-access.ssh-access.default-user'"
1913 )
1914 credentials["username"] = username
1915 # n2vc_redesign STEP 3.2
1916
1917 self._write_configuration_status(
1918 nsr_id=nsr_id,
1919 vca_index=vca_index,
1920 status="REGISTERING",
1921 element_under_configuration=element_under_configuration,
1922 element_type=element_type,
1923 )
1924
1925 step = "register execution environment {}".format(credentials)
1926 self.logger.debug(logging_text + step)
1927 ee_id = await self.vca_map[vca_type].register_execution_environment(
1928 credentials=credentials,
1929 namespace=namespace,
1930 db_dict=db_dict,
1931 vca_id=vca_id,
1932 )
1933
1934 # for compatibility with MON/POL modules, the need model and application name at database
1935 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1936 ee_id_parts = ee_id.split(".")
1937 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1938 if len(ee_id_parts) >= 2:
1939 model_name = ee_id_parts[0]
1940 application_name = ee_id_parts[1]
1941 db_nsr_update[db_update_entry + "model"] = model_name
1942 db_nsr_update[db_update_entry + "application"] = application_name
1943
1944 # n2vc_redesign STEP 3.3
1945 step = "Install configuration Software"
1946
1947 self._write_configuration_status(
1948 nsr_id=nsr_id,
1949 vca_index=vca_index,
1950 status="INSTALLING SW",
1951 element_under_configuration=element_under_configuration,
1952 element_type=element_type,
1953 other_update=db_nsr_update,
1954 )
1955
1956 # TODO check if already done
1957 self.logger.debug(logging_text + step)
1958 config = None
1959 if vca_type == "native_charm":
1960 config_primitive = next(
1961 (p for p in initial_config_primitive_list if p["name"] == "config"),
1962 None,
1963 )
1964 if config_primitive:
1965 config = self._map_primitive_params(
1966 config_primitive, {}, deploy_params
1967 )
1968 num_units = 1
1969 if vca_type == "lxc_proxy_charm":
1970 if element_type == "NS":
1971 num_units = db_nsr.get("config-units") or 1
1972 elif element_type == "VNF":
1973 num_units = db_vnfr.get("config-units") or 1
1974 elif element_type == "VDU":
1975 for v in db_vnfr["vdur"]:
1976 if vdu_id == v["vdu-id-ref"]:
1977 num_units = v.get("config-units") or 1
1978 break
1979 if vca_type != "k8s_proxy_charm":
1980 await self.vca_map[vca_type].install_configuration_sw(
1981 ee_id=ee_id,
1982 artifact_path=artifact_path,
1983 db_dict=db_dict,
1984 config=config,
1985 num_units=num_units,
1986 vca_id=vca_id,
1987 vca_type=vca_type,
1988 )
1989
1990 # write in db flag of configuration_sw already installed
1991 self.update_db_2(
1992 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1993 )
1994
1995 # add relations for this VCA (wait for other peers related with this VCA)
1996 is_relation_added = await self._add_vca_relations(
1997 logging_text=logging_text,
1998 nsr_id=nsr_id,
1999 vca_type=vca_type,
2000 vca_index=vca_index,
2001 )
2002
2003 if not is_relation_added:
2004 raise LcmException("Relations could not be added to VCA.")
2005
2006 # if SSH access is required, then get execution environment SSH public
2007 # if native charm we have waited already to VM be UP
2008 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2009 pub_key = None
2010 user = None
2011 # self.logger.debug("get ssh key block")
2012 if deep_get(
2013 config_descriptor, ("config-access", "ssh-access", "required")
2014 ):
2015 # self.logger.debug("ssh key needed")
2016 # Needed to inject a ssh key
2017 user = deep_get(
2018 config_descriptor,
2019 ("config-access", "ssh-access", "default-user"),
2020 )
2021 step = "Install configuration Software, getting public ssh key"
2022 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2023 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2024 )
2025
2026 step = "Insert public key into VM user={} ssh_key={}".format(
2027 user, pub_key
2028 )
2029 else:
2030 # self.logger.debug("no need to get ssh key")
2031 step = "Waiting to VM being up and getting IP address"
2032 self.logger.debug(logging_text + step)
2033
2034 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2035 rw_mgmt_ip = None
2036
2037 # n2vc_redesign STEP 5.1
2038 # wait for RO (ip-address) Insert pub_key into VM
2039 if vnfr_id:
2040 if kdu_name:
2041 rw_mgmt_ip, services = await self.wait_kdu_up(
2042 logging_text, nsr_id, vnfr_id, kdu_name
2043 )
2044 vnfd = self.db.get_one(
2045 "vnfds_revisions",
2046 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2047 )
2048 kdu = get_kdu(vnfd, kdu_name)
2049 kdu_services = [
2050 service["name"] for service in get_kdu_services(kdu)
2051 ]
2052 exposed_services = []
2053 for service in services:
2054 if any(s in service["name"] for s in kdu_services):
2055 exposed_services.append(service)
2056 await self.vca_map[vca_type].exec_primitive(
2057 ee_id=ee_id,
2058 primitive_name="config",
2059 params_dict={
2060 "osm-config": json.dumps(
2061 OsmConfigBuilder(
2062 k8s={"services": exposed_services}
2063 ).build()
2064 )
2065 },
2066 vca_id=vca_id,
2067 )
2068
2069 # This verification is needed in order to avoid trying to add a public key
2070 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2071 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2072 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2073 # or it is a KNF)
2074 elif db_vnfr.get("vdur"):
2075 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2076 logging_text,
2077 nsr_id,
2078 vnfr_id,
2079 vdu_id,
2080 vdu_index,
2081 user=user,
2082 pub_key=pub_key,
2083 )
2084
2085 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2086
2087 # store rw_mgmt_ip in deploy params for later replacement
2088 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2089
2090 # n2vc_redesign STEP 6 Execute initial config primitive
2091 step = "execute initial config primitive"
2092
2093 # wait for dependent primitives execution (NS -> VNF -> VDU)
2094 if initial_config_primitive_list:
2095 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2096
2097 # stage, in function of element type: vdu, kdu, vnf or ns
2098 my_vca = vca_deployed_list[vca_index]
2099 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2100 # VDU or KDU
2101 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2102 elif my_vca.get("member-vnf-index"):
2103 # VNF
2104 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2105 else:
2106 # NS
2107 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2108
2109 self._write_configuration_status(
2110 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2111 )
2112
2113 self._write_op_status(op_id=nslcmop_id, stage=stage)
2114
2115 check_if_terminated_needed = True
2116 for initial_config_primitive in initial_config_primitive_list:
2117 # adding information on the vca_deployed if it is a NS execution environment
2118 if not vca_deployed["member-vnf-index"]:
2119 deploy_params["ns_config_info"] = json.dumps(
2120 self._get_ns_config_info(nsr_id)
2121 )
2122 # TODO check if already done
2123 primitive_params_ = self._map_primitive_params(
2124 initial_config_primitive, {}, deploy_params
2125 )
2126
2127 step = "execute primitive '{}' params '{}'".format(
2128 initial_config_primitive["name"], primitive_params_
2129 )
2130 self.logger.debug(logging_text + step)
2131 await self.vca_map[vca_type].exec_primitive(
2132 ee_id=ee_id,
2133 primitive_name=initial_config_primitive["name"],
2134 params_dict=primitive_params_,
2135 db_dict=db_dict,
2136 vca_id=vca_id,
2137 vca_type=vca_type,
2138 )
2139 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2140 if check_if_terminated_needed:
2141 if config_descriptor.get("terminate-config-primitive"):
2142 self.update_db_2(
2143 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2144 )
2145 check_if_terminated_needed = False
2146
2147 # TODO register in database that primitive is done
2148
2149 # STEP 7 Configure metrics
2150 if vca_type == "helm" or vca_type == "helm-v3":
2151 # TODO: review for those cases where the helm chart is a reference and
2152 # is not part of the NF package
2153 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2154 ee_id=ee_id,
2155 artifact_path=artifact_path,
2156 ee_config_descriptor=ee_config_descriptor,
2157 vnfr_id=vnfr_id,
2158 nsr_id=nsr_id,
2159 target_ip=rw_mgmt_ip,
2160 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2161 vdu_id=vdu_id,
2162 vdu_index=vdu_index,
2163 kdu_name=kdu_name,
2164 kdu_index=kdu_index,
2165 )
2166 if prometheus_jobs:
2167 self.update_db_2(
2168 "nsrs",
2169 nsr_id,
2170 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2171 )
2172
2173 for job in prometheus_jobs:
2174 self.db.set_one(
2175 "prometheus_jobs",
2176 {"job_name": job["job_name"]},
2177 job,
2178 upsert=True,
2179 fail_on_empty=False,
2180 )
2181
2182 step = "instantiated at VCA"
2183 self.logger.debug(logging_text + step)
2184
2185 self._write_configuration_status(
2186 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2187 )
2188
2189 except Exception as e: # TODO not use Exception but N2VC exception
2190 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2191 if not isinstance(
2192 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2193 ):
2194 self.logger.error(
2195 "Exception while {} : {}".format(step, e), exc_info=True
2196 )
2197 self._write_configuration_status(
2198 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2199 )
2200 raise LcmException("{}. {}".format(step, e)) from e
2201
2202 def _write_ns_status(
2203 self,
2204 nsr_id: str,
2205 ns_state: str,
2206 current_operation: str,
2207 current_operation_id: str,
2208 error_description: str = None,
2209 error_detail: str = None,
2210 other_update: dict = None,
2211 ):
2212 """
2213 Update db_nsr fields.
2214 :param nsr_id:
2215 :param ns_state:
2216 :param current_operation:
2217 :param current_operation_id:
2218 :param error_description:
2219 :param error_detail:
2220 :param other_update: Other required changes at database if provided, will be cleared
2221 :return:
2222 """
2223 try:
2224 db_dict = other_update or {}
2225 db_dict[
2226 "_admin.nslcmop"
2227 ] = current_operation_id # for backward compatibility
2228 db_dict["_admin.current-operation"] = current_operation_id
2229 db_dict["_admin.operation-type"] = (
2230 current_operation if current_operation != "IDLE" else None
2231 )
2232 db_dict["currentOperation"] = current_operation
2233 db_dict["currentOperationID"] = current_operation_id
2234 db_dict["errorDescription"] = error_description
2235 db_dict["errorDetail"] = error_detail
2236
2237 if ns_state:
2238 db_dict["nsState"] = ns_state
2239 self.update_db_2("nsrs", nsr_id, db_dict)
2240 except DbException as e:
2241 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2242
2243 def _write_op_status(
2244 self,
2245 op_id: str,
2246 stage: list = None,
2247 error_message: str = None,
2248 queuePosition: int = 0,
2249 operation_state: str = None,
2250 other_update: dict = None,
2251 ):
2252 try:
2253 db_dict = other_update or {}
2254 db_dict["queuePosition"] = queuePosition
2255 if isinstance(stage, list):
2256 db_dict["stage"] = stage[0]
2257 db_dict["detailed-status"] = " ".join(stage)
2258 elif stage is not None:
2259 db_dict["stage"] = str(stage)
2260
2261 if error_message is not None:
2262 db_dict["errorMessage"] = error_message
2263 if operation_state is not None:
2264 db_dict["operationState"] = operation_state
2265 db_dict["statusEnteredTime"] = time()
2266 self.update_db_2("nslcmops", op_id, db_dict)
2267 except DbException as e:
2268 self.logger.warn(
2269 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2270 )
2271
2272 def _write_all_config_status(self, db_nsr: dict, status: str):
2273 try:
2274 nsr_id = db_nsr["_id"]
2275 # configurationStatus
2276 config_status = db_nsr.get("configurationStatus")
2277 if config_status:
2278 db_nsr_update = {
2279 "configurationStatus.{}.status".format(index): status
2280 for index, v in enumerate(config_status)
2281 if v
2282 }
2283 # update status
2284 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2285
2286 except DbException as e:
2287 self.logger.warn(
2288 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2289 )
2290
2291 def _write_configuration_status(
2292 self,
2293 nsr_id: str,
2294 vca_index: int,
2295 status: str = None,
2296 element_under_configuration: str = None,
2297 element_type: str = None,
2298 other_update: dict = None,
2299 ):
2300
2301 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2302 # .format(vca_index, status))
2303
2304 try:
2305 db_path = "configurationStatus.{}.".format(vca_index)
2306 db_dict = other_update or {}
2307 if status:
2308 db_dict[db_path + "status"] = status
2309 if element_under_configuration:
2310 db_dict[
2311 db_path + "elementUnderConfiguration"
2312 ] = element_under_configuration
2313 if element_type:
2314 db_dict[db_path + "elementType"] = element_type
2315 self.update_db_2("nsrs", nsr_id, db_dict)
2316 except DbException as e:
2317 self.logger.warn(
2318 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2319 status, nsr_id, vca_index, e
2320 )
2321 )
2322
2323 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2324 """
2325 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2326 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2327 Database is used because the result can be obtained from a different LCM worker in case of HA.
2328 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2329 :param db_nslcmop: database content of nslcmop
2330 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2331 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2332 computed 'vim-account-id'
2333 """
2334 modified = False
2335 nslcmop_id = db_nslcmop["_id"]
2336 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2337 if placement_engine == "PLA":
2338 self.logger.debug(
2339 logging_text + "Invoke and wait for placement optimization"
2340 )
2341 await self.msg.aiowrite(
2342 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2343 )
2344 db_poll_interval = 5
2345 wait = db_poll_interval * 10
2346 pla_result = None
2347 while not pla_result and wait >= 0:
2348 await asyncio.sleep(db_poll_interval)
2349 wait -= db_poll_interval
2350 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2351 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2352
2353 if not pla_result:
2354 raise LcmException(
2355 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2356 )
2357
2358 for pla_vnf in pla_result["vnf"]:
2359 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2360 if not pla_vnf.get("vimAccountId") or not vnfr:
2361 continue
2362 modified = True
2363 self.db.set_one(
2364 "vnfrs",
2365 {"_id": vnfr["_id"]},
2366 {"vim-account-id": pla_vnf["vimAccountId"]},
2367 )
2368 # Modifies db_vnfrs
2369 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2370 return modified
2371
2372 def update_nsrs_with_pla_result(self, params):
2373 try:
2374 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2375 self.update_db_2(
2376 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2377 )
2378 except Exception as e:
2379 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2380
2381 async def instantiate(self, nsr_id, nslcmop_id):
2382 """
2383
2384 :param nsr_id: ns instance to deploy
2385 :param nslcmop_id: operation to run
2386 :return:
2387 """
2388
2389 # Try to lock HA task here
2390 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
2391 if not task_is_locked_by_me:
2392 self.logger.debug(
2393 "instantiate() task is not locked by me, ns={}".format(nsr_id)
2394 )
2395 return
2396
2397 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2398 self.logger.debug(logging_text + "Enter")
2399
2400 # get all needed from database
2401
2402 # database nsrs record
2403 db_nsr = None
2404
2405 # database nslcmops record
2406 db_nslcmop = None
2407
2408 # update operation on nsrs
2409 db_nsr_update = {}
2410 # update operation on nslcmops
2411 db_nslcmop_update = {}
2412
2413 timeout_ns_deploy = self.timeout.ns_deploy
2414
2415 nslcmop_operation_state = None
2416 db_vnfrs = {} # vnf's info indexed by member-index
2417 # n2vc_info = {}
2418 tasks_dict_info = {} # from task to info text
2419 exc = None
2420 error_list = []
2421 stage = [
2422 "Stage 1/5: preparation of the environment.",
2423 "Waiting for previous operations to terminate.",
2424 "",
2425 ]
2426 # ^ stage, step, VIM progress
2427 try:
2428 # wait for any previous tasks in process
2429 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
2430
2431 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2432 stage[1] = "Reading from database."
2433 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2434 db_nsr_update["detailed-status"] = "creating"
2435 db_nsr_update["operational-status"] = "init"
2436 self._write_ns_status(
2437 nsr_id=nsr_id,
2438 ns_state="BUILDING",
2439 current_operation="INSTANTIATING",
2440 current_operation_id=nslcmop_id,
2441 other_update=db_nsr_update,
2442 )
2443 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
2444
2445 # read from db: operation
2446 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2447 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2448 if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
2449 db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
2450 db_nslcmop["operationParams"]["additionalParamsForVnf"]
2451 )
2452 ns_params = db_nslcmop.get("operationParams")
2453 if ns_params and ns_params.get("timeout_ns_deploy"):
2454 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2455
2456 # read from db: ns
2457 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2458 self.logger.debug(logging_text + stage[1])
2459 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2460 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2461 self.logger.debug(logging_text + stage[1])
2462 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2463 self.fs.sync(db_nsr["nsd-id"])
2464 db_nsr["nsd"] = nsd
2465 # nsr_name = db_nsr["name"] # TODO short-name??
2466
2467 # read from db: vnf's of this ns
2468 stage[1] = "Getting vnfrs from db."
2469 self.logger.debug(logging_text + stage[1])
2470 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2471
2472 # read from db: vnfd's for every vnf
2473 db_vnfds = [] # every vnfd data
2474
2475 # for each vnf in ns, read vnfd
2476 for vnfr in db_vnfrs_list:
2477 if vnfr.get("kdur"):
2478 kdur_list = []
2479 for kdur in vnfr["kdur"]:
2480 if kdur.get("additionalParams"):
2481 kdur["additionalParams"] = json.loads(
2482 kdur["additionalParams"]
2483 )
2484 kdur_list.append(kdur)
2485 vnfr["kdur"] = kdur_list
2486
2487 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
2488 vnfd_id = vnfr["vnfd-id"]
2489 vnfd_ref = vnfr["vnfd-ref"]
2490 self.fs.sync(vnfd_id)
2491
2492 # if we haven't this vnfd, read it from db
2493 if vnfd_id not in db_vnfds:
2494 # read from db
2495 stage[1] = "Getting vnfd={} id='{}' from db.".format(
2496 vnfd_id, vnfd_ref
2497 )
2498 self.logger.debug(logging_text + stage[1])
2499 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2500
2501 # store vnfd
2502 db_vnfds.append(vnfd)
2503
2504 # Get or generates the _admin.deployed.VCA list
2505 vca_deployed_list = None
2506 if db_nsr["_admin"].get("deployed"):
2507 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2508 if vca_deployed_list is None:
2509 vca_deployed_list = []
2510 configuration_status_list = []
2511 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2512 db_nsr_update["configurationStatus"] = configuration_status_list
2513 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2514 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2515 elif isinstance(vca_deployed_list, dict):
2516 # maintain backward compatibility. Change a dict to list at database
2517 vca_deployed_list = list(vca_deployed_list.values())
2518 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2519 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2520
2521 if not isinstance(
2522 deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
2523 ):
2524 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2525 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2526
2527 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2528 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2529 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2530 self.db.set_list(
2531 "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
2532 )
2533
2534 # n2vc_redesign STEP 2 Deploy Network Scenario
2535 stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2536 self._write_op_status(op_id=nslcmop_id, stage=stage)
2537
2538 stage[1] = "Deploying KDUs."
2539 # self.logger.debug(logging_text + "Before deploy_kdus")
2540 # Call to deploy_kdus in case exists the "vdu:kdu" param
2541 await self.deploy_kdus(
2542 logging_text=logging_text,
2543 nsr_id=nsr_id,
2544 nslcmop_id=nslcmop_id,
2545 db_vnfrs=db_vnfrs,
2546 db_vnfds=db_vnfds,
2547 task_instantiation_info=tasks_dict_info,
2548 )
2549
2550 stage[1] = "Getting VCA public key."
2551 # n2vc_redesign STEP 1 Get VCA public ssh-key
2552 # feature 1429. Add n2vc public key to needed VMs
2553 n2vc_key = self.n2vc.get_public_key()
2554 n2vc_key_list = [n2vc_key]
2555 if self.vca_config.public_key:
2556 n2vc_key_list.append(self.vca_config.public_key)
2557
2558 stage[1] = "Deploying NS at VIM."
2559 task_ro = asyncio.ensure_future(
2560 self.instantiate_RO(
2561 logging_text=logging_text,
2562 nsr_id=nsr_id,
2563 nsd=nsd,
2564 db_nsr=db_nsr,
2565 db_nslcmop=db_nslcmop,
2566 db_vnfrs=db_vnfrs,
2567 db_vnfds=db_vnfds,
2568 n2vc_key_list=n2vc_key_list,
2569 stage=stage,
2570 )
2571 )
2572 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2573 tasks_dict_info[task_ro] = "Deploying at VIM"
2574
2575 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2576 stage[1] = "Deploying Execution Environments."
2577 self.logger.debug(logging_text + stage[1])
2578
2579 # create namespace and certificate if any helm based EE is present in the NS
2580 if check_helm_ee_in_ns(db_vnfds):
2581 # TODO: create EE namespace
2582 # create TLS certificates
2583 await self.vca_map["helm-v3"].create_tls_certificate(
2584 secret_name="ee-tls-{}".format(nsr_id),
2585 dns_prefix="*",
2586 nsr_id=nsr_id,
2587 usage="server auth",
2588 )
2589
2590 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2591 for vnf_profile in get_vnf_profiles(nsd):
2592 vnfd_id = vnf_profile["vnfd-id"]
2593 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
2594 member_vnf_index = str(vnf_profile["id"])
2595 db_vnfr = db_vnfrs[member_vnf_index]
2596 base_folder = vnfd["_admin"]["storage"]
2597 vdu_id = None
2598 vdu_index = 0
2599 vdu_name = None
2600 kdu_name = None
2601 kdu_index = None
2602
2603 # Get additional parameters
2604 deploy_params = {"OSM": get_osm_params(db_vnfr)}
2605 if db_vnfr.get("additionalParamsForVnf"):
2606 deploy_params.update(
2607 parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
2608 )
2609
2610 descriptor_config = get_configuration(vnfd, vnfd["id"])
2611 if descriptor_config:
2612 self._deploy_n2vc(
2613 logging_text=logging_text
2614 + "member_vnf_index={} ".format(member_vnf_index),
2615 db_nsr=db_nsr,
2616 db_vnfr=db_vnfr,
2617 nslcmop_id=nslcmop_id,
2618 nsr_id=nsr_id,
2619 nsi_id=nsi_id,
2620 vnfd_id=vnfd_id,
2621 vdu_id=vdu_id,
2622 kdu_name=kdu_name,
2623 member_vnf_index=member_vnf_index,
2624 vdu_index=vdu_index,
2625 kdu_index=kdu_index,
2626 vdu_name=vdu_name,
2627 deploy_params=deploy_params,
2628 descriptor_config=descriptor_config,
2629 base_folder=base_folder,
2630 task_instantiation_info=tasks_dict_info,
2631 stage=stage,
2632 )
2633
2634 # Deploy charms for each VDU that supports one.
2635 for vdud in get_vdu_list(vnfd):
2636 vdu_id = vdud["id"]
2637 descriptor_config = get_configuration(vnfd, vdu_id)
2638 vdur = find_in_list(
2639 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
2640 )
2641
2642 if vdur.get("additionalParams"):
2643 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
2644 else:
2645 deploy_params_vdu = deploy_params
2646 deploy_params_vdu["OSM"] = get_osm_params(
2647 db_vnfr, vdu_id, vdu_count_index=0
2648 )
2649 vdud_count = get_number_of_instances(vnfd, vdu_id)
2650
2651 self.logger.debug("VDUD > {}".format(vdud))
2652 self.logger.debug(
2653 "Descriptor config > {}".format(descriptor_config)
2654 )
2655 if descriptor_config:
2656 vdu_name = None
2657 kdu_name = None
2658 kdu_index = None
2659 for vdu_index in range(vdud_count):
2660 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2661 self._deploy_n2vc(
2662 logging_text=logging_text
2663 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2664 member_vnf_index, vdu_id, vdu_index
2665 ),
2666 db_nsr=db_nsr,
2667 db_vnfr=db_vnfr,
2668 nslcmop_id=nslcmop_id,
2669 nsr_id=nsr_id,
2670 nsi_id=nsi_id,
2671 vnfd_id=vnfd_id,
2672 vdu_id=vdu_id,
2673 kdu_name=kdu_name,
2674 kdu_index=kdu_index,
2675 member_vnf_index=member_vnf_index,
2676 vdu_index=vdu_index,
2677 vdu_name=vdu_name,
2678 deploy_params=deploy_params_vdu,
2679 descriptor_config=descriptor_config,
2680 base_folder=base_folder,
2681 task_instantiation_info=tasks_dict_info,
2682 stage=stage,
2683 )
2684 for kdud in get_kdu_list(vnfd):
2685 kdu_name = kdud["name"]
2686 descriptor_config = get_configuration(vnfd, kdu_name)
2687 if descriptor_config:
2688 vdu_id = None
2689 vdu_index = 0
2690 vdu_name = None
2691 kdu_index, kdur = next(
2692 x
2693 for x in enumerate(db_vnfr["kdur"])
2694 if x[1]["kdu-name"] == kdu_name
2695 )
2696 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
2697 if kdur.get("additionalParams"):
2698 deploy_params_kdu.update(
2699 parse_yaml_strings(kdur["additionalParams"].copy())
2700 )
2701
2702 self._deploy_n2vc(
2703 logging_text=logging_text,
2704 db_nsr=db_nsr,
2705 db_vnfr=db_vnfr,
2706 nslcmop_id=nslcmop_id,
2707 nsr_id=nsr_id,
2708 nsi_id=nsi_id,
2709 vnfd_id=vnfd_id,
2710 vdu_id=vdu_id,
2711 kdu_name=kdu_name,
2712 member_vnf_index=member_vnf_index,
2713 vdu_index=vdu_index,
2714 kdu_index=kdu_index,
2715 vdu_name=vdu_name,
2716 deploy_params=deploy_params_kdu,
2717 descriptor_config=descriptor_config,
2718 base_folder=base_folder,
2719 task_instantiation_info=tasks_dict_info,
2720 stage=stage,
2721 )
2722
2723 # Check if this NS has a charm configuration
2724 descriptor_config = nsd.get("ns-configuration")
2725 if descriptor_config and descriptor_config.get("juju"):
2726 vnfd_id = None
2727 db_vnfr = None
2728 member_vnf_index = None
2729 vdu_id = None
2730 kdu_name = None
2731 kdu_index = None
2732 vdu_index = 0
2733 vdu_name = None
2734
2735 # Get additional parameters
2736 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
2737 if db_nsr.get("additionalParamsForNs"):
2738 deploy_params.update(
2739 parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
2740 )
2741 base_folder = nsd["_admin"]["storage"]
2742 self._deploy_n2vc(
2743 logging_text=logging_text,
2744 db_nsr=db_nsr,
2745 db_vnfr=db_vnfr,
2746 nslcmop_id=nslcmop_id,
2747 nsr_id=nsr_id,
2748 nsi_id=nsi_id,
2749 vnfd_id=vnfd_id,
2750 vdu_id=vdu_id,
2751 kdu_name=kdu_name,
2752 member_vnf_index=member_vnf_index,
2753 vdu_index=vdu_index,
2754 kdu_index=kdu_index,
2755 vdu_name=vdu_name,
2756 deploy_params=deploy_params,
2757 descriptor_config=descriptor_config,
2758 base_folder=base_folder,
2759 task_instantiation_info=tasks_dict_info,
2760 stage=stage,
2761 )
2762
2763 # rest of staff will be done at finally
2764
2765 except (
2766 ROclient.ROClientException,
2767 DbException,
2768 LcmException,
2769 N2VCException,
2770 ) as e:
2771 self.logger.error(
2772 logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
2773 )
2774 exc = e
2775 except asyncio.CancelledError:
2776 self.logger.error(
2777 logging_text + "Cancelled Exception while '{}'".format(stage[1])
2778 )
2779 exc = "Operation was cancelled"
2780 except Exception as e:
2781 exc = traceback.format_exc()
2782 self.logger.critical(
2783 logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
2784 exc_info=True,
2785 )
2786 finally:
2787 if exc:
2788 error_list.append(str(exc))
2789 try:
2790 # wait for pending tasks
2791 if tasks_dict_info:
2792 stage[1] = "Waiting for instantiate pending tasks."
2793 self.logger.debug(logging_text + stage[1])
2794 error_list += await self._wait_for_tasks(
2795 logging_text,
2796 tasks_dict_info,
2797 timeout_ns_deploy,
2798 stage,
2799 nslcmop_id,
2800 nsr_id=nsr_id,
2801 )
2802 stage[1] = stage[2] = ""
2803 except asyncio.CancelledError:
2804 error_list.append("Cancelled")
2805 # TODO cancel all tasks
2806 except Exception as exc:
2807 error_list.append(str(exc))
2808
2809 # update operation-status
2810 db_nsr_update["operational-status"] = "running"
2811 # let's begin with VCA 'configured' status (later we can change it)
2812 db_nsr_update["config-status"] = "configured"
2813 for task, task_name in tasks_dict_info.items():
2814 if not task.done() or task.cancelled() or task.exception():
2815 if task_name.startswith(self.task_name_deploy_vca):
2816 # A N2VC task is pending
2817 db_nsr_update["config-status"] = "failed"
2818 else:
2819 # RO or KDU task is pending
2820 db_nsr_update["operational-status"] = "failed"
2821
2822 # update status at database
2823 if error_list:
2824 error_detail = ". ".join(error_list)
2825 self.logger.error(logging_text + error_detail)
2826 error_description_nslcmop = "{} Detail: {}".format(
2827 stage[0], error_detail
2828 )
2829 error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
2830 nslcmop_id, stage[0]
2831 )
2832
2833 db_nsr_update["detailed-status"] = (
2834 error_description_nsr + " Detail: " + error_detail
2835 )
2836 db_nslcmop_update["detailed-status"] = error_detail
2837 nslcmop_operation_state = "FAILED"
2838 ns_state = "BROKEN"
2839 else:
2840 error_detail = None
2841 error_description_nsr = error_description_nslcmop = None
2842 ns_state = "READY"
2843 db_nsr_update["detailed-status"] = "Done"
2844 db_nslcmop_update["detailed-status"] = "Done"
2845 nslcmop_operation_state = "COMPLETED"
2846
2847 if db_nsr:
2848 self._write_ns_status(
2849 nsr_id=nsr_id,
2850 ns_state=ns_state,
2851 current_operation="IDLE",
2852 current_operation_id=None,
2853 error_description=error_description_nsr,
2854 error_detail=error_detail,
2855 other_update=db_nsr_update,
2856 )
2857 self._write_op_status(
2858 op_id=nslcmop_id,
2859 stage="",
2860 error_message=error_description_nslcmop,
2861 operation_state=nslcmop_operation_state,
2862 other_update=db_nslcmop_update,
2863 )
2864
2865 if nslcmop_operation_state:
2866 try:
2867 await self.msg.aiowrite(
2868 "ns",
2869 "instantiated",
2870 {
2871 "nsr_id": nsr_id,
2872 "nslcmop_id": nslcmop_id,
2873 "operationState": nslcmop_operation_state,
2874 },
2875 loop=self.loop,
2876 )
2877 except Exception as e:
2878 self.logger.error(
2879 logging_text + "kafka_write notification Exception {}".format(e)
2880 )
2881
2882 self.logger.debug(logging_text + "Exit")
2883 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2884
2885 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2886 if vnfd_id not in cached_vnfds:
2887 cached_vnfds[vnfd_id] = self.db.get_one(
2888 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2889 )
2890 return cached_vnfds[vnfd_id]
2891
2892 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2893 if vnf_profile_id not in cached_vnfrs:
2894 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2895 "vnfrs",
2896 {
2897 "member-vnf-index-ref": vnf_profile_id,
2898 "nsr-id-ref": nsr_id,
2899 },
2900 )
2901 return cached_vnfrs[vnf_profile_id]
2902
2903 def _is_deployed_vca_in_relation(
2904 self, vca: DeployedVCA, relation: Relation
2905 ) -> bool:
2906 found = False
2907 for endpoint in (relation.provider, relation.requirer):
2908 if endpoint["kdu-resource-profile-id"]:
2909 continue
2910 found = (
2911 vca.vnf_profile_id == endpoint.vnf_profile_id
2912 and vca.vdu_profile_id == endpoint.vdu_profile_id
2913 and vca.execution_environment_ref == endpoint.execution_environment_ref
2914 )
2915 if found:
2916 break
2917 return found
2918
2919 def _update_ee_relation_data_with_implicit_data(
2920 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2921 ):
2922 ee_relation_data = safe_get_ee_relation(
2923 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2924 )
2925 ee_relation_level = EELevel.get_level(ee_relation_data)
2926 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2927 "execution-environment-ref"
2928 ]:
2929 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2930 vnfd_id = vnf_profile["vnfd-id"]
2931 project = nsd["_admin"]["projects_read"][0]
2932 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2933 entity_id = (
2934 vnfd_id
2935 if ee_relation_level == EELevel.VNF
2936 else ee_relation_data["vdu-profile-id"]
2937 )
2938 ee = get_juju_ee_ref(db_vnfd, entity_id)
2939 if not ee:
2940 raise Exception(
2941 f"not execution environments found for ee_relation {ee_relation_data}"
2942 )
2943 ee_relation_data["execution-environment-ref"] = ee["id"]
2944 return ee_relation_data
2945
2946 def _get_ns_relations(
2947 self,
2948 nsr_id: str,
2949 nsd: Dict[str, Any],
2950 vca: DeployedVCA,
2951 cached_vnfds: Dict[str, Any],
2952 ) -> List[Relation]:
2953 relations = []
2954 db_ns_relations = get_ns_configuration_relation_list(nsd)
2955 for r in db_ns_relations:
2956 provider_dict = None
2957 requirer_dict = None
2958 if all(key in r for key in ("provider", "requirer")):
2959 provider_dict = r["provider"]
2960 requirer_dict = r["requirer"]
2961 elif "entities" in r:
2962 provider_id = r["entities"][0]["id"]
2963 provider_dict = {
2964 "nsr-id": nsr_id,
2965 "endpoint": r["entities"][0]["endpoint"],
2966 }
2967 if provider_id != nsd["id"]:
2968 provider_dict["vnf-profile-id"] = provider_id
2969 requirer_id = r["entities"][1]["id"]
2970 requirer_dict = {
2971 "nsr-id": nsr_id,
2972 "endpoint": r["entities"][1]["endpoint"],
2973 }
2974 if requirer_id != nsd["id"]:
2975 requirer_dict["vnf-profile-id"] = requirer_id
2976 else:
2977 raise Exception(
2978 "provider/requirer or entities must be included in the relation."
2979 )
2980 relation_provider = self._update_ee_relation_data_with_implicit_data(
2981 nsr_id, nsd, provider_dict, cached_vnfds
2982 )
2983 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2984 nsr_id, nsd, requirer_dict, cached_vnfds
2985 )
2986 provider = EERelation(relation_provider)
2987 requirer = EERelation(relation_requirer)
2988 relation = Relation(r["name"], provider, requirer)
2989 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2990 if vca_in_relation:
2991 relations.append(relation)
2992 return relations
2993
2994 def _get_vnf_relations(
2995 self,
2996 nsr_id: str,
2997 nsd: Dict[str, Any],
2998 vca: DeployedVCA,
2999 cached_vnfds: Dict[str, Any],
3000 ) -> List[Relation]:
3001 relations = []
3002 if vca.target_element == "ns":
3003 self.logger.debug("VCA is a NS charm, not a VNF.")
3004 return relations
3005 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3006 vnf_profile_id = vnf_profile["id"]
3007 vnfd_id = vnf_profile["vnfd-id"]
3008 project = nsd["_admin"]["projects_read"][0]
3009 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3010 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3011 for r in db_vnf_relations:
3012 provider_dict = None
3013 requirer_dict = None
3014 if all(key in r for key in ("provider", "requirer")):
3015 provider_dict = r["provider"]
3016 requirer_dict = r["requirer"]
3017 elif "entities" in r:
3018 provider_id = r["entities"][0]["id"]
3019 provider_dict = {
3020 "nsr-id": nsr_id,
3021 "vnf-profile-id": vnf_profile_id,
3022 "endpoint": r["entities"][0]["endpoint"],
3023 }
3024 if provider_id != vnfd_id:
3025 provider_dict["vdu-profile-id"] = provider_id
3026 requirer_id = r["entities"][1]["id"]
3027 requirer_dict = {
3028 "nsr-id": nsr_id,
3029 "vnf-profile-id": vnf_profile_id,
3030 "endpoint": r["entities"][1]["endpoint"],
3031 }
3032 if requirer_id != vnfd_id:
3033 requirer_dict["vdu-profile-id"] = requirer_id
3034 else:
3035 raise Exception(
3036 "provider/requirer or entities must be included in the relation."
3037 )
3038 relation_provider = self._update_ee_relation_data_with_implicit_data(
3039 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3040 )
3041 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3042 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3043 )
3044 provider = EERelation(relation_provider)
3045 requirer = EERelation(relation_requirer)
3046 relation = Relation(r["name"], provider, requirer)
3047 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3048 if vca_in_relation:
3049 relations.append(relation)
3050 return relations
3051
3052 def _get_kdu_resource_data(
3053 self,
3054 ee_relation: EERelation,
3055 db_nsr: Dict[str, Any],
3056 cached_vnfds: Dict[str, Any],
3057 ) -> DeployedK8sResource:
3058 nsd = get_nsd(db_nsr)
3059 vnf_profiles = get_vnf_profiles(nsd)
3060 vnfd_id = find_in_list(
3061 vnf_profiles,
3062 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3063 )["vnfd-id"]
3064 project = nsd["_admin"]["projects_read"][0]
3065 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3066 kdu_resource_profile = get_kdu_resource_profile(
3067 db_vnfd, ee_relation.kdu_resource_profile_id
3068 )
3069 kdu_name = kdu_resource_profile["kdu-name"]
3070 deployed_kdu, _ = get_deployed_kdu(
3071 db_nsr.get("_admin", ()).get("deployed", ()),
3072 kdu_name,
3073 ee_relation.vnf_profile_id,
3074 )
3075 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3076 return deployed_kdu
3077
3078 def _get_deployed_component(
3079 self,
3080 ee_relation: EERelation,
3081 db_nsr: Dict[str, Any],
3082 cached_vnfds: Dict[str, Any],
3083 ) -> DeployedComponent:
3084 nsr_id = db_nsr["_id"]
3085 deployed_component = None
3086 ee_level = EELevel.get_level(ee_relation)
3087 if ee_level == EELevel.NS:
3088 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3089 if vca:
3090 deployed_component = DeployedVCA(nsr_id, vca)
3091 elif ee_level == EELevel.VNF:
3092 vca = get_deployed_vca(
3093 db_nsr,
3094 {
3095 "vdu_id": None,
3096 "member-vnf-index": ee_relation.vnf_profile_id,
3097 "ee_descriptor_id": ee_relation.execution_environment_ref,
3098 },
3099 )
3100 if vca:
3101 deployed_component = DeployedVCA(nsr_id, vca)
3102 elif ee_level == EELevel.VDU:
3103 vca = get_deployed_vca(
3104 db_nsr,
3105 {
3106 "vdu_id": ee_relation.vdu_profile_id,
3107 "member-vnf-index": ee_relation.vnf_profile_id,
3108 "ee_descriptor_id": ee_relation.execution_environment_ref,
3109 },
3110 )
3111 if vca:
3112 deployed_component = DeployedVCA(nsr_id, vca)
3113 elif ee_level == EELevel.KDU:
3114 kdu_resource_data = self._get_kdu_resource_data(
3115 ee_relation, db_nsr, cached_vnfds
3116 )
3117 if kdu_resource_data:
3118 deployed_component = DeployedK8sResource(kdu_resource_data)
3119 return deployed_component
3120
3121 async def _add_relation(
3122 self,
3123 relation: Relation,
3124 vca_type: str,
3125 db_nsr: Dict[str, Any],
3126 cached_vnfds: Dict[str, Any],
3127 cached_vnfrs: Dict[str, Any],
3128 ) -> bool:
3129 deployed_provider = self._get_deployed_component(
3130 relation.provider, db_nsr, cached_vnfds
3131 )
3132 deployed_requirer = self._get_deployed_component(
3133 relation.requirer, db_nsr, cached_vnfds
3134 )
3135 if (
3136 deployed_provider
3137 and deployed_requirer
3138 and deployed_provider.config_sw_installed
3139 and deployed_requirer.config_sw_installed
3140 ):
3141 provider_db_vnfr = (
3142 self._get_vnfr(
3143 relation.provider.nsr_id,
3144 relation.provider.vnf_profile_id,
3145 cached_vnfrs,
3146 )
3147 if relation.provider.vnf_profile_id
3148 else None
3149 )
3150 requirer_db_vnfr = (
3151 self._get_vnfr(
3152 relation.requirer.nsr_id,
3153 relation.requirer.vnf_profile_id,
3154 cached_vnfrs,
3155 )
3156 if relation.requirer.vnf_profile_id
3157 else None
3158 )
3159 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3160 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3161 provider_relation_endpoint = RelationEndpoint(
3162 deployed_provider.ee_id,
3163 provider_vca_id,
3164 relation.provider.endpoint,
3165 )
3166 requirer_relation_endpoint = RelationEndpoint(
3167 deployed_requirer.ee_id,
3168 requirer_vca_id,
3169 relation.requirer.endpoint,
3170 )
3171 try:
3172 await self.vca_map[vca_type].add_relation(
3173 provider=provider_relation_endpoint,
3174 requirer=requirer_relation_endpoint,
3175 )
3176 except N2VCException as exception:
3177 self.logger.error(exception)
3178 raise LcmException(exception)
3179 return True
3180 return False
3181
3182 async def _add_vca_relations(
3183 self,
3184 logging_text,
3185 nsr_id,
3186 vca_type: str,
3187 vca_index: int,
3188 timeout: int = 3600,
3189 ) -> bool:
3190
3191 # steps:
3192 # 1. find all relations for this VCA
3193 # 2. wait for other peers related
3194 # 3. add relations
3195
3196 try:
3197 # STEP 1: find all relations for this VCA
3198
3199 # read nsr record
3200 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3201 nsd = get_nsd(db_nsr)
3202
3203 # this VCA data
3204 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3205 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3206
3207 cached_vnfds = {}
3208 cached_vnfrs = {}
3209 relations = []
3210 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3211 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3212
3213 # if no relations, terminate
3214 if not relations:
3215 self.logger.debug(logging_text + " No relations")
3216 return True
3217
3218 self.logger.debug(logging_text + " adding relations {}".format(relations))
3219
3220 # add all relations
3221 start = time()
3222 while True:
3223 # check timeout
3224 now = time()
3225 if now - start >= timeout:
3226 self.logger.error(logging_text + " : timeout adding relations")
3227 return False
3228
3229 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3230 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3231
3232 # for each relation, find the VCA's related
3233 for relation in relations.copy():
3234 added = await self._add_relation(
3235 relation,
3236 vca_type,
3237 db_nsr,
3238 cached_vnfds,
3239 cached_vnfrs,
3240 )
3241 if added:
3242 relations.remove(relation)
3243
3244 if not relations:
3245 self.logger.debug("Relations added")
3246 break
3247 await asyncio.sleep(5.0)
3248
3249 return True
3250
3251 except Exception as e:
3252 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3253 return False
3254
3255 async def _install_kdu(
3256 self,
3257 nsr_id: str,
3258 nsr_db_path: str,
3259 vnfr_data: dict,
3260 kdu_index: int,
3261 kdud: dict,
3262 vnfd: dict,
3263 k8s_instance_info: dict,
3264 k8params: dict = None,
3265 timeout: int = 600,
3266 vca_id: str = None,
3267 ):
3268
3269 try:
3270 k8sclustertype = k8s_instance_info["k8scluster-type"]
3271 # Instantiate kdu
3272 db_dict_install = {
3273 "collection": "nsrs",
3274 "filter": {"_id": nsr_id},
3275 "path": nsr_db_path,
3276 }
3277
3278 if k8s_instance_info.get("kdu-deployment-name"):
3279 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3280 else:
3281 kdu_instance = self.k8scluster_map[
3282 k8sclustertype
3283 ].generate_kdu_instance_name(
3284 db_dict=db_dict_install,
3285 kdu_model=k8s_instance_info["kdu-model"],
3286 kdu_name=k8s_instance_info["kdu-name"],
3287 )
3288
3289 # Update the nsrs table with the kdu-instance value
3290 self.update_db_2(
3291 item="nsrs",
3292 _id=nsr_id,
3293 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3294 )
3295
3296 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3297 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3298 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3299 # namespace, this first verification could be removed, and the next step would be done for any kind
3300 # of KNF.
3301 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3302 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3303 if k8sclustertype in ("juju", "juju-bundle"):
3304 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3305 # that the user passed a namespace which he wants its KDU to be deployed in)
3306 if (
3307 self.db.count(
3308 table="nsrs",
3309 q_filter={
3310 "_id": nsr_id,
3311 "_admin.projects_write": k8s_instance_info["namespace"],
3312 "_admin.projects_read": k8s_instance_info["namespace"],
3313 },
3314 )
3315 > 0
3316 ):
3317 self.logger.debug(
3318 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3319 )
3320 self.update_db_2(
3321 item="nsrs",
3322 _id=nsr_id,
3323 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3324 )
3325 k8s_instance_info["namespace"] = kdu_instance
3326
3327 await self.k8scluster_map[k8sclustertype].install(
3328 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3329 kdu_model=k8s_instance_info["kdu-model"],
3330 atomic=True,
3331 params=k8params,
3332 db_dict=db_dict_install,
3333 timeout=timeout,
3334 kdu_name=k8s_instance_info["kdu-name"],
3335 namespace=k8s_instance_info["namespace"],
3336 kdu_instance=kdu_instance,
3337 vca_id=vca_id,
3338 )
3339
3340 # Obtain services to obtain management service ip
3341 services = await self.k8scluster_map[k8sclustertype].get_services(
3342 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3343 kdu_instance=kdu_instance,
3344 namespace=k8s_instance_info["namespace"],
3345 )
3346
3347 # Obtain management service info (if exists)
3348 vnfr_update_dict = {}
3349 kdu_config = get_configuration(vnfd, kdud["name"])
3350 if kdu_config:
3351 target_ee_list = kdu_config.get("execution-environment-list", [])
3352 else:
3353 target_ee_list = []
3354
3355 if services:
3356 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3357 mgmt_services = [
3358 service
3359 for service in kdud.get("service", [])
3360 if service.get("mgmt-service")
3361 ]
3362 for mgmt_service in mgmt_services:
3363 for service in services:
3364 if service["name"].startswith(mgmt_service["name"]):
3365 # Mgmt service found, Obtain service ip
3366 ip = service.get("external_ip", service.get("cluster_ip"))
3367 if isinstance(ip, list) and len(ip) == 1:
3368 ip = ip[0]
3369
3370 vnfr_update_dict[
3371 "kdur.{}.ip-address".format(kdu_index)
3372 ] = ip
3373
3374 # Check if must update also mgmt ip at the vnf
3375 service_external_cp = mgmt_service.get(
3376 "external-connection-point-ref"
3377 )
3378 if service_external_cp:
3379 if (
3380 deep_get(vnfd, ("mgmt-interface", "cp"))
3381 == service_external_cp
3382 ):
3383 vnfr_update_dict["ip-address"] = ip
3384
3385 if find_in_list(
3386 target_ee_list,
3387 lambda ee: ee.get(
3388 "external-connection-point-ref", ""
3389 )
3390 == service_external_cp,
3391 ):
3392 vnfr_update_dict[
3393 "kdur.{}.ip-address".format(kdu_index)
3394 ] = ip
3395 break
3396 else:
3397 self.logger.warn(
3398 "Mgmt service name: {} not found".format(
3399 mgmt_service["name"]
3400 )
3401 )
3402
3403 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3404 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3405
3406 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3407 if (
3408 kdu_config
3409 and kdu_config.get("initial-config-primitive")
3410 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3411 ):
3412 initial_config_primitive_list = kdu_config.get(
3413 "initial-config-primitive"
3414 )
3415 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3416
3417 for initial_config_primitive in initial_config_primitive_list:
3418 primitive_params_ = self._map_primitive_params(
3419 initial_config_primitive, {}, {}
3420 )
3421
3422 await asyncio.wait_for(
3423 self.k8scluster_map[k8sclustertype].exec_primitive(
3424 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3425 kdu_instance=kdu_instance,
3426 primitive_name=initial_config_primitive["name"],
3427 params=primitive_params_,
3428 db_dict=db_dict_install,
3429 vca_id=vca_id,
3430 ),
3431 timeout=timeout,
3432 )
3433
3434 except Exception as e:
3435 # Prepare update db with error and raise exception
3436 try:
3437 self.update_db_2(
3438 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3439 )
3440 self.update_db_2(
3441 "vnfrs",
3442 vnfr_data.get("_id"),
3443 {"kdur.{}.status".format(kdu_index): "ERROR"},
3444 )
3445 except Exception:
3446 # ignore to keep original exception
3447 pass
3448 # reraise original error
3449 raise
3450
3451 return kdu_instance
3452
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one asynchronous _install_kdu task per KDU found in the vnfrs.

        Initializes _admin.deployed.K8s in the nsr, resolves and (once per
        cluster) synchronizes the helm repos of each k8s cluster, then creates
        and registers an installation task for every kdur.

        :param db_vnfrs: dict of vnfr documents, iterated for their "kdur" lists.
        :param db_vnfds: list of vnfd documents (looked up by "_id").
        :param task_instantiation_info: dict filled in with {task: description}
            for each launched KDU installation task.
        :raises LcmException: on any error while preparing the deployments.
        """
        # Launch kdus if present in the descriptor

        # cache: cluster-type -> {k8s cluster id: connector-level cluster id}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the connector-level id of a k8s cluster."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        # `step` is included in error messages to locate the failing stage
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per cluster and helm version)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # Launch the installation in the background; the task is
                    # tracked via lcm_tasks and task_instantiation_info.
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3724
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of a config descriptor.

        For each juju/helm execution environment in descriptor_config, find
        (or create) its entry in <nsrs>._admin.deployed.VCA and start an
        asynchronous instantiate_N2VC task, registering it in lcm_tasks and
        task_instantiation_info. Non-juju/non-helm items (e.g. scripts) are
        skipped.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # "charm" set -> proxy charm in a lxc container; otherwise
                # the charm runs natively in the VNF
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # Look for an existing VCA record matching this deployment target;
            # the for-else below runs only when no match was found.
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory nsr consistent with the database update
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3888
3889 @staticmethod
3890 def _create_nslcmop(nsr_id, operation, params):
3891 """
3892 Creates a ns-lcm-opp content to be stored at database.
3893 :param nsr_id: internal id of the instance
3894 :param operation: instantiate, terminate, scale, action, ...
3895 :param params: user parameters for the operation
3896 :return: dictionary following SOL005 format
3897 """
3898 # Raise exception if invalid arguments
3899 if not (nsr_id and operation and params):
3900 raise LcmException(
3901 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3902 )
3903 now = time()
3904 _id = str(uuid4())
3905 nslcmop = {
3906 "id": _id,
3907 "_id": _id,
3908 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3909 "operationState": "PROCESSING",
3910 "statusEnteredTime": now,
3911 "nsInstanceId": nsr_id,
3912 "lcmOperationType": operation,
3913 "startTime": now,
3914 "isAutomaticInvocation": False,
3915 "operationParams": params,
3916 "isCancelPending": False,
3917 "links": {
3918 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3919 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3920 },
3921 }
3922 return nslcmop
3923
3924 def _format_additional_params(self, params):
3925 params = params or {}
3926 for key, value in params.items():
3927 if str(value).startswith("!!yaml "):
3928 params[key] = yaml.safe_load(value[7:])
3929 return params
3930
3931 def _get_terminate_primitive_params(self, seq, vnf_index):
3932 primitive = seq.get("name")
3933 primitive_params = {}
3934 params = {
3935 "member_vnf_index": vnf_index,
3936 "primitive": primitive,
3937 "primitive_params": primitive_params,
3938 }
3939 desc_params = {}
3940 return self._map_primitive_params(seq, params, desc_params)
3941
3942 # sub-operations
3943
3944 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3945 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3946 if op.get("operationState") == "COMPLETED":
3947 # b. Skip sub-operation
3948 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3949 return self.SUBOPERATION_STATUS_SKIP
3950 else:
3951 # c. retry executing sub-operation
3952 # The sub-operation exists, and operationState != 'COMPLETED'
3953 # Update operationState = 'PROCESSING' to indicate a retry.
3954 operationState = "PROCESSING"
3955 detailed_status = "In progress"
3956 self._update_suboperation_status(
3957 db_nslcmop, op_index, operationState, detailed_status
3958 )
3959 # Return the sub-operation index
3960 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3961 # with arguments extracted from the sub-operation
3962 return op_index
3963
3964 # Find a sub-operation where all keys in a matching dictionary must match
3965 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3966 def _find_suboperation(self, db_nslcmop, match):
3967 if db_nslcmop and match:
3968 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3969 for i, op in enumerate(op_list):
3970 if all(op.get(k) == match[k] for k in match):
3971 return i
3972 return self.SUBOPERATION_STATUS_NOT_FOUND
3973
3974 # Update status for a sub-operation given its index
3975 def _update_suboperation_status(
3976 self, db_nslcmop, op_index, operationState, detailed_status
3977 ):
3978 # Update DB for HA tasks
3979 q_filter = {"_id": db_nslcmop["_id"]}
3980 update_dict = {
3981 "_admin.operations.{}.operationState".format(op_index): operationState,
3982 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3983 }
3984 self.db.set_one(
3985 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3986 )
3987
3988 # Add sub-operation, return the index of the added sub-operation
3989 # Optionally, set operationState, detailed-status, and operationType
3990 # Status and type are currently set for 'scale' sub-operations:
3991 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3992 # 'detailed-status' : status message
3993 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3994 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3995 def _add_suboperation(
3996 self,
3997 db_nslcmop,
3998 vnf_index,
3999 vdu_id,
4000 vdu_count_index,
4001 vdu_name,
4002 primitive,
4003 mapped_primitive_params,
4004 operationState=None,
4005 detailed_status=None,
4006 operationType=None,
4007 RO_nsr_id=None,
4008 RO_scaling_info=None,
4009 ):
4010 if not db_nslcmop:
4011 return self.SUBOPERATION_STATUS_NOT_FOUND
4012 # Get the "_admin.operations" list, if it exists
4013 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4014 op_list = db_nslcmop_admin.get("operations")
4015 # Create or append to the "_admin.operations" list
4016 new_op = {
4017 "member_vnf_index": vnf_index,
4018 "vdu_id": vdu_id,
4019 "vdu_count_index": vdu_count_index,
4020 "primitive": primitive,
4021 "primitive_params": mapped_primitive_params,
4022 }
4023 if operationState:
4024 new_op["operationState"] = operationState
4025 if detailed_status:
4026 new_op["detailed-status"] = detailed_status
4027 if operationType:
4028 new_op["lcmOperationType"] = operationType
4029 if RO_nsr_id:
4030 new_op["RO_nsr_id"] = RO_nsr_id
4031 if RO_scaling_info:
4032 new_op["RO_scaling_info"] = RO_scaling_info
4033 if not op_list:
4034 # No existing operations, create key 'operations' with current operation as first list element
4035 db_nslcmop_admin.update({"operations": [new_op]})
4036 op_list = db_nslcmop_admin.get("operations")
4037 else:
4038 # Existing operations, append operation to list
4039 op_list.append(new_op)
4040
4041 db_nslcmop_update = {"_admin.operations": op_list}
4042 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4043 op_index = len(op_list) - 1
4044 return op_index
4045
4046 # Helper methods for scale() sub-operations
4047
4048 # pre-scale/post-scale:
4049 # Check for 3 different cases:
4050 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4051 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4052 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4053 def _check_or_add_scale_suboperation(
4054 self,
4055 db_nslcmop,
4056 vnf_index,
4057 vnf_config_primitive,
4058 primitive_params,
4059 operationType,
4060 RO_nsr_id=None,
4061 RO_scaling_info=None,
4062 ):
4063 # Find this sub-operation
4064 if RO_nsr_id and RO_scaling_info:
4065 operationType = "SCALE-RO"
4066 match = {
4067 "member_vnf_index": vnf_index,
4068 "RO_nsr_id": RO_nsr_id,
4069 "RO_scaling_info": RO_scaling_info,
4070 }
4071 else:
4072 match = {
4073 "member_vnf_index": vnf_index,
4074 "primitive": vnf_config_primitive,
4075 "primitive_params": primitive_params,
4076 "lcmOperationType": operationType,
4077 }
4078 op_index = self._find_suboperation(db_nslcmop, match)
4079 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4080 # a. New sub-operation
4081 # The sub-operation does not exist, add it.
4082 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4083 # The following parameters are set to None for all kind of scaling:
4084 vdu_id = None
4085 vdu_count_index = None
4086 vdu_name = None
4087 if RO_nsr_id and RO_scaling_info:
4088 vnf_config_primitive = None
4089 primitive_params = None
4090 else:
4091 RO_nsr_id = None
4092 RO_scaling_info = None
4093 # Initial status for sub-operation
4094 operationState = "PROCESSING"
4095 detailed_status = "In progress"
4096 # Add sub-operation for pre/post-scaling (zero or more operations)
4097 self._add_suboperation(
4098 db_nslcmop,
4099 vnf_index,
4100 vdu_id,
4101 vdu_count_index,
4102 vdu_name,
4103 vnf_config_primitive,
4104 primitive_params,
4105 operationState,
4106 detailed_status,
4107 operationType,
4108 RO_nsr_id,
4109 RO_scaling_info,
4110 )
4111 return self.SUBOPERATION_STATUS_NEW
4112 else:
4113 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4114 # or op_index (operationState != 'COMPLETED')
4115 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4116
4117 # Function to return execution_environment id
4118
4119 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4120 # TODO vdu_index_count
4121 for vca in vca_deployed_list:
4122 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4123 return vca["ee_id"]
4124
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for every log message of this task
        :param db_nslcmop: database content of the current nslcmop operation
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (juju controller) identifier to use, if any
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type kept for backward compatibility (proxy charm)
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation so HA peers can track/retry it
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4230
4231 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4232 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4233 namespace = "." + db_nsr["_id"]
4234 try:
4235 await self.n2vc.delete_namespace(
4236 namespace=namespace,
4237 total_timeout=self.timeout.charm_delete,
4238 vca_id=vca_id,
4239 )
4240 except N2VCNotFound: # already deleted. Skip
4241 pass
4242 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4243
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance.

        Runs in three stages: (1) prepare and read nsr/nslcmop from db,
        (2) execute terminate primitives and destroy individual execution
        environments that need them, (3) delete all remaining execution
        environments, KDUs and the VIM deployment. Finally updates nsr and
        nslcmop status in the database and notifies the result over kafka.

        :param nsr_id: NS instance id (nsrs collection)
        :param nslcmop_id: operation id (nslcmops collection)
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy: nsr_deployed is mutated locally; db copy stays intact
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching the VCA scope
                # (ns-level, vdu, kdu or whole vnf)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4573
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, updating operation status as they
        complete.

        :param logging_text: prefix for every log message of this task
        :param created_tasks_info: dict mapping task -> human description
        :param timeout: global timeout in seconds for the whole set of tasks
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is
            updated in place with progress counters and error summaries
        :param nslcmop_id: operation id where progress is written
        :param nsr_id: when provided, errors are also written to the nsr record
        :return: list of detailed error messages (empty when all tasks succeed)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout budget
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types are logged without
                    # traceback; anything else gets the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4650
4651 @staticmethod
4652 def _map_primitive_params(primitive_desc, params, instantiation_params):
4653 """
4654 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4655 The default-value is used. If it is between < > it look for a value at instantiation_params
4656 :param primitive_desc: portion of VNFD/NSD that describes primitive
4657 :param params: Params provided by user
4658 :param instantiation_params: Instantiation params provided by user
4659 :return: a dictionary with the calculated params
4660 """
4661 calculated_params = {}
4662 for parameter in primitive_desc.get("parameter", ()):
4663 param_name = parameter["name"]
4664 if param_name in params:
4665 calculated_params[param_name] = params[param_name]
4666 elif "default-value" in parameter or "value" in parameter:
4667 if "value" in parameter:
4668 calculated_params[param_name] = parameter["value"]
4669 else:
4670 calculated_params[param_name] = parameter["default-value"]
4671 if (
4672 isinstance(calculated_params[param_name], str)
4673 and calculated_params[param_name].startswith("<")
4674 and calculated_params[param_name].endswith(">")
4675 ):
4676 if calculated_params[param_name][1:-1] in instantiation_params:
4677 calculated_params[param_name] = instantiation_params[
4678 calculated_params[param_name][1:-1]
4679 ]
4680 else:
4681 raise LcmException(
4682 "Parameter {} needed to execute primitive {} not provided".format(
4683 calculated_params[param_name], primitive_desc["name"]
4684 )
4685 )
4686 else:
4687 raise LcmException(
4688 "Parameter {} needed to execute primitive {} not provided".format(
4689 param_name, primitive_desc["name"]
4690 )
4691 )
4692
4693 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4694 calculated_params[param_name] = yaml.safe_dump(
4695 calculated_params[param_name], default_flow_style=True, width=256
4696 )
4697 elif isinstance(calculated_params[param_name], str) and calculated_params[
4698 param_name
4699 ].startswith("!!yaml "):
4700 calculated_params[param_name] = calculated_params[param_name][7:]
4701 if parameter.get("data-type") == "INTEGER":
4702 try:
4703 calculated_params[param_name] = int(calculated_params[param_name])
4704 except ValueError: # error converting string to int
4705 raise LcmException(
4706 "Parameter {} of primitive {} must be integer".format(
4707 param_name, primitive_desc["name"]
4708 )
4709 )
4710 elif parameter.get("data-type") == "BOOLEAN":
4711 calculated_params[param_name] = not (
4712 (str(calculated_params[param_name])).lower() == "false"
4713 )
4714
4715 # add always ns_config_info if primitive name is config
4716 if primitive_desc["name"] == "config":
4717 if "ns_config_info" in instantiation_params:
4718 calculated_params["ns_config_info"] = instantiation_params[
4719 "ns_config_info"
4720 ]
4721 return calculated_params
4722
4723 def _look_for_deployed_vca(
4724 self,
4725 deployed_vca,
4726 member_vnf_index,
4727 vdu_id,
4728 vdu_count_index,
4729 kdu_name=None,
4730 ee_descriptor_id=None,
4731 ):
4732 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4733 for vca in deployed_vca:
4734 if not vca:
4735 continue
4736 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4737 continue
4738 if (
4739 vdu_count_index is not None
4740 and vdu_count_index != vca["vdu_count_index"]
4741 ):
4742 continue
4743 if kdu_name and kdu_name != vca["kdu_name"]:
4744 continue
4745 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4746 continue
4747 break
4748 else:
4749 # vca_deployed not found
4750 raise LcmException(
4751 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4752 " is not deployed".format(
4753 member_vnf_index,
4754 vdu_id,
4755 vdu_count_index,
4756 kdu_name,
4757 ee_descriptor_id,
4758 )
4759 )
4760 # get ee_id
4761 ee_id = vca.get("ee_id")
4762 vca_type = vca.get(
4763 "type", "lxc_proxy_charm"
4764 ) # default value for backward compatibility - proxy charm
4765 if not ee_id:
4766 raise LcmException(
4767 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4768 "execution environment".format(
4769 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4770 )
4771 )
4772 return ee_id, vca_type
4773
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """Execute a primitive in the given execution environment, retrying on
        failure.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; for "config" the params are wrapped
            in {"params": primitive_params}
        :param primitive_params: dict of parameters to pass to the primitive
        :param retries: number of extra attempts after a failed execution
        :param retries_interval: seconds to sleep between attempts
        :param timeout: max seconds per attempt (defaults to self.timeout.primitive)
        :param vca_type: VCA connector to use (defaults to "lxc_proxy_charm")
        :param db_dict: where the connector writes intermediate status, if any
        :param vca_id: VCA (juju controller) identifier, if any
        :return: ("COMPLETED", output) on success; ("FAILED", error message)
            after exhausting retries; ("FAIL", error message) on unexpected errors
        """
        try:
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout.progress_primitive,
                            total_timeout=self.timeout.primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout.primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4834
4835 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4836 """
4837 Updating the vca_status with latest juju information in nsrs record
4838 :param: nsr_id: Id of the nsr
4839 :param: nslcmop_id: Id of the nslcmop
4840 :return: None
4841 """
4842
4843 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4844 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4845 vca_id = self.get_vca_id({}, db_nsr)
4846 if db_nsr["_admin"]["deployed"]["K8s"]:
4847 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4848 cluster_uuid, kdu_instance, cluster_type = (
4849 k8s["k8scluster-uuid"],
4850 k8s["kdu-instance"],
4851 k8s["k8scluster-type"],
4852 )
4853 await self._on_update_k8s_db(
4854 cluster_uuid=cluster_uuid,
4855 kdu_instance=kdu_instance,
4856 filter={"_id": nsr_id},
4857 vca_id=vca_id,
4858 cluster_type=cluster_type,
4859 )
4860 else:
4861 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4862 table, filter = "nsrs", {"_id": nsr_id}
4863 path = "_admin.deployed.VCA.{}.".format(vca_index)
4864 await self._on_update_n2vc_db(table, filter, path, {})
4865
4866 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4867 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4868
4869 async def action(self, nsr_id, nslcmop_id):
4870 # Try to lock HA task here
4871 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4872 if not task_is_locked_by_me:
4873 return
4874
4875 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4876 self.logger.debug(logging_text + "Enter")
4877 # get all needed from database
4878 db_nsr = None
4879 db_nslcmop = None
4880 db_nsr_update = {}
4881 db_nslcmop_update = {}
4882 nslcmop_operation_state = None
4883 error_description_nslcmop = None
4884 exc = None
4885 step = ""
4886 try:
4887 # wait for any previous tasks in process
4888 step = "Waiting for previous operations to terminate"
4889 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4890
4891 self._write_ns_status(
4892 nsr_id=nsr_id,
4893 ns_state=None,
4894 current_operation="RUNNING ACTION",
4895 current_operation_id=nslcmop_id,
4896 )
4897
4898 step = "Getting information from database"
4899 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4900 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4901 if db_nslcmop["operationParams"].get("primitive_params"):
4902 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4903 db_nslcmop["operationParams"]["primitive_params"]
4904 )
4905
4906 nsr_deployed = db_nsr["_admin"].get("deployed")
4907 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4908 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4909 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4910 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4911 primitive = db_nslcmop["operationParams"]["primitive"]
4912 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4913 timeout_ns_action = db_nslcmop["operationParams"].get(
4914 "timeout_ns_action", self.timeout.primitive
4915 )
4916
4917 if vnf_index:
4918 step = "Getting vnfr from database"
4919 db_vnfr = self.db.get_one(
4920 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4921 )
4922 if db_vnfr.get("kdur"):
4923 kdur_list = []
4924 for kdur in db_vnfr["kdur"]:
4925 if kdur.get("additionalParams"):
4926 kdur["additionalParams"] = json.loads(
4927 kdur["additionalParams"]
4928 )
4929 kdur_list.append(kdur)
4930 db_vnfr["kdur"] = kdur_list
4931 step = "Getting vnfd from database"
4932 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4933
4934 # Sync filesystem before running a primitive
4935 self.fs.sync(db_vnfr["vnfd-id"])
4936 else:
4937 step = "Getting nsd from database"
4938 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4939
4940 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4941 # for backward compatibility
4942 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4943 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4944 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4945 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4946
4947 # look for primitive
4948 config_primitive_desc = descriptor_configuration = None
4949 if vdu_id:
4950 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4951 elif kdu_name:
4952 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4953 elif vnf_index:
4954 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4955 else:
4956 descriptor_configuration = db_nsd.get("ns-configuration")
4957
4958 if descriptor_configuration and descriptor_configuration.get(
4959 "config-primitive"
4960 ):
4961 for config_primitive in descriptor_configuration["config-primitive"]:
4962 if config_primitive["name"] == primitive:
4963 config_primitive_desc = config_primitive
4964 break
4965
4966 if not config_primitive_desc:
4967 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4968 raise LcmException(
4969 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4970 primitive
4971 )
4972 )
4973 primitive_name = primitive
4974 ee_descriptor_id = None
4975 else:
4976 primitive_name = config_primitive_desc.get(
4977 "execution-environment-primitive", primitive
4978 )
4979 ee_descriptor_id = config_primitive_desc.get(
4980 "execution-environment-ref"
4981 )
4982
4983 if vnf_index:
4984 if vdu_id:
4985 vdur = next(
4986 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4987 )
4988 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4989 elif kdu_name:
4990 kdur = next(
4991 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4992 )
4993 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4994 else:
4995 desc_params = parse_yaml_strings(
4996 db_vnfr.get("additionalParamsForVnf")
4997 )
4998 else:
4999 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
5000 if kdu_name and get_configuration(db_vnfd, kdu_name):
5001 kdu_configuration = get_configuration(db_vnfd, kdu_name)
5002 actions = set()
5003 for primitive in kdu_configuration.get("initial-config-primitive", []):
5004 actions.add(primitive["name"])
5005 for primitive in kdu_configuration.get("config-primitive", []):
5006 actions.add(primitive["name"])
5007 kdu = find_in_list(
5008 nsr_deployed["K8s"],
5009 lambda kdu: kdu_name == kdu["kdu-name"]
5010 and kdu["member-vnf-index"] == vnf_index,
5011 )
5012 kdu_action = (
5013 True
5014 if primitive_name in actions
5015 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5016 else False
5017 )
5018
5019 # TODO check if ns is in a proper status
5020 if kdu_name and (
5021 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5022 ):
5023 # kdur and desc_params already set from before
5024 if primitive_params:
5025 desc_params.update(primitive_params)
5026 # TODO Check if we will need something at vnf level
5027 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5028 if (
5029 kdu_name == kdu["kdu-name"]
5030 and kdu["member-vnf-index"] == vnf_index
5031 ):
5032 break
5033 else:
5034 raise LcmException(
5035 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5036 )
5037
5038 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5039 msg = "unknown k8scluster-type '{}'".format(
5040 kdu.get("k8scluster-type")
5041 )
5042 raise LcmException(msg)
5043
5044 db_dict = {
5045 "collection": "nsrs",
5046 "filter": {"_id": nsr_id},
5047 "path": "_admin.deployed.K8s.{}".format(index),
5048 }
5049 self.logger.debug(
5050 logging_text
5051 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5052 )
5053 step = "Executing kdu {}".format(primitive_name)
5054 if primitive_name == "upgrade":
5055 if desc_params.get("kdu_model"):
5056 kdu_model = desc_params.get("kdu_model")
5057 del desc_params["kdu_model"]
5058 else:
5059 kdu_model = kdu.get("kdu-model")
5060 parts = kdu_model.split(sep=":")
5061 if len(parts) == 2:
5062 kdu_model = parts[0]
5063 if desc_params.get("kdu_atomic_upgrade"):
5064 atomic_upgrade = desc_params.get(
5065 "kdu_atomic_upgrade"
5066 ).lower() in ("yes", "true", "1")
5067 del desc_params["kdu_atomic_upgrade"]
5068 else:
5069 atomic_upgrade = True
5070
5071 detailed_status = await asyncio.wait_for(
5072 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5073 cluster_uuid=kdu.get("k8scluster-uuid"),
5074 kdu_instance=kdu.get("kdu-instance"),
5075 atomic=atomic_upgrade,
5076 kdu_model=kdu_model,
5077 params=desc_params,
5078 db_dict=db_dict,
5079 timeout=timeout_ns_action,
5080 ),
5081 timeout=timeout_ns_action + 10,
5082 )
5083 self.logger.debug(
5084 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5085 )
5086 elif primitive_name == "rollback":
5087 detailed_status = await asyncio.wait_for(
5088 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5089 cluster_uuid=kdu.get("k8scluster-uuid"),
5090 kdu_instance=kdu.get("kdu-instance"),
5091 db_dict=db_dict,
5092 ),
5093 timeout=timeout_ns_action,
5094 )
5095 elif primitive_name == "status":
5096 detailed_status = await asyncio.wait_for(
5097 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5098 cluster_uuid=kdu.get("k8scluster-uuid"),
5099 kdu_instance=kdu.get("kdu-instance"),
5100 vca_id=vca_id,
5101 ),
5102 timeout=timeout_ns_action,
5103 )
5104 else:
5105 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5106 kdu["kdu-name"], nsr_id
5107 )
5108 params = self._map_primitive_params(
5109 config_primitive_desc, primitive_params, desc_params
5110 )
5111
5112 detailed_status = await asyncio.wait_for(
5113 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5114 cluster_uuid=kdu.get("k8scluster-uuid"),
5115 kdu_instance=kdu_instance,
5116 primitive_name=primitive_name,
5117 params=params,
5118 db_dict=db_dict,
5119 timeout=timeout_ns_action,
5120 vca_id=vca_id,
5121 ),
5122 timeout=timeout_ns_action,
5123 )
5124
5125 if detailed_status:
5126 nslcmop_operation_state = "COMPLETED"
5127 else:
5128 detailed_status = ""
5129 nslcmop_operation_state = "FAILED"
5130 else:
5131 ee_id, vca_type = self._look_for_deployed_vca(
5132 nsr_deployed["VCA"],
5133 member_vnf_index=vnf_index,
5134 vdu_id=vdu_id,
5135 vdu_count_index=vdu_count_index,
5136 ee_descriptor_id=ee_descriptor_id,
5137 )
5138 for vca_index, vca_deployed in enumerate(
5139 db_nsr["_admin"]["deployed"]["VCA"]
5140 ):
5141 if vca_deployed.get("member-vnf-index") == vnf_index:
5142 db_dict = {
5143 "collection": "nsrs",
5144 "filter": {"_id": nsr_id},
5145 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5146 }
5147 break
5148 (
5149 nslcmop_operation_state,
5150 detailed_status,
5151 ) = await self._ns_execute_primitive(
5152 ee_id,
5153 primitive=primitive_name,
5154 primitive_params=self._map_primitive_params(
5155 config_primitive_desc, primitive_params, desc_params
5156 ),
5157 timeout=timeout_ns_action,
5158 vca_type=vca_type,
5159 db_dict=db_dict,
5160 vca_id=vca_id,
5161 )
5162
5163 db_nslcmop_update["detailed-status"] = detailed_status
5164 error_description_nslcmop = (
5165 detailed_status if nslcmop_operation_state == "FAILED" else ""
5166 )
5167 self.logger.debug(
5168 logging_text
5169 + "Done with result {} {}".format(
5170 nslcmop_operation_state, detailed_status
5171 )
5172 )
5173 return # database update is called inside finally
5174
5175 except (DbException, LcmException, N2VCException, K8sException) as e:
5176 self.logger.error(logging_text + "Exit Exception {}".format(e))
5177 exc = e
5178 except asyncio.CancelledError:
5179 self.logger.error(
5180 logging_text + "Cancelled Exception while '{}'".format(step)
5181 )
5182 exc = "Operation was cancelled"
5183 except asyncio.TimeoutError:
5184 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5185 exc = "Timeout"
5186 except Exception as e:
5187 exc = traceback.format_exc()
5188 self.logger.critical(
5189 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5190 exc_info=True,
5191 )
5192 finally:
5193 if exc:
5194 db_nslcmop_update[
5195 "detailed-status"
5196 ] = (
5197 detailed_status
5198 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5199 nslcmop_operation_state = "FAILED"
5200 if db_nsr:
5201 self._write_ns_status(
5202 nsr_id=nsr_id,
5203 ns_state=db_nsr[
5204 "nsState"
5205 ], # TODO check if degraded. For the moment use previous status
5206 current_operation="IDLE",
5207 current_operation_id=None,
5208 # error_description=error_description_nsr,
5209 # error_detail=error_detail,
5210 other_update=db_nsr_update,
5211 )
5212
5213 self._write_op_status(
5214 op_id=nslcmop_id,
5215 stage="",
5216 error_message=error_description_nslcmop,
5217 operation_state=nslcmop_operation_state,
5218 other_update=db_nslcmop_update,
5219 )
5220
5221 if nslcmop_operation_state:
5222 try:
5223 await self.msg.aiowrite(
5224 "ns",
5225 "actioned",
5226 {
5227 "nsr_id": nsr_id,
5228 "nslcmop_id": nslcmop_id,
5229 "operationState": nslcmop_operation_state,
5230 },
5231 loop=self.loop,
5232 )
5233 except Exception as e:
5234 self.logger.error(
5235 logging_text + "kafka_write notification Exception {}".format(e)
5236 )
5237 self.logger.debug(logging_text + "Exit")
5238 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5239 return nslcmop_operation_state, detailed_status
5240
5241 async def terminate_vdus(
5242 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5243 ):
5244 """This method terminates VDUs
5245
5246 Args:
5247 db_vnfr: VNF instance record
5248 member_vnf_index: VNF index to identify the VDUs to be removed
5249 db_nsr: NS instance record
5250 update_db_nslcmops: Nslcmop update record
5251 """
5252 vca_scaling_info = []
5253 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5254 scaling_info["scaling_direction"] = "IN"
5255 scaling_info["vdu-delete"] = {}
5256 scaling_info["kdu-delete"] = {}
5257 db_vdur = db_vnfr.get("vdur")
5258 vdur_list = copy(db_vdur)
5259 count_index = 0
5260 for index, vdu in enumerate(vdur_list):
5261 vca_scaling_info.append(
5262 {
5263 "osm_vdu_id": vdu["vdu-id-ref"],
5264 "member-vnf-index": member_vnf_index,
5265 "type": "delete",
5266 "vdu_index": count_index,
5267 }
5268 )
5269 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5270 scaling_info["vdu"].append(
5271 {
5272 "name": vdu.get("name") or vdu.get("vdu-name"),
5273 "vdu_id": vdu["vdu-id-ref"],
5274 "interface": [],
5275 }
5276 )
5277 for interface in vdu["interfaces"]:
5278 scaling_info["vdu"][index]["interface"].append(
5279 {
5280 "name": interface["name"],
5281 "ip_address": interface["ip-address"],
5282 "mac_address": interface.get("mac-address"),
5283 }
5284 )
5285 self.logger.info("NS update scaling info{}".format(scaling_info))
5286 stage[2] = "Terminating VDUs"
5287 if scaling_info.get("vdu-delete"):
5288 # scale_process = "RO"
5289 if self.ro_config.ng:
5290 await self._scale_ng_ro(
5291 logging_text,
5292 db_nsr,
5293 update_db_nslcmops,
5294 db_vnfr,
5295 scaling_info,
5296 stage,
5297 )
5298
5299 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5300 """This method is to Remove VNF instances from NS.
5301
5302 Args:
5303 nsr_id: NS instance id
5304 nslcmop_id: nslcmop id of update
5305 vnf_instance_id: id of the VNF instance to be removed
5306
5307 Returns:
5308 result: (str, str) COMPLETED/FAILED, details
5309 """
5310 try:
5311 db_nsr_update = {}
5312 logging_text = "Task ns={} update ".format(nsr_id)
5313 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5314 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5315 if check_vnfr_count > 1:
5316 stage = ["", "", ""]
5317 step = "Getting nslcmop from database"
5318 self.logger.debug(
5319 step + " after having waited for previous tasks to be completed"
5320 )
5321 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5322 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5323 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5324 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5325 """ db_vnfr = self.db.get_one(
5326 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5327
5328 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5329 await self.terminate_vdus(
5330 db_vnfr,
5331 member_vnf_index,
5332 db_nsr,
5333 update_db_nslcmops,
5334 stage,
5335 logging_text,
5336 )
5337
5338 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5339 constituent_vnfr.remove(db_vnfr.get("_id"))
5340 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5341 "constituent-vnfr-ref"
5342 )
5343 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5344 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5345 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5346 return "COMPLETED", "Done"
5347 else:
5348 step = "Terminate VNF Failed with"
5349 raise LcmException(
5350 "{} Cannot terminate the last VNF in this NS.".format(
5351 vnf_instance_id
5352 )
5353 )
5354 except (LcmException, asyncio.CancelledError):
5355 raise
5356 except Exception as e:
5357 self.logger.debug("Error removing VNF {}".format(e))
5358 return "FAILED", "Error removing VNF {}".format(e)
5359
5360 async def _ns_redeploy_vnf(
5361 self,
5362 nsr_id,
5363 nslcmop_id,
5364 db_vnfd,
5365 db_vnfr,
5366 db_nsr,
5367 ):
5368 """This method updates and redeploys VNF instances
5369
5370 Args:
5371 nsr_id: NS instance id
5372 nslcmop_id: nslcmop id
5373 db_vnfd: VNF descriptor
5374 db_vnfr: VNF instance record
5375 db_nsr: NS instance record
5376
5377 Returns:
5378 result: (str, str) COMPLETED/FAILED, details
5379 """
5380 try:
5381 count_index = 0
5382 stage = ["", "", ""]
5383 logging_text = "Task ns={} update ".format(nsr_id)
5384 latest_vnfd_revision = db_vnfd["_admin"].get("revision")
5385 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5386
5387 # Terminate old VNF resources
5388 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5389 await self.terminate_vdus(
5390 db_vnfr,
5391 member_vnf_index,
5392 db_nsr,
5393 update_db_nslcmops,
5394 stage,
5395 logging_text,
5396 )
5397
5398 # old_vnfd_id = db_vnfr["vnfd-id"]
5399 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5400 new_db_vnfd = db_vnfd
5401 # new_vnfd_ref = new_db_vnfd["id"]
5402 # new_vnfd_id = vnfd_id
5403
5404 # Create VDUR
5405 new_vnfr_cp = []
5406 for cp in new_db_vnfd.get("ext-cpd", ()):
5407 vnf_cp = {
5408 "name": cp.get("id"),
5409 "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
5410 "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
5411 "id": cp.get("id"),
5412 }
5413 new_vnfr_cp.append(vnf_cp)
5414 new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
5415 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5416 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5417 new_vnfr_update = {
5418 "revision": latest_vnfd_revision,
5419 "connection-point": new_vnfr_cp,
5420 "vdur": new_vdur,
5421 "ip-address": "",
5422 }
5423 self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
5424 updated_db_vnfr = self.db.get_one(
5425 "vnfrs",
5426 {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
5427 )
5428
5429 # Instantiate new VNF resources
5430 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5431 vca_scaling_info = []
5432 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5433 scaling_info["scaling_direction"] = "OUT"
5434 scaling_info["vdu-create"] = {}
5435 scaling_info["kdu-create"] = {}
5436 vdud_instantiate_list = db_vnfd["vdu"]
5437 for index, vdud in enumerate(vdud_instantiate_list):
5438 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
5439 if cloud_init_text:
5440 additional_params = (
5441 self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
5442 or {}
5443 )
5444 cloud_init_list = []
5445 if cloud_init_text:
5446 # TODO Information of its own ip is not available because db_vnfr is not updated.
5447 additional_params["OSM"] = get_osm_params(
5448 updated_db_vnfr, vdud["id"], 1
5449 )
5450 cloud_init_list.append(
5451 self._parse_cloud_init(
5452 cloud_init_text,
5453 additional_params,
5454 db_vnfd["id"],
5455 vdud["id"],
5456 )
5457 )
5458 vca_scaling_info.append(
5459 {
5460 "osm_vdu_id": vdud["id"],
5461 "member-vnf-index": member_vnf_index,
5462 "type": "create",
5463 "vdu_index": count_index,
5464 }
5465 )
5466 scaling_info["vdu-create"][vdud["id"]] = count_index
5467 if self.ro_config.ng:
5468 self.logger.debug(
5469 "New Resources to be deployed: {}".format(scaling_info)
5470 )
5471 await self._scale_ng_ro(
5472 logging_text,
5473 db_nsr,
5474 update_db_nslcmops,
5475 updated_db_vnfr,
5476 scaling_info,
5477 stage,
5478 )
5479 return "COMPLETED", "Done"
5480 except (LcmException, asyncio.CancelledError):
5481 raise
5482 except Exception as e:
5483 self.logger.debug("Error updating VNF {}".format(e))
5484 return "FAILED", "Error updating VNF {}".format(e)
5485
5486 async def _ns_charm_upgrade(
5487 self,
5488 ee_id,
5489 charm_id,
5490 charm_type,
5491 path,
5492 timeout: float = None,
5493 ) -> (str, str):
5494 """This method upgrade charms in VNF instances
5495
5496 Args:
5497 ee_id: Execution environment id
5498 path: Local path to the charm
5499 charm_id: charm-id
5500 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5501 timeout: (Float) Timeout for the ns update operation
5502
5503 Returns:
5504 result: (str, str) COMPLETED/FAILED, details
5505 """
5506 try:
5507 charm_type = charm_type or "lxc_proxy_charm"
5508 output = await self.vca_map[charm_type].upgrade_charm(
5509 ee_id=ee_id,
5510 path=path,
5511 charm_id=charm_id,
5512 charm_type=charm_type,
5513 timeout=timeout or self.timeout.ns_update,
5514 )
5515
5516 if output:
5517 return "COMPLETED", output
5518
5519 except (LcmException, asyncio.CancelledError):
5520 raise
5521
5522 except Exception as e:
5523
5524 self.logger.debug("Error upgrading charm {}".format(path))
5525
5526 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5527
5528 async def update(self, nsr_id, nslcmop_id):
5529 """Update NS according to different update types
5530
5531 This method performs upgrade of VNF instances then updates the revision
5532 number in VNF record
5533
5534 Args:
5535 nsr_id: Network service will be updated
5536 nslcmop_id: ns lcm operation id
5537
5538 Returns:
5539 It may raise DbException, LcmException, N2VCException, K8sException
5540
5541 """
5542 # Try to lock HA task here
5543 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5544 if not task_is_locked_by_me:
5545 return
5546
5547 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5548 self.logger.debug(logging_text + "Enter")
5549
5550 # Set the required variables to be filled up later
5551 db_nsr = None
5552 db_nslcmop_update = {}
5553 vnfr_update = {}
5554 nslcmop_operation_state = None
5555 db_nsr_update = {}
5556 error_description_nslcmop = ""
5557 exc = None
5558 change_type = "updated"
5559 detailed_status = ""
5560 member_vnf_index = None
5561
5562 try:
5563 # wait for any previous tasks in process
5564 step = "Waiting for previous operations to terminate"
5565 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5566 self._write_ns_status(
5567 nsr_id=nsr_id,
5568 ns_state=None,
5569 current_operation="UPDATING",
5570 current_operation_id=nslcmop_id,
5571 )
5572
5573 step = "Getting nslcmop from database"
5574 db_nslcmop = self.db.get_one(
5575 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5576 )
5577 update_type = db_nslcmop["operationParams"]["updateType"]
5578
5579 step = "Getting nsr from database"
5580 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5581 old_operational_status = db_nsr["operational-status"]
5582 db_nsr_update["operational-status"] = "updating"
5583 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5584 nsr_deployed = db_nsr["_admin"].get("deployed")
5585
5586 if update_type == "CHANGE_VNFPKG":
5587
5588 # Get the input parameters given through update request
5589 vnf_instance_id = db_nslcmop["operationParams"][
5590 "changeVnfPackageData"
5591 ].get("vnfInstanceId")
5592
5593 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5594 "vnfdId"
5595 )
5596 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5597
5598 step = "Getting vnfr from database"
5599 db_vnfr = self.db.get_one(
5600 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5601 )
5602
5603 step = "Getting vnfds from database"
5604 # Latest VNFD
5605 latest_vnfd = self.db.get_one(
5606 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5607 )
5608 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5609
5610 # Current VNFD
5611 current_vnf_revision = db_vnfr.get("revision", 1)
5612 current_vnfd = self.db.get_one(
5613 "vnfds_revisions",
5614 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5615 fail_on_empty=False,
5616 )
5617 # Charm artifact paths will be filled up later
5618 (
5619 current_charm_artifact_path,
5620 target_charm_artifact_path,
5621 charm_artifact_paths,
5622 helm_artifacts,
5623 ) = ([], [], [], [])
5624
5625 step = "Checking if revision has changed in VNFD"
5626 if current_vnf_revision != latest_vnfd_revision:
5627
5628 change_type = "policy_updated"
5629
5630 # There is new revision of VNFD, update operation is required
5631 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5632 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5633
5634 step = "Removing the VNFD packages if they exist in the local path"
5635 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5636 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5637
5638 step = "Get the VNFD packages from FSMongo"
5639 self.fs.sync(from_path=latest_vnfd_path)
5640 self.fs.sync(from_path=current_vnfd_path)
5641
5642 step = (
5643 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5644 )
5645 current_base_folder = current_vnfd["_admin"]["storage"]
5646 latest_base_folder = latest_vnfd["_admin"]["storage"]
5647
5648 for vca_index, vca_deployed in enumerate(
5649 get_iterable(nsr_deployed, "VCA")
5650 ):
5651 vnf_index = db_vnfr.get("member-vnf-index-ref")
5652
5653 # Getting charm-id and charm-type
5654 if vca_deployed.get("member-vnf-index") == vnf_index:
5655 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5656 vca_type = vca_deployed.get("type")
5657 vdu_count_index = vca_deployed.get("vdu_count_index")
5658
5659 # Getting ee-id
5660 ee_id = vca_deployed.get("ee_id")
5661
5662 step = "Getting descriptor config"
5663 if current_vnfd.get("kdu"):
5664
5665 search_key = "kdu_name"
5666 else:
5667 search_key = "vnfd_id"
5668
5669 entity_id = vca_deployed.get(search_key)
5670
5671 descriptor_config = get_configuration(
5672 current_vnfd, entity_id
5673 )
5674
5675 if "execution-environment-list" in descriptor_config:
5676 ee_list = descriptor_config.get(
5677 "execution-environment-list", []
5678 )
5679 else:
5680 ee_list = []
5681
5682 # There could be several charm used in the same VNF
5683 for ee_item in ee_list:
5684 if ee_item.get("juju"):
5685
5686 step = "Getting charm name"
5687 charm_name = ee_item["juju"].get("charm")
5688
5689 step = "Setting Charm artifact paths"
5690 current_charm_artifact_path.append(
5691 get_charm_artifact_path(
5692 current_base_folder,
5693 charm_name,
5694 vca_type,
5695 current_vnf_revision,
5696 )
5697 )
5698 target_charm_artifact_path.append(
5699 get_charm_artifact_path(
5700 latest_base_folder,
5701 charm_name,
5702 vca_type,
5703 latest_vnfd_revision,
5704 )
5705 )
5706 elif ee_item.get("helm-chart"):
5707 # add chart to list and all parameters
5708 step = "Getting helm chart name"
5709 chart_name = ee_item.get("helm-chart")
5710 if (
5711 ee_item.get("helm-version")
5712 and ee_item.get("helm-version") == "v2"
5713 ):
5714 vca_type = "helm"
5715 else:
5716 vca_type = "helm-v3"
5717 step = "Setting Helm chart artifact paths"
5718
5719 helm_artifacts.append(
5720 {
5721 "current_artifact_path": get_charm_artifact_path(
5722 current_base_folder,
5723 chart_name,
5724 vca_type,
5725 current_vnf_revision,
5726 ),
5727 "target_artifact_path": get_charm_artifact_path(
5728 latest_base_folder,
5729 chart_name,
5730 vca_type,
5731 latest_vnfd_revision,
5732 ),
5733 "ee_id": ee_id,
5734 "vca_index": vca_index,
5735 "vdu_index": vdu_count_index,
5736 }
5737 )
5738
5739 charm_artifact_paths = zip(
5740 current_charm_artifact_path, target_charm_artifact_path
5741 )
5742
5743 step = "Checking if software version has changed in VNFD"
5744 if find_software_version(current_vnfd) != find_software_version(
5745 latest_vnfd
5746 ):
5747
5748 step = "Checking if existing VNF has charm"
5749 for current_charm_path, target_charm_path in list(
5750 charm_artifact_paths
5751 ):
5752 if current_charm_path:
5753 raise LcmException(
5754 "Software version change is not supported as VNF instance {} has charm.".format(
5755 vnf_instance_id
5756 )
5757 )
5758
5759 # There is no change in the charm package, then redeploy the VNF
5760 # based on new descriptor
5761 step = "Redeploying VNF"
5762 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5763 (result, detailed_status) = await self._ns_redeploy_vnf(
5764 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5765 )
5766 if result == "FAILED":
5767 nslcmop_operation_state = result
5768 error_description_nslcmop = detailed_status
5769 db_nslcmop_update["detailed-status"] = detailed_status
5770 self.logger.debug(
5771 logging_text
5772 + " step {} Done with result {} {}".format(
5773 step, nslcmop_operation_state, detailed_status
5774 )
5775 )
5776
5777 else:
5778 step = "Checking if any charm package has changed or not"
5779 for current_charm_path, target_charm_path in list(
5780 charm_artifact_paths
5781 ):
5782 if (
5783 current_charm_path
5784 and target_charm_path
5785 and self.check_charm_hash_changed(
5786 current_charm_path, target_charm_path
5787 )
5788 ):
5789
5790 step = "Checking whether VNF uses juju bundle"
5791 if check_juju_bundle_existence(current_vnfd):
5792
5793 raise LcmException(
5794 "Charm upgrade is not supported for the instance which"
5795 " uses juju-bundle: {}".format(
5796 check_juju_bundle_existence(current_vnfd)
5797 )
5798 )
5799
5800 step = "Upgrading Charm"
5801 (
5802 result,
5803 detailed_status,
5804 ) = await self._ns_charm_upgrade(
5805 ee_id=ee_id,
5806 charm_id=vca_id,
5807 charm_type=vca_type,
5808 path=self.fs.path + target_charm_path,
5809 timeout=timeout_seconds,
5810 )
5811
5812 if result == "FAILED":
5813 nslcmop_operation_state = result
5814 error_description_nslcmop = detailed_status
5815
5816 db_nslcmop_update["detailed-status"] = detailed_status
5817 self.logger.debug(
5818 logging_text
5819 + " step {} Done with result {} {}".format(
5820 step, nslcmop_operation_state, detailed_status
5821 )
5822 )
5823
5824 step = "Updating policies"
5825 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5826 result = "COMPLETED"
5827 detailed_status = "Done"
5828 db_nslcmop_update["detailed-status"] = "Done"
5829
5830 # helm base EE
5831 for item in helm_artifacts:
5832 if not (
5833 item["current_artifact_path"]
5834 and item["target_artifact_path"]
5835 and self.check_charm_hash_changed(
5836 item["current_artifact_path"],
5837 item["target_artifact_path"],
5838 )
5839 ):
5840 continue
5841 db_update_entry = "_admin.deployed.VCA.{}.".format(
5842 item["vca_index"]
5843 )
5844 vnfr_id = db_vnfr["_id"]
5845 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
5846 db_dict = {
5847 "collection": "nsrs",
5848 "filter": {"_id": nsr_id},
5849 "path": db_update_entry,
5850 }
5851 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
5852 await self.vca_map[vca_type].upgrade_execution_environment(
5853 namespace=namespace,
5854 helm_id=helm_id,
5855 db_dict=db_dict,
5856 config=osm_config,
5857 artifact_path=item["target_artifact_path"],
5858 vca_type=vca_type,
5859 )
5860 vnf_id = db_vnfr.get("vnfd-ref")
5861 config_descriptor = get_configuration(latest_vnfd, vnf_id)
5862 self.logger.debug("get ssh key block")
5863 rw_mgmt_ip = None
5864 if deep_get(
5865 config_descriptor,
5866 ("config-access", "ssh-access", "required"),
5867 ):
5868 # Needed to inject a ssh key
5869 user = deep_get(
5870 config_descriptor,
5871 ("config-access", "ssh-access", "default-user"),
5872 )
5873 step = (
5874 "Install configuration Software, getting public ssh key"
5875 )
5876 pub_key = await self.vca_map[
5877 vca_type
5878 ].get_ee_ssh_public__key(
5879 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
5880 )
5881
5882 step = (
5883 "Insert public key into VM user={} ssh_key={}".format(
5884 user, pub_key
5885 )
5886 )
5887 self.logger.debug(logging_text + step)
5888
5889 # wait for RO (ip-address) Insert pub_key into VM
5890 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
5891 logging_text,
5892 nsr_id,
5893 vnfr_id,
5894 None,
5895 item["vdu_index"],
5896 user=user,
5897 pub_key=pub_key,
5898 )
5899
5900 initial_config_primitive_list = config_descriptor.get(
5901 "initial-config-primitive"
5902 )
5903 config_primitive = next(
5904 (
5905 p
5906 for p in initial_config_primitive_list
5907 if p["name"] == "config"
5908 ),
5909 None,
5910 )
5911 if not config_primitive:
5912 continue
5913
5914 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5915 if rw_mgmt_ip:
5916 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
5917 if db_vnfr.get("additionalParamsForVnf"):
5918 deploy_params.update(
5919 parse_yaml_strings(
5920 db_vnfr["additionalParamsForVnf"].copy()
5921 )
5922 )
5923 primitive_params_ = self._map_primitive_params(
5924 config_primitive, {}, deploy_params
5925 )
5926
5927 step = "execute primitive '{}' params '{}'".format(
5928 config_primitive["name"], primitive_params_
5929 )
5930 self.logger.debug(logging_text + step)
5931 await self.vca_map[vca_type].exec_primitive(
5932 ee_id=ee_id,
5933 primitive_name=config_primitive["name"],
5934 params_dict=primitive_params_,
5935 db_dict=db_dict,
5936 vca_id=vca_id,
5937 vca_type=vca_type,
5938 )
5939
5940 step = "Updating policies"
5941 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5942 detailed_status = "Done"
5943 db_nslcmop_update["detailed-status"] = "Done"
5944
5945 # If nslcmop_operation_state is None, so any operation is not failed.
5946 if not nslcmop_operation_state:
5947 nslcmop_operation_state = "COMPLETED"
5948
5949 # If update CHANGE_VNFPKG nslcmop_operation is successful
5950 # vnf revision need to be updated
5951 vnfr_update["revision"] = latest_vnfd_revision
5952 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5953
5954 self.logger.debug(
5955 logging_text
5956 + " task Done with result {} {}".format(
5957 nslcmop_operation_state, detailed_status
5958 )
5959 )
5960 elif update_type == "REMOVE_VNF":
5961 # This part is included in https://osm.etsi.org/gerrit/11876
5962 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5963 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5964 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5965 step = "Removing VNF"
5966 (result, detailed_status) = await self.remove_vnf(
5967 nsr_id, nslcmop_id, vnf_instance_id
5968 )
5969 if result == "FAILED":
5970 nslcmop_operation_state = result
5971 error_description_nslcmop = detailed_status
5972 db_nslcmop_update["detailed-status"] = detailed_status
5973 change_type = "vnf_terminated"
5974 if not nslcmop_operation_state:
5975 nslcmop_operation_state = "COMPLETED"
5976 self.logger.debug(
5977 logging_text
5978 + " task Done with result {} {}".format(
5979 nslcmop_operation_state, detailed_status
5980 )
5981 )
5982
5983 elif update_type == "OPERATE_VNF":
5984 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
5985 "vnfInstanceId"
5986 ]
5987 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
5988 "changeStateTo"
5989 ]
5990 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
5991 "additionalParam"
5992 ]
5993 (result, detailed_status) = await self.rebuild_start_stop(
5994 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5995 )
5996 if result == "FAILED":
5997 nslcmop_operation_state = result
5998 error_description_nslcmop = detailed_status
5999 db_nslcmop_update["detailed-status"] = detailed_status
6000 if not nslcmop_operation_state:
6001 nslcmop_operation_state = "COMPLETED"
6002 self.logger.debug(
6003 logging_text
6004 + " task Done with result {} {}".format(
6005 nslcmop_operation_state, detailed_status
6006 )
6007 )
6008
6009 # If nslcmop_operation_state is None, so any operation is not failed.
6010 # All operations are executed in overall.
6011 if not nslcmop_operation_state:
6012 nslcmop_operation_state = "COMPLETED"
6013 db_nsr_update["operational-status"] = old_operational_status
6014
6015 except (DbException, LcmException, N2VCException, K8sException) as e:
6016 self.logger.error(logging_text + "Exit Exception {}".format(e))
6017 exc = e
6018 except asyncio.CancelledError:
6019 self.logger.error(
6020 logging_text + "Cancelled Exception while '{}'".format(step)
6021 )
6022 exc = "Operation was cancelled"
6023 except asyncio.TimeoutError:
6024 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6025 exc = "Timeout"
6026 except Exception as e:
6027 exc = traceback.format_exc()
6028 self.logger.critical(
6029 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6030 exc_info=True,
6031 )
6032 finally:
6033 if exc:
6034 db_nslcmop_update[
6035 "detailed-status"
6036 ] = (
6037 detailed_status
6038 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6039 nslcmop_operation_state = "FAILED"
6040 db_nsr_update["operational-status"] = old_operational_status
6041 if db_nsr:
6042 self._write_ns_status(
6043 nsr_id=nsr_id,
6044 ns_state=db_nsr["nsState"],
6045 current_operation="IDLE",
6046 current_operation_id=None,
6047 other_update=db_nsr_update,
6048 )
6049
6050 self._write_op_status(
6051 op_id=nslcmop_id,
6052 stage="",
6053 error_message=error_description_nslcmop,
6054 operation_state=nslcmop_operation_state,
6055 other_update=db_nslcmop_update,
6056 )
6057
6058 if nslcmop_operation_state:
6059 try:
6060 msg = {
6061 "nsr_id": nsr_id,
6062 "nslcmop_id": nslcmop_id,
6063 "operationState": nslcmop_operation_state,
6064 }
6065 if (
6066 change_type in ("vnf_terminated", "policy_updated")
6067 and member_vnf_index
6068 ):
6069 msg.update({"vnf_member_index": member_vnf_index})
6070 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6071 except Exception as e:
6072 self.logger.error(
6073 logging_text + "kafka_write notification Exception {}".format(e)
6074 )
6075 self.logger.debug(logging_text + "Exit")
6076 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6077 return nslcmop_operation_state, detailed_status
6078
6079 async def scale(self, nsr_id, nslcmop_id):
6080 # Try to lock HA task here
6081 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6082 if not task_is_locked_by_me:
6083 return
6084
6085 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6086 stage = ["", "", ""]
6087 tasks_dict_info = {}
6088 # ^ stage, step, VIM progress
6089 self.logger.debug(logging_text + "Enter")
6090 # get all needed from database
6091 db_nsr = None
6092 db_nslcmop_update = {}
6093 db_nsr_update = {}
6094 exc = None
6095 # in case of error, indicates what part of scale was failed to put nsr at error status
6096 scale_process = None
6097 old_operational_status = ""
6098 old_config_status = ""
6099 nsi_id = None
6100 try:
6101 # wait for any previous tasks in process
6102 step = "Waiting for previous operations to terminate"
6103 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6104 self._write_ns_status(
6105 nsr_id=nsr_id,
6106 ns_state=None,
6107 current_operation="SCALING",
6108 current_operation_id=nslcmop_id,
6109 )
6110
6111 step = "Getting nslcmop from database"
6112 self.logger.debug(
6113 step + " after having waited for previous tasks to be completed"
6114 )
6115 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6116
6117 step = "Getting nsr from database"
6118 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6119 old_operational_status = db_nsr["operational-status"]
6120 old_config_status = db_nsr["config-status"]
6121
6122 step = "Parsing scaling parameters"
6123 db_nsr_update["operational-status"] = "scaling"
6124 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6125 nsr_deployed = db_nsr["_admin"].get("deployed")
6126
6127 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6128 "scaleByStepData"
6129 ]["member-vnf-index"]
6130 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6131 "scaleByStepData"
6132 ]["scaling-group-descriptor"]
6133 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6134 # for backward compatibility
6135 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6136 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6137 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6138 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6139
6140 step = "Getting vnfr from database"
6141 db_vnfr = self.db.get_one(
6142 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6143 )
6144
6145 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6146
6147 step = "Getting vnfd from database"
6148 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6149
6150 base_folder = db_vnfd["_admin"]["storage"]
6151
6152 step = "Getting scaling-group-descriptor"
6153 scaling_descriptor = find_in_list(
6154 get_scaling_aspect(db_vnfd),
6155 lambda scale_desc: scale_desc["name"] == scaling_group,
6156 )
6157 if not scaling_descriptor:
6158 raise LcmException(
6159 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6160 "at vnfd:scaling-group-descriptor".format(scaling_group)
6161 )
6162
6163 step = "Sending scale order to VIM"
6164 # TODO check if ns is in a proper status
6165 nb_scale_op = 0
6166 if not db_nsr["_admin"].get("scaling-group"):
6167 self.update_db_2(
6168 "nsrs",
6169 nsr_id,
6170 {
6171 "_admin.scaling-group": [
6172 {"name": scaling_group, "nb-scale-op": 0}
6173 ]
6174 },
6175 )
6176 admin_scale_index = 0
6177 else:
6178 for admin_scale_index, admin_scale_info in enumerate(
6179 db_nsr["_admin"]["scaling-group"]
6180 ):
6181 if admin_scale_info["name"] == scaling_group:
6182 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6183 break
6184 else: # not found, set index one plus last element and add new entry with the name
6185 admin_scale_index += 1
6186 db_nsr_update[
6187 "_admin.scaling-group.{}.name".format(admin_scale_index)
6188 ] = scaling_group
6189
6190 vca_scaling_info = []
6191 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6192 if scaling_type == "SCALE_OUT":
6193 if "aspect-delta-details" not in scaling_descriptor:
6194 raise LcmException(
6195 "Aspect delta details not fount in scaling descriptor {}".format(
6196 scaling_descriptor["name"]
6197 )
6198 )
6199 # count if max-instance-count is reached
6200 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6201
6202 scaling_info["scaling_direction"] = "OUT"
6203 scaling_info["vdu-create"] = {}
6204 scaling_info["kdu-create"] = {}
6205 for delta in deltas:
6206 for vdu_delta in delta.get("vdu-delta", {}):
6207 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6208 # vdu_index also provides the number of instance of the targeted vdu
6209 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6210 cloud_init_text = self._get_vdu_cloud_init_content(
6211 vdud, db_vnfd
6212 )
6213 if cloud_init_text:
6214 additional_params = (
6215 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6216 or {}
6217 )
6218 cloud_init_list = []
6219
6220 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6221 max_instance_count = 10
6222 if vdu_profile and "max-number-of-instances" in vdu_profile:
6223 max_instance_count = vdu_profile.get(
6224 "max-number-of-instances", 10
6225 )
6226
6227 default_instance_num = get_number_of_instances(
6228 db_vnfd, vdud["id"]
6229 )
6230 instances_number = vdu_delta.get("number-of-instances", 1)
6231 nb_scale_op += instances_number
6232
6233 new_instance_count = nb_scale_op + default_instance_num
6234 # Control if new count is over max and vdu count is less than max.
6235 # Then assign new instance count
6236 if new_instance_count > max_instance_count > vdu_count:
6237 instances_number = new_instance_count - max_instance_count
6238 else:
6239 instances_number = instances_number
6240
6241 if new_instance_count > max_instance_count:
6242 raise LcmException(
6243 "reached the limit of {} (max-instance-count) "
6244 "scaling-out operations for the "
6245 "scaling-group-descriptor '{}'".format(
6246 nb_scale_op, scaling_group
6247 )
6248 )
6249 for x in range(vdu_delta.get("number-of-instances", 1)):
6250 if cloud_init_text:
6251 # TODO Information of its own ip is not available because db_vnfr is not updated.
6252 additional_params["OSM"] = get_osm_params(
6253 db_vnfr, vdu_delta["id"], vdu_index + x
6254 )
6255 cloud_init_list.append(
6256 self._parse_cloud_init(
6257 cloud_init_text,
6258 additional_params,
6259 db_vnfd["id"],
6260 vdud["id"],
6261 )
6262 )
6263 vca_scaling_info.append(
6264 {
6265 "osm_vdu_id": vdu_delta["id"],
6266 "member-vnf-index": vnf_index,
6267 "type": "create",
6268 "vdu_index": vdu_index + x,
6269 }
6270 )
6271 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6272 for kdu_delta in delta.get("kdu-resource-delta", {}):
6273 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6274 kdu_name = kdu_profile["kdu-name"]
6275 resource_name = kdu_profile.get("resource-name", "")
6276
6277 # Might have different kdus in the same delta
6278 # Should have list for each kdu
6279 if not scaling_info["kdu-create"].get(kdu_name, None):
6280 scaling_info["kdu-create"][kdu_name] = []
6281
6282 kdur = get_kdur(db_vnfr, kdu_name)
6283 if kdur.get("helm-chart"):
6284 k8s_cluster_type = "helm-chart-v3"
6285 self.logger.debug("kdur: {}".format(kdur))
6286 if (
6287 kdur.get("helm-version")
6288 and kdur.get("helm-version") == "v2"
6289 ):
6290 k8s_cluster_type = "helm-chart"
6291 elif kdur.get("juju-bundle"):
6292 k8s_cluster_type = "juju-bundle"
6293 else:
6294 raise LcmException(
6295 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6296 "juju-bundle. Maybe an old NBI version is running".format(
6297 db_vnfr["member-vnf-index-ref"], kdu_name
6298 )
6299 )
6300
6301 max_instance_count = 10
6302 if kdu_profile and "max-number-of-instances" in kdu_profile:
6303 max_instance_count = kdu_profile.get(
6304 "max-number-of-instances", 10
6305 )
6306
6307 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6308 deployed_kdu, _ = get_deployed_kdu(
6309 nsr_deployed, kdu_name, vnf_index
6310 )
6311 if deployed_kdu is None:
6312 raise LcmException(
6313 "KDU '{}' for vnf '{}' not deployed".format(
6314 kdu_name, vnf_index
6315 )
6316 )
6317 kdu_instance = deployed_kdu.get("kdu-instance")
6318 instance_num = await self.k8scluster_map[
6319 k8s_cluster_type
6320 ].get_scale_count(
6321 resource_name,
6322 kdu_instance,
6323 vca_id=vca_id,
6324 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6325 kdu_model=deployed_kdu.get("kdu-model"),
6326 )
6327 kdu_replica_count = instance_num + kdu_delta.get(
6328 "number-of-instances", 1
6329 )
6330
6331 # Control if new count is over max and instance_num is less than max.
6332 # Then assign max instance number to kdu replica count
6333 if kdu_replica_count > max_instance_count > instance_num:
6334 kdu_replica_count = max_instance_count
6335 if kdu_replica_count > max_instance_count:
6336 raise LcmException(
6337 "reached the limit of {} (max-instance-count) "
6338 "scaling-out operations for the "
6339 "scaling-group-descriptor '{}'".format(
6340 instance_num, scaling_group
6341 )
6342 )
6343
6344 for x in range(kdu_delta.get("number-of-instances", 1)):
6345 vca_scaling_info.append(
6346 {
6347 "osm_kdu_id": kdu_name,
6348 "member-vnf-index": vnf_index,
6349 "type": "create",
6350 "kdu_index": instance_num + x - 1,
6351 }
6352 )
6353 scaling_info["kdu-create"][kdu_name].append(
6354 {
6355 "member-vnf-index": vnf_index,
6356 "type": "create",
6357 "k8s-cluster-type": k8s_cluster_type,
6358 "resource-name": resource_name,
6359 "scale": kdu_replica_count,
6360 }
6361 )
6362 elif scaling_type == "SCALE_IN":
6363 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6364
6365 scaling_info["scaling_direction"] = "IN"
6366 scaling_info["vdu-delete"] = {}
6367 scaling_info["kdu-delete"] = {}
6368
6369 for delta in deltas:
6370 for vdu_delta in delta.get("vdu-delta", {}):
6371 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6372 min_instance_count = 0
6373 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6374 if vdu_profile and "min-number-of-instances" in vdu_profile:
6375 min_instance_count = vdu_profile["min-number-of-instances"]
6376
6377 default_instance_num = get_number_of_instances(
6378 db_vnfd, vdu_delta["id"]
6379 )
6380 instance_num = vdu_delta.get("number-of-instances", 1)
6381 nb_scale_op -= instance_num
6382
6383 new_instance_count = nb_scale_op + default_instance_num
6384
6385 if new_instance_count < min_instance_count < vdu_count:
6386 instances_number = min_instance_count - new_instance_count
6387 else:
6388 instances_number = instance_num
6389
6390 if new_instance_count < min_instance_count:
6391 raise LcmException(
6392 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6393 "scaling-group-descriptor '{}'".format(
6394 nb_scale_op, scaling_group
6395 )
6396 )
6397 for x in range(vdu_delta.get("number-of-instances", 1)):
6398 vca_scaling_info.append(
6399 {
6400 "osm_vdu_id": vdu_delta["id"],
6401 "member-vnf-index": vnf_index,
6402 "type": "delete",
6403 "vdu_index": vdu_index - 1 - x,
6404 }
6405 )
6406 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6407 for kdu_delta in delta.get("kdu-resource-delta", {}):
6408 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6409 kdu_name = kdu_profile["kdu-name"]
6410 resource_name = kdu_profile.get("resource-name", "")
6411
6412 if not scaling_info["kdu-delete"].get(kdu_name, None):
6413 scaling_info["kdu-delete"][kdu_name] = []
6414
6415 kdur = get_kdur(db_vnfr, kdu_name)
6416 if kdur.get("helm-chart"):
6417 k8s_cluster_type = "helm-chart-v3"
6418 self.logger.debug("kdur: {}".format(kdur))
6419 if (
6420 kdur.get("helm-version")
6421 and kdur.get("helm-version") == "v2"
6422 ):
6423 k8s_cluster_type = "helm-chart"
6424 elif kdur.get("juju-bundle"):
6425 k8s_cluster_type = "juju-bundle"
6426 else:
6427 raise LcmException(
6428 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6429 "juju-bundle. Maybe an old NBI version is running".format(
6430 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6431 )
6432 )
6433
6434 min_instance_count = 0
6435 if kdu_profile and "min-number-of-instances" in kdu_profile:
6436 min_instance_count = kdu_profile["min-number-of-instances"]
6437
6438 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6439 deployed_kdu, _ = get_deployed_kdu(
6440 nsr_deployed, kdu_name, vnf_index
6441 )
6442 if deployed_kdu is None:
6443 raise LcmException(
6444 "KDU '{}' for vnf '{}' not deployed".format(
6445 kdu_name, vnf_index
6446 )
6447 )
6448 kdu_instance = deployed_kdu.get("kdu-instance")
6449 instance_num = await self.k8scluster_map[
6450 k8s_cluster_type
6451 ].get_scale_count(
6452 resource_name,
6453 kdu_instance,
6454 vca_id=vca_id,
6455 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6456 kdu_model=deployed_kdu.get("kdu-model"),
6457 )
6458 kdu_replica_count = instance_num - kdu_delta.get(
6459 "number-of-instances", 1
6460 )
6461
6462 if kdu_replica_count < min_instance_count < instance_num:
6463 kdu_replica_count = min_instance_count
6464 if kdu_replica_count < min_instance_count:
6465 raise LcmException(
6466 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6467 "scaling-group-descriptor '{}'".format(
6468 instance_num, scaling_group
6469 )
6470 )
6471
6472 for x in range(kdu_delta.get("number-of-instances", 1)):
6473 vca_scaling_info.append(
6474 {
6475 "osm_kdu_id": kdu_name,
6476 "member-vnf-index": vnf_index,
6477 "type": "delete",
6478 "kdu_index": instance_num - x - 1,
6479 }
6480 )
6481 scaling_info["kdu-delete"][kdu_name].append(
6482 {
6483 "member-vnf-index": vnf_index,
6484 "type": "delete",
6485 "k8s-cluster-type": k8s_cluster_type,
6486 "resource-name": resource_name,
6487 "scale": kdu_replica_count,
6488 }
6489 )
6490
6491 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6492 vdu_delete = copy(scaling_info.get("vdu-delete"))
6493 if scaling_info["scaling_direction"] == "IN":
6494 for vdur in reversed(db_vnfr["vdur"]):
6495 if vdu_delete.get(vdur["vdu-id-ref"]):
6496 vdu_delete[vdur["vdu-id-ref"]] -= 1
6497 scaling_info["vdu"].append(
6498 {
6499 "name": vdur.get("name") or vdur.get("vdu-name"),
6500 "vdu_id": vdur["vdu-id-ref"],
6501 "interface": [],
6502 }
6503 )
6504 for interface in vdur["interfaces"]:
6505 scaling_info["vdu"][-1]["interface"].append(
6506 {
6507 "name": interface["name"],
6508 "ip_address": interface["ip-address"],
6509 "mac_address": interface.get("mac-address"),
6510 }
6511 )
6512 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6513
6514 # PRE-SCALE BEGIN
6515 step = "Executing pre-scale vnf-config-primitive"
6516 if scaling_descriptor.get("scaling-config-action"):
6517 for scaling_config_action in scaling_descriptor[
6518 "scaling-config-action"
6519 ]:
6520 if (
6521 scaling_config_action.get("trigger") == "pre-scale-in"
6522 and scaling_type == "SCALE_IN"
6523 ) or (
6524 scaling_config_action.get("trigger") == "pre-scale-out"
6525 and scaling_type == "SCALE_OUT"
6526 ):
6527 vnf_config_primitive = scaling_config_action[
6528 "vnf-config-primitive-name-ref"
6529 ]
6530 step = db_nslcmop_update[
6531 "detailed-status"
6532 ] = "executing pre-scale scaling-config-action '{}'".format(
6533 vnf_config_primitive
6534 )
6535
6536 # look for primitive
6537 for config_primitive in (
6538 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6539 ).get("config-primitive", ()):
6540 if config_primitive["name"] == vnf_config_primitive:
6541 break
6542 else:
6543 raise LcmException(
6544 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6545 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6546 "primitive".format(scaling_group, vnf_config_primitive)
6547 )
6548
6549 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6550 if db_vnfr.get("additionalParamsForVnf"):
6551 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6552
6553 scale_process = "VCA"
6554 db_nsr_update["config-status"] = "configuring pre-scaling"
6555 primitive_params = self._map_primitive_params(
6556 config_primitive, {}, vnfr_params
6557 )
6558
6559 # Pre-scale retry check: Check if this sub-operation has been executed before
6560 op_index = self._check_or_add_scale_suboperation(
6561 db_nslcmop,
6562 vnf_index,
6563 vnf_config_primitive,
6564 primitive_params,
6565 "PRE-SCALE",
6566 )
6567 if op_index == self.SUBOPERATION_STATUS_SKIP:
6568 # Skip sub-operation
6569 result = "COMPLETED"
6570 result_detail = "Done"
6571 self.logger.debug(
6572 logging_text
6573 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6574 vnf_config_primitive, result, result_detail
6575 )
6576 )
6577 else:
6578 if op_index == self.SUBOPERATION_STATUS_NEW:
6579 # New sub-operation: Get index of this sub-operation
6580 op_index = (
6581 len(db_nslcmop.get("_admin", {}).get("operations"))
6582 - 1
6583 )
6584 self.logger.debug(
6585 logging_text
6586 + "vnf_config_primitive={} New sub-operation".format(
6587 vnf_config_primitive
6588 )
6589 )
6590 else:
6591 # retry: Get registered params for this existing sub-operation
6592 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6593 op_index
6594 ]
6595 vnf_index = op.get("member_vnf_index")
6596 vnf_config_primitive = op.get("primitive")
6597 primitive_params = op.get("primitive_params")
6598 self.logger.debug(
6599 logging_text
6600 + "vnf_config_primitive={} Sub-operation retry".format(
6601 vnf_config_primitive
6602 )
6603 )
6604 # Execute the primitive, either with new (first-time) or registered (reintent) args
6605 ee_descriptor_id = config_primitive.get(
6606 "execution-environment-ref"
6607 )
6608 primitive_name = config_primitive.get(
6609 "execution-environment-primitive", vnf_config_primitive
6610 )
6611 ee_id, vca_type = self._look_for_deployed_vca(
6612 nsr_deployed["VCA"],
6613 member_vnf_index=vnf_index,
6614 vdu_id=None,
6615 vdu_count_index=None,
6616 ee_descriptor_id=ee_descriptor_id,
6617 )
6618 result, result_detail = await self._ns_execute_primitive(
6619 ee_id,
6620 primitive_name,
6621 primitive_params,
6622 vca_type=vca_type,
6623 vca_id=vca_id,
6624 )
6625 self.logger.debug(
6626 logging_text
6627 + "vnf_config_primitive={} Done with result {} {}".format(
6628 vnf_config_primitive, result, result_detail
6629 )
6630 )
6631 # Update operationState = COMPLETED | FAILED
6632 self._update_suboperation_status(
6633 db_nslcmop, op_index, result, result_detail
6634 )
6635
6636 if result == "FAILED":
6637 raise LcmException(result_detail)
6638 db_nsr_update["config-status"] = old_config_status
6639 scale_process = None
6640 # PRE-SCALE END
6641
6642 db_nsr_update[
6643 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6644 ] = nb_scale_op
6645 db_nsr_update[
6646 "_admin.scaling-group.{}.time".format(admin_scale_index)
6647 ] = time()
6648
6649 # SCALE-IN VCA - BEGIN
6650 if vca_scaling_info:
6651 step = db_nslcmop_update[
6652 "detailed-status"
6653 ] = "Deleting the execution environments"
6654 scale_process = "VCA"
6655 for vca_info in vca_scaling_info:
6656 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6657 member_vnf_index = str(vca_info["member-vnf-index"])
6658 self.logger.debug(
6659 logging_text + "vdu info: {}".format(vca_info)
6660 )
6661 if vca_info.get("osm_vdu_id"):
6662 vdu_id = vca_info["osm_vdu_id"]
6663 vdu_index = int(vca_info["vdu_index"])
6664 stage[
6665 1
6666 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6667 member_vnf_index, vdu_id, vdu_index
6668 )
6669 stage[2] = step = "Scaling in VCA"
6670 self._write_op_status(op_id=nslcmop_id, stage=stage)
6671 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6672 config_update = db_nsr["configurationStatus"]
6673 for vca_index, vca in enumerate(vca_update):
6674 if (
6675 (vca or vca.get("ee_id"))
6676 and vca["member-vnf-index"] == member_vnf_index
6677 and vca["vdu_count_index"] == vdu_index
6678 ):
6679 if vca.get("vdu_id"):
6680 config_descriptor = get_configuration(
6681 db_vnfd, vca.get("vdu_id")
6682 )
6683 elif vca.get("kdu_name"):
6684 config_descriptor = get_configuration(
6685 db_vnfd, vca.get("kdu_name")
6686 )
6687 else:
6688 config_descriptor = get_configuration(
6689 db_vnfd, db_vnfd["id"]
6690 )
6691 operation_params = (
6692 db_nslcmop.get("operationParams") or {}
6693 )
6694 exec_terminate_primitives = not operation_params.get(
6695 "skip_terminate_primitives"
6696 ) and vca.get("needed_terminate")
6697 task = asyncio.ensure_future(
6698 asyncio.wait_for(
6699 self.destroy_N2VC(
6700 logging_text,
6701 db_nslcmop,
6702 vca,
6703 config_descriptor,
6704 vca_index,
6705 destroy_ee=True,
6706 exec_primitives=exec_terminate_primitives,
6707 scaling_in=True,
6708 vca_id=vca_id,
6709 ),
6710 timeout=self.timeout.charm_delete,
6711 )
6712 )
6713 tasks_dict_info[task] = "Terminating VCA {}".format(
6714 vca.get("ee_id")
6715 )
6716 del vca_update[vca_index]
6717 del config_update[vca_index]
6718 # wait for pending tasks of terminate primitives
6719 if tasks_dict_info:
6720 self.logger.debug(
6721 logging_text
6722 + "Waiting for tasks {}".format(
6723 list(tasks_dict_info.keys())
6724 )
6725 )
6726 error_list = await self._wait_for_tasks(
6727 logging_text,
6728 tasks_dict_info,
6729 min(
6730 self.timeout.charm_delete, self.timeout.ns_terminate
6731 ),
6732 stage,
6733 nslcmop_id,
6734 )
6735 tasks_dict_info.clear()
6736 if error_list:
6737 raise LcmException("; ".join(error_list))
6738
6739 db_vca_and_config_update = {
6740 "_admin.deployed.VCA": vca_update,
6741 "configurationStatus": config_update,
6742 }
6743 self.update_db_2(
6744 "nsrs", db_nsr["_id"], db_vca_and_config_update
6745 )
6746 scale_process = None
6747 # SCALE-IN VCA - END
6748
6749 # SCALE RO - BEGIN
6750 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6751 scale_process = "RO"
6752 if self.ro_config.ng:
6753 await self._scale_ng_ro(
6754 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6755 )
6756 scaling_info.pop("vdu-create", None)
6757 scaling_info.pop("vdu-delete", None)
6758
6759 scale_process = None
6760 # SCALE RO - END
6761
6762 # SCALE KDU - BEGIN
6763 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6764 scale_process = "KDU"
6765 await self._scale_kdu(
6766 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6767 )
6768 scaling_info.pop("kdu-create", None)
6769 scaling_info.pop("kdu-delete", None)
6770
6771 scale_process = None
6772 # SCALE KDU - END
6773
6774 if db_nsr_update:
6775 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6776
6777 # SCALE-UP VCA - BEGIN
6778 if vca_scaling_info:
6779 step = db_nslcmop_update[
6780 "detailed-status"
6781 ] = "Creating new execution environments"
6782 scale_process = "VCA"
6783 for vca_info in vca_scaling_info:
6784 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6785 member_vnf_index = str(vca_info["member-vnf-index"])
6786 self.logger.debug(
6787 logging_text + "vdu info: {}".format(vca_info)
6788 )
6789 vnfd_id = db_vnfr["vnfd-ref"]
6790 if vca_info.get("osm_vdu_id"):
6791 vdu_index = int(vca_info["vdu_index"])
6792 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6793 if db_vnfr.get("additionalParamsForVnf"):
6794 deploy_params.update(
6795 parse_yaml_strings(
6796 db_vnfr["additionalParamsForVnf"].copy()
6797 )
6798 )
6799 descriptor_config = get_configuration(
6800 db_vnfd, db_vnfd["id"]
6801 )
6802 if descriptor_config:
6803 vdu_id = None
6804 vdu_name = None
6805 kdu_name = None
6806 kdu_index = None
6807 self._deploy_n2vc(
6808 logging_text=logging_text
6809 + "member_vnf_index={} ".format(member_vnf_index),
6810 db_nsr=db_nsr,
6811 db_vnfr=db_vnfr,
6812 nslcmop_id=nslcmop_id,
6813 nsr_id=nsr_id,
6814 nsi_id=nsi_id,
6815 vnfd_id=vnfd_id,
6816 vdu_id=vdu_id,
6817 kdu_name=kdu_name,
6818 kdu_index=kdu_index,
6819 member_vnf_index=member_vnf_index,
6820 vdu_index=vdu_index,
6821 vdu_name=vdu_name,
6822 deploy_params=deploy_params,
6823 descriptor_config=descriptor_config,
6824 base_folder=base_folder,
6825 task_instantiation_info=tasks_dict_info,
6826 stage=stage,
6827 )
6828 vdu_id = vca_info["osm_vdu_id"]
6829 vdur = find_in_list(
6830 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6831 )
6832 descriptor_config = get_configuration(db_vnfd, vdu_id)
6833 if vdur.get("additionalParams"):
6834 deploy_params_vdu = parse_yaml_strings(
6835 vdur["additionalParams"]
6836 )
6837 else:
6838 deploy_params_vdu = deploy_params
6839 deploy_params_vdu["OSM"] = get_osm_params(
6840 db_vnfr, vdu_id, vdu_count_index=vdu_index
6841 )
6842 if descriptor_config:
6843 vdu_name = None
6844 kdu_name = None
6845 kdu_index = None
6846 stage[
6847 1
6848 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6849 member_vnf_index, vdu_id, vdu_index
6850 )
6851 stage[2] = step = "Scaling out VCA"
6852 self._write_op_status(op_id=nslcmop_id, stage=stage)
6853 self._deploy_n2vc(
6854 logging_text=logging_text
6855 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6856 member_vnf_index, vdu_id, vdu_index
6857 ),
6858 db_nsr=db_nsr,
6859 db_vnfr=db_vnfr,
6860 nslcmop_id=nslcmop_id,
6861 nsr_id=nsr_id,
6862 nsi_id=nsi_id,
6863 vnfd_id=vnfd_id,
6864 vdu_id=vdu_id,
6865 kdu_name=kdu_name,
6866 member_vnf_index=member_vnf_index,
6867 vdu_index=vdu_index,
6868 kdu_index=kdu_index,
6869 vdu_name=vdu_name,
6870 deploy_params=deploy_params_vdu,
6871 descriptor_config=descriptor_config,
6872 base_folder=base_folder,
6873 task_instantiation_info=tasks_dict_info,
6874 stage=stage,
6875 )
6876 # SCALE-UP VCA - END
6877 scale_process = None
6878
6879 # POST-SCALE BEGIN
6880 # execute primitive service POST-SCALING
6881 step = "Executing post-scale vnf-config-primitive"
6882 if scaling_descriptor.get("scaling-config-action"):
6883 for scaling_config_action in scaling_descriptor[
6884 "scaling-config-action"
6885 ]:
6886 if (
6887 scaling_config_action.get("trigger") == "post-scale-in"
6888 and scaling_type == "SCALE_IN"
6889 ) or (
6890 scaling_config_action.get("trigger") == "post-scale-out"
6891 and scaling_type == "SCALE_OUT"
6892 ):
6893 vnf_config_primitive = scaling_config_action[
6894 "vnf-config-primitive-name-ref"
6895 ]
6896 step = db_nslcmop_update[
6897 "detailed-status"
6898 ] = "executing post-scale scaling-config-action '{}'".format(
6899 vnf_config_primitive
6900 )
6901
6902 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6903 if db_vnfr.get("additionalParamsForVnf"):
6904 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6905
6906 # look for primitive
6907 for config_primitive in (
6908 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6909 ).get("config-primitive", ()):
6910 if config_primitive["name"] == vnf_config_primitive:
6911 break
6912 else:
6913 raise LcmException(
6914 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6915 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6916 "config-primitive".format(
6917 scaling_group, vnf_config_primitive
6918 )
6919 )
6920 scale_process = "VCA"
6921 db_nsr_update["config-status"] = "configuring post-scaling"
6922 primitive_params = self._map_primitive_params(
6923 config_primitive, {}, vnfr_params
6924 )
6925
6926 # Post-scale retry check: Check if this sub-operation has been executed before
6927 op_index = self._check_or_add_scale_suboperation(
6928 db_nslcmop,
6929 vnf_index,
6930 vnf_config_primitive,
6931 primitive_params,
6932 "POST-SCALE",
6933 )
6934 if op_index == self.SUBOPERATION_STATUS_SKIP:
6935 # Skip sub-operation
6936 result = "COMPLETED"
6937 result_detail = "Done"
6938 self.logger.debug(
6939 logging_text
6940 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6941 vnf_config_primitive, result, result_detail
6942 )
6943 )
6944 else:
6945 if op_index == self.SUBOPERATION_STATUS_NEW:
6946 # New sub-operation: Get index of this sub-operation
6947 op_index = (
6948 len(db_nslcmop.get("_admin", {}).get("operations"))
6949 - 1
6950 )
6951 self.logger.debug(
6952 logging_text
6953 + "vnf_config_primitive={} New sub-operation".format(
6954 vnf_config_primitive
6955 )
6956 )
6957 else:
6958 # retry: Get registered params for this existing sub-operation
6959 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6960 op_index
6961 ]
6962 vnf_index = op.get("member_vnf_index")
6963 vnf_config_primitive = op.get("primitive")
6964 primitive_params = op.get("primitive_params")
6965 self.logger.debug(
6966 logging_text
6967 + "vnf_config_primitive={} Sub-operation retry".format(
6968 vnf_config_primitive
6969 )
6970 )
6971 # Execute the primitive, either with new (first-time) or registered (reintent) args
6972 ee_descriptor_id = config_primitive.get(
6973 "execution-environment-ref"
6974 )
6975 primitive_name = config_primitive.get(
6976 "execution-environment-primitive", vnf_config_primitive
6977 )
6978 ee_id, vca_type = self._look_for_deployed_vca(
6979 nsr_deployed["VCA"],
6980 member_vnf_index=vnf_index,
6981 vdu_id=None,
6982 vdu_count_index=None,
6983 ee_descriptor_id=ee_descriptor_id,
6984 )
6985 result, result_detail = await self._ns_execute_primitive(
6986 ee_id,
6987 primitive_name,
6988 primitive_params,
6989 vca_type=vca_type,
6990 vca_id=vca_id,
6991 )
6992 self.logger.debug(
6993 logging_text
6994 + "vnf_config_primitive={} Done with result {} {}".format(
6995 vnf_config_primitive, result, result_detail
6996 )
6997 )
6998 # Update operationState = COMPLETED | FAILED
6999 self._update_suboperation_status(
7000 db_nslcmop, op_index, result, result_detail
7001 )
7002
7003 if result == "FAILED":
7004 raise LcmException(result_detail)
7005 db_nsr_update["config-status"] = old_config_status
7006 scale_process = None
7007 # POST-SCALE END
7008
7009 db_nsr_update[
7010 "detailed-status"
7011 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7012 db_nsr_update["operational-status"] = (
7013 "running"
7014 if old_operational_status == "failed"
7015 else old_operational_status
7016 )
7017 db_nsr_update["config-status"] = old_config_status
7018 return
7019 except (
7020 ROclient.ROClientException,
7021 DbException,
7022 LcmException,
7023 NgRoException,
7024 ) as e:
7025 self.logger.error(logging_text + "Exit Exception {}".format(e))
7026 exc = e
7027 except asyncio.CancelledError:
7028 self.logger.error(
7029 logging_text + "Cancelled Exception while '{}'".format(step)
7030 )
7031 exc = "Operation was cancelled"
7032 except Exception as e:
7033 exc = traceback.format_exc()
7034 self.logger.critical(
7035 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7036 exc_info=True,
7037 )
7038 finally:
7039 self._write_ns_status(
7040 nsr_id=nsr_id,
7041 ns_state=None,
7042 current_operation="IDLE",
7043 current_operation_id=None,
7044 )
7045 if tasks_dict_info:
7046 stage[1] = "Waiting for instantiate pending tasks."
7047 self.logger.debug(logging_text + stage[1])
7048 exc = await self._wait_for_tasks(
7049 logging_text,
7050 tasks_dict_info,
7051 self.timeout.ns_deploy,
7052 stage,
7053 nslcmop_id,
7054 nsr_id=nsr_id,
7055 )
7056 if exc:
7057 db_nslcmop_update[
7058 "detailed-status"
7059 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7060 nslcmop_operation_state = "FAILED"
7061 if db_nsr:
7062 db_nsr_update["operational-status"] = old_operational_status
7063 db_nsr_update["config-status"] = old_config_status
7064 db_nsr_update["detailed-status"] = ""
7065 if scale_process:
7066 if "VCA" in scale_process:
7067 db_nsr_update["config-status"] = "failed"
7068 if "RO" in scale_process:
7069 db_nsr_update["operational-status"] = "failed"
7070 db_nsr_update[
7071 "detailed-status"
7072 ] = "FAILED scaling nslcmop={} {}: {}".format(
7073 nslcmop_id, step, exc
7074 )
7075 else:
7076 error_description_nslcmop = None
7077 nslcmop_operation_state = "COMPLETED"
7078 db_nslcmop_update["detailed-status"] = "Done"
7079
7080 self._write_op_status(
7081 op_id=nslcmop_id,
7082 stage="",
7083 error_message=error_description_nslcmop,
7084 operation_state=nslcmop_operation_state,
7085 other_update=db_nslcmop_update,
7086 )
7087 if db_nsr:
7088 self._write_ns_status(
7089 nsr_id=nsr_id,
7090 ns_state=None,
7091 current_operation="IDLE",
7092 current_operation_id=None,
7093 other_update=db_nsr_update,
7094 )
7095
7096 if nslcmop_operation_state:
7097 try:
7098 msg = {
7099 "nsr_id": nsr_id,
7100 "nslcmop_id": nslcmop_id,
7101 "operationState": nslcmop_operation_state,
7102 }
7103 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7104 except Exception as e:
7105 self.logger.error(
7106 logging_text + "kafka_write notification Exception {}".format(e)
7107 )
7108 self.logger.debug(logging_text + "Exit")
7109 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7110
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs of a NS through the K8s cluster connectors.

        For each KDU instance to scale:
        - on scale-in ("delete"): run the descriptor's terminate-config-primitives
          first (only when no juju EE handles them), then scale,
        - on scale-out ("create"): scale first, then run the descriptor's
          initial-config-primitives (only when no juju EE handles them).

        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param nsr_deployed: content of nsr._admin.deployed
        :param db_vnfd: database content of the vnfd owning the KDUs
        :param vca_id: VCA identifier passed through to the K8s connector
        :param scaling_info: dict with "kdu-create"/"kdu-delete" maps of
            kdu-name -> list of per-instance scaling info
        :return: None or exception
        """
        # NOTE(review): if both "kdu-create" and "kdu-delete" are present, only
        # "kdu-create" is processed — confirm callers never send both at once.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU record and its position in _admin.deployed.K8s
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db path where the connector writes status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run terminate primitives only when no juju EE is in charge of them
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # outer wait_for guards against a connector that ignores total_timeout
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                # perform the actual scale on the K8s cluster (both create and delete)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # run initial primitives only when no juju EE is in charge of them
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in their declared "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # NOTE(review): timeout hard-coded to 600 s here, unlike the
                            # terminate branch which uses self.timeout.primitive — confirm.
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7220
7221 async def _scale_ng_ro(
7222 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7223 ):
7224 nsr_id = db_nslcmop["nsInstanceId"]
7225 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7226 db_vnfrs = {}
7227
7228 # read from db: vnfd's for every vnf
7229 db_vnfds = []
7230
7231 # for each vnf in ns, read vnfd
7232 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7233 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7234 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7235 # if we haven't this vnfd, read it from db
7236 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7237 # read from db
7238 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7239 db_vnfds.append(vnfd)
7240 n2vc_key = self.n2vc.get_public_key()
7241 n2vc_key_list = [n2vc_key]
7242 self.scale_vnfr(
7243 db_vnfr,
7244 vdu_scaling_info.get("vdu-create"),
7245 vdu_scaling_info.get("vdu-delete"),
7246 mark_delete=True,
7247 )
7248 # db_vnfr has been updated, update db_vnfrs to use it
7249 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7250 await self._instantiate_ng_ro(
7251 logging_text,
7252 nsr_id,
7253 db_nsd,
7254 db_nsr,
7255 db_nslcmop,
7256 db_vnfrs,
7257 db_vnfds,
7258 n2vc_key_list,
7259 stage=stage,
7260 start_deploy=time(),
7261 timeout_ns_deploy=self.timeout.ns_deploy,
7262 )
7263 if vdu_scaling_info.get("vdu-delete"):
7264 self.scale_vnfr(
7265 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7266 )
7267
7268 async def extract_prometheus_scrape_jobs(
7269 self,
7270 ee_id: str,
7271 artifact_path: str,
7272 ee_config_descriptor: dict,
7273 vnfr_id: str,
7274 nsr_id: str,
7275 target_ip: str,
7276 vnf_member_index: str = "",
7277 vdu_id: str = "",
7278 vdu_index: int = None,
7279 kdu_name: str = "",
7280 kdu_index: int = None,
7281 ) -> dict:
7282 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7283 This method will wait until the corresponding VDU or KDU is fully instantiated
7284
7285 Args:
7286 ee_id (str): Execution Environment ID
7287 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7288 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7289 vnfr_id (str): VNFR ID where this EE applies
7290 nsr_id (str): NSR ID where this EE applies
7291 target_ip (str): VDU/KDU instance IP address
7292 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7293 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7294 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7295 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7296 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7297
7298 Raises:
7299 LcmException: When the VDU or KDU instance was not found in an hour
7300
7301 Returns:
7302 _type_: Prometheus jobs
7303 """
7304 self.logger.debug(f"KDU: {kdu_name}; KDU INDEX: {kdu_index}")
7305 # look if exist a file called 'prometheus*.j2' and
7306 artifact_content = self.fs.dir_ls(artifact_path)
7307 job_file = next(
7308 (
7309 f
7310 for f in artifact_content
7311 if f.startswith("prometheus") and f.endswith(".j2")
7312 ),
7313 None,
7314 )
7315 if not job_file:
7316 return
7317 with self.fs.file_open((artifact_path, job_file), "r") as f:
7318 job_data = f.read()
7319
7320 vdur_name = ""
7321 kdur_name = ""
7322 for r in range(360):
7323 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7324 if vdu_id and vdu_index is not None:
7325 vdur = next(
7326 (
7327 x
7328 for x in get_iterable(db_vnfr, "vdur")
7329 if (
7330 x.get("vdu-id-ref") == vdu_id
7331 and x.get("count-index") == vdu_index
7332 )
7333 ),
7334 {},
7335 )
7336 if vdur.get("name"):
7337 vdur_name = vdur.get("name")
7338 break
7339 if kdu_name and kdu_index is not None:
7340 kdur = next(
7341 (
7342 x
7343 for x in get_iterable(db_vnfr, "kdur")
7344 if (
7345 x.get("kdu-name") == kdu_name
7346 and x.get("count-index") == kdu_index
7347 )
7348 ),
7349 {},
7350 )
7351 if kdur.get("name"):
7352 kdur_name = kdur.get("name")
7353 break
7354
7355 await asyncio.sleep(10, loop=self.loop)
7356 else:
7357 if vdu_id and vdu_index is not None:
7358 raise LcmException(
7359 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7360 )
7361 if kdu_name and kdu_index is not None:
7362 raise LcmException(
7363 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7364 )
7365
7366 # TODO get_service
7367 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7368 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7369 host_port = "80"
7370 vnfr_id = vnfr_id.replace("-", "")
7371 variables = {
7372 "JOB_NAME": vnfr_id,
7373 "TARGET_IP": target_ip,
7374 "EXPORTER_POD_IP": host_name,
7375 "EXPORTER_POD_PORT": host_port,
7376 "NSR_ID": nsr_id,
7377 "VNF_MEMBER_INDEX": vnf_member_index,
7378 "VDUR_NAME": vdur_name,
7379 "KDUR_NAME": kdur_name,
7380 }
7381 job_list = parse_job(job_data, variables)
7382 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7383 for job in job_list:
7384 if (
7385 not isinstance(job.get("job_name"), str)
7386 or vnfr_id not in job["job_name"]
7387 ):
7388 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7389 job["nsr_id"] = nsr_id
7390 job["vnfr_id"] = vnfr_id
7391 return job_list
7392
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Run a start/stop/rebuild operation on a single VDU through the NG-RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the operation being executed
        :param vnf_id: vnfr _id the target VDU belongs to
        :param additional_param: dict with "vdu_id" and "count-index" selecting the VDU
        :param operation_type: RO operation to request (presumably one of
            start/stop/rebuild — confirm the exact literals accepted by RO.operate)
        :return: tuple (result, detail), e.g. ("COMPLETED", "Done") or ("FAILED", ...)
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # candidate vdur's with the requested vdu-id, then pick the requested count-index
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # first key of vim_info identifies the target VIM
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # block until RO finishes (or times out) the requested action
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # only reached on exception: report the failure to the caller
        return "FAILED", "Error in operate VNF {}".format(exc)
7479
7480 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7481 """
7482 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7483
7484 :param: vim_account_id: VIM Account ID
7485
7486 :return: (cloud_name, cloud_credential)
7487 """
7488 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7489 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7490
7491 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7492 """
7493 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7494
7495 :param: vim_account_id: VIM Account ID
7496
7497 :return: (cloud_name, cloud_credential)
7498 """
7499 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7500 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7501
7502 async def migrate(self, nsr_id, nslcmop_id):
7503 """
7504 Migrate VNFs and VDUs instances in a NS
7505
7506 :param: nsr_id: NS Instance ID
7507 :param: nslcmop_id: nslcmop ID of migrate
7508
7509 """
7510 # Try to lock HA task here
7511 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7512 if not task_is_locked_by_me:
7513 return
7514 logging_text = "Task ns={} migrate ".format(nsr_id)
7515 self.logger.debug(logging_text + "Enter")
7516 # get all needed from database
7517 db_nslcmop = None
7518 db_nslcmop_update = {}
7519 nslcmop_operation_state = None
7520 db_nsr_update = {}
7521 target = {}
7522 exc = None
7523 # in case of error, indicates what part of scale was failed to put nsr at error status
7524 start_deploy = time()
7525
7526 try:
7527 # wait for any previous tasks in process
7528 step = "Waiting for previous operations to terminate"
7529 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7530
7531 self._write_ns_status(
7532 nsr_id=nsr_id,
7533 ns_state=None,
7534 current_operation="MIGRATING",
7535 current_operation_id=nslcmop_id,
7536 )
7537 step = "Getting nslcmop from database"
7538 self.logger.debug(
7539 step + " after having waited for previous tasks to be completed"
7540 )
7541 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7542 migrate_params = db_nslcmop.get("operationParams")
7543
7544 target = {}
7545 target.update(migrate_params)
7546 desc = await self.RO.migrate(nsr_id, target)
7547 self.logger.debug("RO return > {}".format(desc))
7548 action_id = desc["action_id"]
7549 await self._wait_ng_ro(
7550 nsr_id,
7551 action_id,
7552 nslcmop_id,
7553 start_deploy,
7554 self.timeout.migrate,
7555 operation="migrate",
7556 )
7557 except (ROclient.ROClientException, DbException, LcmException) as e:
7558 self.logger.error("Exit Exception {}".format(e))
7559 exc = e
7560 except asyncio.CancelledError:
7561 self.logger.error("Cancelled Exception while '{}'".format(step))
7562 exc = "Operation was cancelled"
7563 except Exception as e:
7564 exc = traceback.format_exc()
7565 self.logger.critical(
7566 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7567 )
7568 finally:
7569 self._write_ns_status(
7570 nsr_id=nsr_id,
7571 ns_state=None,
7572 current_operation="IDLE",
7573 current_operation_id=None,
7574 )
7575 if exc:
7576 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7577 nslcmop_operation_state = "FAILED"
7578 else:
7579 nslcmop_operation_state = "COMPLETED"
7580 db_nslcmop_update["detailed-status"] = "Done"
7581 db_nsr_update["detailed-status"] = "Done"
7582
7583 self._write_op_status(
7584 op_id=nslcmop_id,
7585 stage="",
7586 error_message="",
7587 operation_state=nslcmop_operation_state,
7588 other_update=db_nslcmop_update,
7589 )
7590 if nslcmop_operation_state:
7591 try:
7592 msg = {
7593 "nsr_id": nsr_id,
7594 "nslcmop_id": nslcmop_id,
7595 "operationState": nslcmop_operation_state,
7596 }
7597 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7598 except Exception as e:
7599 self.logger.error(
7600 logging_text + "kafka_write notification Exception {}".format(e)
7601 )
7602 self.logger.debug(logging_text + "Exit")
7603 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7604
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS: send the heal order to the VIM through RO and then redeploy the
        execution environments (N2VC) of the healed VDUs.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # saved to restore the nsr status if the operation fails
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # no explicit vdu list: build one entry per existing vdur
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): if no vdur matches, target_instance stays
                            # None and the next line raises AttributeError — confirm
                            # a matching vdur is guaranteed at this point.
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the N2VC tasks launched above, then persist final status
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the statuses saved before the operation started
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify the final operation state through kafka (best effort)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7890
7891 async def heal_RO(
7892 self,
7893 logging_text,
7894 nsr_id,
7895 db_nslcmop,
7896 stage,
7897 ):
7898 """
7899 Heal at RO
7900 :param logging_text: preffix text to use at logging
7901 :param nsr_id: nsr identity
7902 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7903 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7904 :return: None or exception
7905 """
7906
7907 def get_vim_account(vim_account_id):
7908 nonlocal db_vims
7909 if vim_account_id in db_vims:
7910 return db_vims[vim_account_id]
7911 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7912 db_vims[vim_account_id] = db_vim
7913 return db_vim
7914
7915 try:
7916 start_heal = time()
7917 ns_params = db_nslcmop.get("operationParams")
7918 if ns_params and ns_params.get("timeout_ns_heal"):
7919 timeout_ns_heal = ns_params["timeout_ns_heal"]
7920 else:
7921 timeout_ns_heal = self.timeout.ns_heal
7922
7923 db_vims = {}
7924
7925 nslcmop_id = db_nslcmop["_id"]
7926 target = {
7927 "action_id": nslcmop_id,
7928 }
7929 self.logger.warning(
7930 "db_nslcmop={} and timeout_ns_heal={}".format(
7931 db_nslcmop, timeout_ns_heal
7932 )
7933 )
7934 target.update(db_nslcmop.get("operationParams", {}))
7935
7936 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7937 desc = await self.RO.recreate(nsr_id, target)
7938 self.logger.debug("RO return > {}".format(desc))
7939 action_id = desc["action_id"]
7940 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7941 await self._wait_ng_ro(
7942 nsr_id,
7943 action_id,
7944 nslcmop_id,
7945 start_heal,
7946 timeout_ns_heal,
7947 stage,
7948 operation="healing",
7949 )
7950
7951 # Updating NSR
7952 db_nsr_update = {
7953 "_admin.deployed.RO.operational-status": "running",
7954 "detailed-status": " ".join(stage),
7955 }
7956 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7957 self._write_op_status(nslcmop_id, stage)
7958 self.logger.debug(
7959 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7960 )
7961
7962 except Exception as e:
7963 stage[2] = "ERROR healing at VIM"
7964 # self.set_vnfr_at_error(db_vnfrs, str(e))
7965 self.logger.error(
7966 "Error healing at VIM {}".format(e),
7967 exc_info=not isinstance(
7968 e,
7969 (
7970 ROclient.ROClientException,
7971 LcmException,
7972 DbException,
7973 NgRoException,
7974 ),
7975 ),
7976 )
7977 raise
7978
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment of this element.

        For every execution-environment item found in descriptor_config it
        locates (or creates) the matching entry in
        db_nsr._admin.deployed.VCA, persists it, then schedules heal_N2VC
        and registers the task in self.lcm_tasks and task_instantiation_info.

        :param db_nsr: nsr record; its _admin.deployed.VCA list is read and
            appended to in place
        :param descriptor_config: configuration section of the NS/VNF/VDU
            descriptor holding "execution-environment-list" or "juju"
        :param task_instantiation_info: dict task -> human-readable name,
            updated with the created task
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        # NOTE(review): log label "_deploy_n2vc" is inherited from the deploy
        # counterpart of this method; the message text is kept as-is.
        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            # NOTE(review): this inner check is always True here (we are in the
            # elif branch, so "execution-environment-list" cannot be present).
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the ee item; skip unsupported kinds.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Find the existing VCA entry for this element+ee; the for/else
            # creates a new one (and bumps vca_index past the list end) when
            # no entry matches.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory nsr consistent with what was just persisted
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8140
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach and re-configure one VCA execution environment after healing.

        Heal-time counterpart of instantiate_N2VC: waits for the healed VM to
        be reachable, (re)registers the execution environment for native
        charms, reinstalls the configuration software and, when the "run-day1"
        operation parameter is set, re-executes the initial config primitives.
        Progress and status are persisted under
        nsrs._admin.deployed.VCA.<vca_index> and configurationStatus.<vca_index>.

        :param vca_index: position of this VCA inside db_nsr._admin.deployed.VCA
        :param deploy_params: mutated in place ("rw_mgmt_ip", "ns_config_info")
        :raises LcmException: on any failure, wrapping the step being executed
        """
        nsr_id = db_nsr["_id"]
        # dot-terminated prefix used for partial DB updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # db_dict tells the VCA connector where to report status while working
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # The "config" initial primitive, if present, provides the
                # initial charm configuration values.
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # Only log unexpected exception types with a traceback; expected
            # ones are propagated wrapped in LcmException below.
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8550
8551 async def _wait_heal_ro(
8552 self,
8553 nsr_id,
8554 timeout=600,
8555 ):
8556 start_time = time()
8557 while time() <= start_time + timeout:
8558 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8559 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8560 "operational-status"
8561 ]
8562 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8563 if operational_status_ro != "healing":
8564 break
8565 await asyncio.sleep(15, loop=self.loop)
8566 else: # timeout_ns_deploy
8567 raise NgRoException("Timeout waiting ns to deploy")
8568
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Forwards the operation parameters to RO, waits for RO to complete,
        and records the final operation state in the nslcmop record and on
        the "ns" Kafka topic ("verticalscaled" notification).

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO target is just the operation parameters as provided
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # Always leave the NS in IDLE and persist the operation outcome,
            # even when the operation failed or was cancelled.
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    # Notify subscribers (e.g. NBI) about the operation result;
                    # a kafka failure is logged but must not fail the operation.
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")