Revert "Needed for feature 10938: extract_prometheus_scrape_jobs populating prom...
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm_conn import K8sHelmConnector
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import randint
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
131 class NsLcm(LcmBase):
132 SUBOPERATION_STATUS_NOT_FOUND = -1
133 SUBOPERATION_STATUS_NEW = -2
134 SUBOPERATION_STATUS_SKIP = -3
135 task_name_deploy_vca = "Deploying VCA"
136
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler, passed through to LcmBase
        :param lcm_tasks: registry of the LCM tasks currently running
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by every connector created here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # db/fs singletons are initialized elsewhere; only references are taken here
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju charms); status changes are pushed back
        # to the nsr record through _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # connector for helm-based execution environments
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 connector for KDU deployments (no db status callback)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 connector for KDU deployments (no db status callback)
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle connector for KDUs; reports status through _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # maps each kdu deployment type to the k8s connector that handles it
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # maps each VCA (execution environment) type to its connector
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # maps an operation type to the RO coroutine used to poll its status
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
224
225 @staticmethod
226 def increment_ip_mac(ip_mac, vm_index=1):
227 if not isinstance(ip_mac, str):
228 return ip_mac
229 try:
230 # try with ipv4 look for last dot
231 i = ip_mac.rfind(".")
232 if i > 0:
233 i += 1
234 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
235 # try with ipv6 or mac look for last colon. Operate in hex
236 i = ip_mac.rfind(":")
237 if i > 0:
238 i += 1
239 # format in hex, len can be 2 for mac or 4 for ipv6
240 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
241 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
242 )
243 except Exception:
244 pass
245 return None
246
247 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
248
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback from N2VC when a juju unit/application status changes.

        Stores the fresh VCA status in the nsr record ("vcaStatus"), tries to
        toggle the configurationStatus of the affected VCA between READY and
        BROKEN, and switches nsState between READY and DEGRADED depending on
        machine/application health reported by juju.

        :param table: database table the notification refers to
        :param filter: database filter; only "_id" is used to locate the nsr
        :param path: dotted database path; its trailing integer is the VCA index
        :param updated_data: changed data (not used by this method)
        :param vca_id: id of the VCA, forwarded to n2vc.get_status
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted component of the notified path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so both assignments below raise KeyError and are
                # swallowed by the except clause — confirm whether a dotted key
                # "configurationStatus.{index}.status" was intended instead.
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
366
367 async def _on_update_k8s_db(
368 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
369 ):
370 """
371 Updating vca status in NSR record
372 :param cluster_uuid: UUID of a k8s cluster
373 :param kdu_instance: The unique name of the KDU instance
374 :param filter: To get nsr_id
375 :cluster_type: The cluster type (juju, k8s)
376 :return: none
377 """
378
379 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
380 # .format(cluster_uuid, kdu_instance, filter))
381
382 nsr_id = filter.get("_id")
383 try:
384 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
385 cluster_uuid=cluster_uuid,
386 kdu_instance=kdu_instance,
387 yaml_format=False,
388 complete_status=True,
389 vca_id=vca_id,
390 )
391
392 # vcaStatus
393 db_dict = dict()
394 db_dict["vcaStatus"] = {nsr_id: vca_status}
395
396 self.logger.debug(
397 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
398 )
399
400 # write to database
401 self.update_db_2("nsrs", nsr_id, db_dict)
402 except (asyncio.CancelledError, asyncio.TimeoutError):
403 raise
404 except Exception as e:
405 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
406
407 @staticmethod
408 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
409 try:
410 env = Environment(
411 undefined=StrictUndefined,
412 autoescape=select_autoescape(default_for_string=True, default=True),
413 )
414 template = env.from_string(cloud_init_text)
415 return template.render(additional_params or {})
416 except UndefinedError as e:
417 raise LcmException(
418 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
419 "file, must be provided in the instantiation parameters inside the "
420 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
421 )
422 except (TemplateError, TemplateNotFound) as e:
423 raise LcmException(
424 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
425 vnfd_id, vdu_id, e
426 )
427 )
428
429 def _get_vdu_cloud_init_content(self, vdu, vnfd):
430 cloud_init_content = cloud_init_file = None
431 try:
432 if vdu.get("cloud-init-file"):
433 base_folder = vnfd["_admin"]["storage"]
434 if base_folder["pkg-dir"]:
435 cloud_init_file = "{}/{}/cloud_init/{}".format(
436 base_folder["folder"],
437 base_folder["pkg-dir"],
438 vdu["cloud-init-file"],
439 )
440 else:
441 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
442 base_folder["folder"],
443 vdu["cloud-init-file"],
444 )
445 with self.fs.file_open(cloud_init_file, "r") as ci_file:
446 cloud_init_content = ci_file.read()
447 elif vdu.get("cloud-init"):
448 cloud_init_content = vdu["cloud-init"]
449
450 return cloud_init_content
451 except FsException as e:
452 raise LcmException(
453 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
454 vnfd["id"], vdu["id"], cloud_init_file, e
455 )
456 )
457
458 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
459 vdur = next(
460 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
461 )
462 additional_params = vdur.get("additionalParams")
463 return parse_yaml_strings(additional_params)
464
465 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
466 """
467 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
468 :param vnfd: input vnfd
469 :param new_id: overrides vnf id if provided
470 :param additionalParams: Instantiation params for VNFs provided
471 :param nsrId: Id of the NSR
472 :return: copy of vnfd
473 """
474 vnfd_RO = deepcopy(vnfd)
475 # remove unused by RO configuration, monitoring, scaling and internal keys
476 vnfd_RO.pop("_id", None)
477 vnfd_RO.pop("_admin", None)
478 vnfd_RO.pop("monitoring-param", None)
479 vnfd_RO.pop("scaling-group-descriptor", None)
480 vnfd_RO.pop("kdu", None)
481 vnfd_RO.pop("k8s-cluster", None)
482 if new_id:
483 vnfd_RO["id"] = new_id
484
485 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
486 for vdu in get_iterable(vnfd_RO, "vdu"):
487 vdu.pop("cloud-init-file", None)
488 vdu.pop("cloud-init", None)
489 return vnfd_RO
490
491 @staticmethod
492 def ip_profile_2_RO(ip_profile):
493 RO_ip_profile = deepcopy(ip_profile)
494 if "dns-server" in RO_ip_profile:
495 if isinstance(RO_ip_profile["dns-server"], list):
496 RO_ip_profile["dns-address"] = []
497 for ds in RO_ip_profile.pop("dns-server"):
498 RO_ip_profile["dns-address"].append(ds["address"])
499 else:
500 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
501 if RO_ip_profile.get("ip-version") == "ipv4":
502 RO_ip_profile["ip-version"] = "IPv4"
503 if RO_ip_profile.get("ip-version") == "ipv6":
504 RO_ip_profile["ip-version"] = "IPv6"
505 if "dhcp-params" in RO_ip_profile:
506 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
507 return RO_ip_profile
508
509 def _get_ro_vim_id_for_vim_account(self, vim_account):
510 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
511 if db_vim["_admin"]["operationalState"] != "ENABLED":
512 raise LcmException(
513 "VIM={} is not available. operationalState={}".format(
514 vim_account, db_vim["_admin"]["operationalState"]
515 )
516 )
517 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
518 return RO_vim_id
519
520 def get_ro_wim_id_for_wim_account(self, wim_account):
521 if isinstance(wim_account, str):
522 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
523 if db_wim["_admin"]["operationalState"] != "ENABLED":
524 raise LcmException(
525 "WIM={} is not available. operationalState={}".format(
526 wim_account, db_wim["_admin"]["operationalState"]
527 )
528 )
529 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
530 return RO_wim_id
531 else:
532 return wim_account
533
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Add/remove vdur entries of a vnfr in the database for a scale operation.

        :param db_vnfr: vnfr content; its "vdur" list is refreshed from the db
            before returning
        :param vdu_create: dict vdu-id -> number of replicas to add
        :param vdu_delete: dict vdu-id -> number of replicas to remove
        :param mark_delete: when True, vdurs are only marked status=DELETING
            instead of being pulled from the database
        :raises LcmException: scaling out when neither a vdur nor a saved
            vdur-template exists for the vdu
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # use the newest existing vdur of this vdu as the clone source
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # new replica: fresh _id, BUILD status, bumped count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM assigns new values
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the newest vdu_count replicas as DELETING in one update
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # push new vdurs and/or the saved template in a single db write
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
645
646 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
647 """
648 Updates database nsr with the RO info for the created vld
649 :param ns_update_nsr: dictionary to be filled with the updated info
650 :param db_nsr: content of db_nsr. This is also modified
651 :param nsr_desc_RO: nsr descriptor from RO
652 :return: Nothing, LcmException is raised on errors
653 """
654
655 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
656 for net_RO in get_iterable(nsr_desc_RO, "nets"):
657 if vld["id"] != net_RO.get("ns_net_osm_id"):
658 continue
659 vld["vim-id"] = net_RO.get("vim_net_id")
660 vld["name"] = net_RO.get("vim_name")
661 vld["status"] = net_RO.get("status")
662 vld["status-detailed"] = net_RO.get("error_msg")
663 ns_update_nsr["vld.{}".format(vld_index)] = vld
664 break
665 else:
666 raise LcmException(
667 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
668 )
669
670 def set_vnfr_at_error(self, db_vnfrs, error_text):
671 try:
672 for db_vnfr in db_vnfrs.values():
673 vnfr_update = {"status": "ERROR"}
674 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
675 if "status" not in vdur:
676 vdur["status"] = "ERROR"
677 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
678 if error_text:
679 vdur["status-detailed"] = str(error_text)
680 vnfr_update[
681 "vdur.{}.status-detailed".format(vdu_index)
682 ] = "ERROR"
683 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
684 except DbException as e:
685 self.logger.error("Cannot update vnf. {}".format(e))
686
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry for this member index; the for/else below
            # raises when RO did not report it
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';' — keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO; skip them
                        continue
                    # match the RO vm by vdu id and replica (count-index)
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy ip/mac of each interface, matched by internal name
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update this vnfr's vlds from the RO nets, matched by vnf_net_osm_id
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
783
784 def _get_ns_config_info(self, nsr_id):
785 """
786 Generates a mapping between vnf,vdu elements and the N2VC id
787 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
788 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
789 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
790 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
791 """
792 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
793 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
794 mapping = {}
795 ns_config_info = {"osm-config-mapping": mapping}
796 for vca in vca_deployed_list:
797 if not vca["member-vnf-index"]:
798 continue
799 if not vca["vdu_id"]:
800 mapping[vca["member-vnf-index"]] = vca["application"]
801 else:
802 mapping[
803 "{}.{}.{}".format(
804 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
805 )
806 ] = vca["application"]
807 return ns_config_info
808
809 async def _instantiate_ng_ro(
810 self,
811 logging_text,
812 nsr_id,
813 nsd,
814 db_nsr,
815 db_nslcmop,
816 db_vnfrs,
817 db_vnfds,
818 n2vc_key_list,
819 stage,
820 start_deploy,
821 timeout_ns_deploy,
822 ):
823
824 db_vims = {}
825
826 def get_vim_account(vim_account_id):
827 nonlocal db_vims
828 if vim_account_id in db_vims:
829 return db_vims[vim_account_id]
830 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
831 db_vims[vim_account_id] = db_vim
832 return db_vim
833
834 # modify target_vld info with instantiation parameters
835 def parse_vld_instantiation_params(
836 target_vim, target_vld, vld_params, target_sdn
837 ):
838 if vld_params.get("ip-profile"):
839 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
840 "ip-profile"
841 ]
842 if vld_params.get("provider-network"):
843 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
844 "provider-network"
845 ]
846 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
847 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
848 "provider-network"
849 ]["sdn-ports"]
850
851 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
852 # if wim_account_id is specified in vld_params, validate if it is feasible.
853 wim_account_id, db_wim = select_feasible_wim_account(
854 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
855 )
856
857 if wim_account_id:
858 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
859 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
860 # update vld_params with correct WIM account Id
861 vld_params["wimAccountId"] = wim_account_id
862
863 target_wim = "wim:{}".format(wim_account_id)
864 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
865 sdn_ports = get_sdn_ports(vld_params, db_wim)
866 if len(sdn_ports) > 0:
867 target_vld["vim_info"][target_wim] = target_wim_attrs
868 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
869
870 self.logger.debug(
871 "Target VLD with WIM data: {:s}".format(str(target_vld))
872 )
873
874 for param in ("vim-network-name", "vim-network-id"):
875 if vld_params.get(param):
876 if isinstance(vld_params[param], dict):
877 for vim, vim_net in vld_params[param].items():
878 other_target_vim = "vim:" + vim
879 populate_dict(
880 target_vld["vim_info"],
881 (other_target_vim, param.replace("-", "_")),
882 vim_net,
883 )
884 else: # isinstance str
885 target_vld["vim_info"][target_vim][
886 param.replace("-", "_")
887 ] = vld_params[param]
888 if vld_params.get("common_id"):
889 target_vld["common_id"] = vld_params.get("common_id")
890
891 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
892 def update_ns_vld_target(target, ns_params):
893 for vnf_params in ns_params.get("vnf", ()):
894 if vnf_params.get("vimAccountId"):
895 target_vnf = next(
896 (
897 vnfr
898 for vnfr in db_vnfrs.values()
899 if vnf_params["member-vnf-index"]
900 == vnfr["member-vnf-index-ref"]
901 ),
902 None,
903 )
904 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
905 if not vdur:
906 return
907 for a_index, a_vld in enumerate(target["ns"]["vld"]):
908 target_vld = find_in_list(
909 get_iterable(vdur, "interfaces"),
910 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
911 )
912
913 vld_params = find_in_list(
914 get_iterable(ns_params, "vld"),
915 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
916 )
917 if target_vld:
918
919 if vnf_params.get("vimAccountId") not in a_vld.get(
920 "vim_info", {}
921 ):
922 target_vim_network_list = [
923 v for _, v in a_vld.get("vim_info").items()
924 ]
925 target_vim_network_name = next(
926 (
927 item.get("vim_network_name", "")
928 for item in target_vim_network_list
929 ),
930 "",
931 )
932
933 target["ns"]["vld"][a_index].get("vim_info").update(
934 {
935 "vim:{}".format(vnf_params["vimAccountId"]): {
936 "vim_network_name": target_vim_network_name,
937 }
938 }
939 )
940
941 if vld_params:
942 for param in ("vim-network-name", "vim-network-id"):
943 if vld_params.get(param) and isinstance(
944 vld_params[param], dict
945 ):
946 for vim, vim_net in vld_params[
947 param
948 ].items():
949 other_target_vim = "vim:" + vim
950 populate_dict(
951 target["ns"]["vld"][a_index].get(
952 "vim_info"
953 ),
954 (
955 other_target_vim,
956 param.replace("-", "_"),
957 ),
958 vim_net,
959 )
960
961 nslcmop_id = db_nslcmop["_id"]
962 target = {
963 "name": db_nsr["name"],
964 "ns": {"vld": []},
965 "vnf": [],
966 "image": deepcopy(db_nsr["image"]),
967 "flavor": deepcopy(db_nsr["flavor"]),
968 "action_id": nslcmop_id,
969 "cloud_init_content": {},
970 }
971 for image in target["image"]:
972 image["vim_info"] = {}
973 for flavor in target["flavor"]:
974 flavor["vim_info"] = {}
975 if db_nsr.get("affinity-or-anti-affinity-group"):
976 target["affinity-or-anti-affinity-group"] = deepcopy(
977 db_nsr["affinity-or-anti-affinity-group"]
978 )
979 for affinity_or_anti_affinity_group in target[
980 "affinity-or-anti-affinity-group"
981 ]:
982 affinity_or_anti_affinity_group["vim_info"] = {}
983
984 if db_nslcmop.get("lcmOperationType") != "instantiate":
985 # get parameters of instantiation:
986 db_nslcmop_instantiate = self.db.get_list(
987 "nslcmops",
988 {
989 "nsInstanceId": db_nslcmop["nsInstanceId"],
990 "lcmOperationType": "instantiate",
991 },
992 )[-1]
993 ns_params = db_nslcmop_instantiate.get("operationParams")
994 else:
995 ns_params = db_nslcmop.get("operationParams")
996 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
997 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
998
999 cp2target = {}
1000 for vld_index, vld in enumerate(db_nsr.get("vld")):
1001 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1002 target_vld = {
1003 "id": vld["id"],
1004 "name": vld["name"],
1005 "mgmt-network": vld.get("mgmt-network", False),
1006 "type": vld.get("type"),
1007 "vim_info": {
1008 target_vim: {
1009 "vim_network_name": vld.get("vim-network-name"),
1010 "vim_account_id": ns_params["vimAccountId"],
1011 }
1012 },
1013 }
1014 # check if this network needs SDN assist
1015 if vld.get("pci-interfaces"):
1016 db_vim = get_vim_account(ns_params["vimAccountId"])
1017 if vim_config := db_vim.get("config"):
1018 if sdnc_id := vim_config.get("sdn-controller"):
1019 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1020 target_sdn = "sdn:{}".format(sdnc_id)
1021 target_vld["vim_info"][target_sdn] = {
1022 "sdn": True,
1023 "target_vim": target_vim,
1024 "vlds": [sdn_vld],
1025 "type": vld.get("type"),
1026 }
1027
1028 nsd_vnf_profiles = get_vnf_profiles(nsd)
1029 for nsd_vnf_profile in nsd_vnf_profiles:
1030 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1031 if cp["virtual-link-profile-id"] == vld["id"]:
1032 cp2target[
1033 "member_vnf:{}.{}".format(
1034 cp["constituent-cpd-id"][0][
1035 "constituent-base-element-id"
1036 ],
1037 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1038 )
1039 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1040
1041 # check at nsd descriptor, if there is an ip-profile
1042 vld_params = {}
1043 nsd_vlp = find_in_list(
1044 get_virtual_link_profiles(nsd),
1045 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1046 == vld["id"],
1047 )
1048 if (
1049 nsd_vlp
1050 and nsd_vlp.get("virtual-link-protocol-data")
1051 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1052 ):
1053 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1054 "l3-protocol-data"
1055 ]
1056 ip_profile_dest_data = {}
1057 if "ip-version" in ip_profile_source_data:
1058 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1059 "ip-version"
1060 ]
1061 if "cidr" in ip_profile_source_data:
1062 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1063 "cidr"
1064 ]
1065 if "gateway-ip" in ip_profile_source_data:
1066 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1067 "gateway-ip"
1068 ]
1069 if "dhcp-enabled" in ip_profile_source_data:
1070 ip_profile_dest_data["dhcp-params"] = {
1071 "enabled": ip_profile_source_data["dhcp-enabled"]
1072 }
1073 vld_params["ip-profile"] = ip_profile_dest_data
1074
1075 # update vld_params with instantiation params
1076 vld_instantiation_params = find_in_list(
1077 get_iterable(ns_params, "vld"),
1078 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1079 )
1080 if vld_instantiation_params:
1081 vld_params.update(vld_instantiation_params)
1082 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1083 target["ns"]["vld"].append(target_vld)
1084 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1085 update_ns_vld_target(target, ns_params)
1086
1087 for vnfr in db_vnfrs.values():
1088 vnfd = find_in_list(
1089 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1090 )
1091 vnf_params = find_in_list(
1092 get_iterable(ns_params, "vnf"),
1093 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1094 )
1095 target_vnf = deepcopy(vnfr)
1096 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1097 for vld in target_vnf.get("vld", ()):
1098 # check if connected to a ns.vld, to fill target'
1099 vnf_cp = find_in_list(
1100 vnfd.get("int-virtual-link-desc", ()),
1101 lambda cpd: cpd.get("id") == vld["id"],
1102 )
1103 if vnf_cp:
1104 ns_cp = "member_vnf:{}.{}".format(
1105 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1106 )
1107 if cp2target.get(ns_cp):
1108 vld["target"] = cp2target[ns_cp]
1109
1110 vld["vim_info"] = {
1111 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1112 }
1113 # check if this network needs SDN assist
1114 target_sdn = None
1115 if vld.get("pci-interfaces"):
1116 db_vim = get_vim_account(vnfr["vim-account-id"])
1117 sdnc_id = db_vim["config"].get("sdn-controller")
1118 if sdnc_id:
1119 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1120 target_sdn = "sdn:{}".format(sdnc_id)
1121 vld["vim_info"][target_sdn] = {
1122 "sdn": True,
1123 "target_vim": target_vim,
1124 "vlds": [sdn_vld],
1125 "type": vld.get("type"),
1126 }
1127
1128 # check at vnfd descriptor, if there is an ip-profile
1129 vld_params = {}
1130 vnfd_vlp = find_in_list(
1131 get_virtual_link_profiles(vnfd),
1132 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1133 )
1134 if (
1135 vnfd_vlp
1136 and vnfd_vlp.get("virtual-link-protocol-data")
1137 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1138 ):
1139 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1140 "l3-protocol-data"
1141 ]
1142 ip_profile_dest_data = {}
1143 if "ip-version" in ip_profile_source_data:
1144 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1145 "ip-version"
1146 ]
1147 if "cidr" in ip_profile_source_data:
1148 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1149 "cidr"
1150 ]
1151 if "gateway-ip" in ip_profile_source_data:
1152 ip_profile_dest_data[
1153 "gateway-address"
1154 ] = ip_profile_source_data["gateway-ip"]
1155 if "dhcp-enabled" in ip_profile_source_data:
1156 ip_profile_dest_data["dhcp-params"] = {
1157 "enabled": ip_profile_source_data["dhcp-enabled"]
1158 }
1159
1160 vld_params["ip-profile"] = ip_profile_dest_data
1161 # update vld_params with instantiation params
1162 if vnf_params:
1163 vld_instantiation_params = find_in_list(
1164 get_iterable(vnf_params, "internal-vld"),
1165 lambda i_vld: i_vld["name"] == vld["id"],
1166 )
1167 if vld_instantiation_params:
1168 vld_params.update(vld_instantiation_params)
1169 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1170
1171 vdur_list = []
1172 for vdur in target_vnf.get("vdur", ()):
1173 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1174 continue # This vdu must not be created
1175 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1176
1177 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1178
1179 if ssh_keys_all:
1180 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1181 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1182 if (
1183 vdu_configuration
1184 and vdu_configuration.get("config-access")
1185 and vdu_configuration.get("config-access").get("ssh-access")
1186 ):
1187 vdur["ssh-keys"] = ssh_keys_all
1188 vdur["ssh-access-required"] = vdu_configuration[
1189 "config-access"
1190 ]["ssh-access"]["required"]
1191 elif (
1192 vnf_configuration
1193 and vnf_configuration.get("config-access")
1194 and vnf_configuration.get("config-access").get("ssh-access")
1195 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1196 ):
1197 vdur["ssh-keys"] = ssh_keys_all
1198 vdur["ssh-access-required"] = vnf_configuration[
1199 "config-access"
1200 ]["ssh-access"]["required"]
1201 elif ssh_keys_instantiation and find_in_list(
1202 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1203 ):
1204 vdur["ssh-keys"] = ssh_keys_instantiation
1205
1206 self.logger.debug("NS > vdur > {}".format(vdur))
1207
1208 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1209 # cloud-init
1210 if vdud.get("cloud-init-file"):
1211 vdur["cloud-init"] = "{}:file:{}".format(
1212 vnfd["_id"], vdud.get("cloud-init-file")
1213 )
1214 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1215 if vdur["cloud-init"] not in target["cloud_init_content"]:
1216 base_folder = vnfd["_admin"]["storage"]
1217 if base_folder["pkg-dir"]:
1218 cloud_init_file = "{}/{}/cloud_init/{}".format(
1219 base_folder["folder"],
1220 base_folder["pkg-dir"],
1221 vdud.get("cloud-init-file"),
1222 )
1223 else:
1224 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1225 base_folder["folder"],
1226 vdud.get("cloud-init-file"),
1227 )
1228 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1229 target["cloud_init_content"][
1230 vdur["cloud-init"]
1231 ] = ci_file.read()
1232 elif vdud.get("cloud-init"):
1233 vdur["cloud-init"] = "{}:vdu:{}".format(
1234 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1235 )
1236 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1237 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1238 "cloud-init"
1239 ]
1240 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1241 deploy_params_vdu = self._format_additional_params(
1242 vdur.get("additionalParams") or {}
1243 )
1244 deploy_params_vdu["OSM"] = get_osm_params(
1245 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1246 )
1247 vdur["additionalParams"] = deploy_params_vdu
1248
1249 # flavor
1250 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1251 if target_vim not in ns_flavor["vim_info"]:
1252 ns_flavor["vim_info"][target_vim] = {}
1253
1254 # deal with images
1255 # in case alternative images are provided we must check if they should be applied
1256 # for the vim_type, modify the vim_type taking into account
1257 ns_image_id = int(vdur["ns-image-id"])
1258 if vdur.get("alt-image-ids"):
1259 db_vim = get_vim_account(vnfr["vim-account-id"])
1260 vim_type = db_vim["vim_type"]
1261 for alt_image_id in vdur.get("alt-image-ids"):
1262 ns_alt_image = target["image"][int(alt_image_id)]
1263 if vim_type == ns_alt_image.get("vim-type"):
1264 # must use alternative image
1265 self.logger.debug(
1266 "use alternative image id: {}".format(alt_image_id)
1267 )
1268 ns_image_id = alt_image_id
1269 vdur["ns-image-id"] = ns_image_id
1270 break
1271 ns_image = target["image"][int(ns_image_id)]
1272 if target_vim not in ns_image["vim_info"]:
1273 ns_image["vim_info"][target_vim] = {}
1274
1275 # Affinity groups
1276 if vdur.get("affinity-or-anti-affinity-group-id"):
1277 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1278 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1279 if target_vim not in ns_ags["vim_info"]:
1280 ns_ags["vim_info"][target_vim] = {}
1281
1282 vdur["vim_info"] = {target_vim: {}}
1283 # instantiation parameters
1284 if vnf_params:
1285 vdu_instantiation_params = find_in_list(
1286 get_iterable(vnf_params, "vdu"),
1287 lambda i_vdu: i_vdu["id"] == vdud["id"],
1288 )
1289 if vdu_instantiation_params:
1290 # Parse the vdu_volumes from the instantiation params
1291 vdu_volumes = get_volumes_from_instantiation_params(
1292 vdu_instantiation_params, vdud
1293 )
1294 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1295 vdur_list.append(vdur)
1296 target_vnf["vdur"] = vdur_list
1297 target["vnf"].append(target_vnf)
1298
1299 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1300 desc = await self.RO.deploy(nsr_id, target)
1301 self.logger.debug("RO return > {}".format(desc))
1302 action_id = desc["action_id"]
1303 await self._wait_ng_ro(
1304 nsr_id,
1305 action_id,
1306 nslcmop_id,
1307 start_deploy,
1308 timeout_ns_deploy,
1309 stage,
1310 operation="instantiation",
1311 )
1312
1313 # Updating NSR
1314 db_nsr_update = {
1315 "_admin.deployed.RO.operational-status": "running",
1316 "detailed-status": " ".join(stage),
1317 }
1318 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1319 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1320 self._write_op_status(nslcmop_id, stage)
1321 self.logger.debug(
1322 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1323 )
1324 return
1325
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        """Poll NG-RO until the given action finishes or the timeout expires.

        :param nsr_id: NS record id the action belongs to
        :param action_id: RO action id returned by a previous deploy request
        :param nslcmop_id: when given together with stage, the operation status
            is persisted to the database every time the detailed status changes
        :param start_time: epoch seconds when the wait started (defaults to now)
        :param timeout: maximum seconds to wait
        :param stage: 3-item status list; item [2] is overwritten with VIM progress
        :param operation: key of self.op_status_map selecting the RO status query
            (e.g. "instantiation" or "termination")
        :raises NgRoException: when RO reports FAILED or the timeout expires
        :raises AssertionError: when RO returns an unknown status value
        """
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # persist progress only when the detailed status actually changed,
            # to avoid hammering the database on every 15 s poll
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            await asyncio.sleep(15, loop=self.loop)
        else:  # while loop exhausted without break -> timed out
            raise NgRoException("Timeout waiting ns to deploy")
1363
1364 async def _terminate_ng_ro(
1365 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1366 ):
1367 db_nsr_update = {}
1368 failed_detail = []
1369 action_id = None
1370 start_deploy = time()
1371 try:
1372 target = {
1373 "ns": {"vld": []},
1374 "vnf": [],
1375 "image": [],
1376 "flavor": [],
1377 "action_id": nslcmop_id,
1378 }
1379 desc = await self.RO.deploy(nsr_id, target)
1380 action_id = desc["action_id"]
1381 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1382 self.logger.debug(
1383 logging_text
1384 + "ns terminate action at RO. action_id={}".format(action_id)
1385 )
1386
1387 # wait until done
1388 delete_timeout = 20 * 60 # 20 minutes
1389 await self._wait_ng_ro(
1390 nsr_id,
1391 action_id,
1392 nslcmop_id,
1393 start_deploy,
1394 delete_timeout,
1395 stage,
1396 operation="termination",
1397 )
1398 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1399 # delete all nsr
1400 await self.RO.delete(nsr_id)
1401 except NgRoException as e:
1402 if e.http_code == 404: # not found
1403 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1404 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1405 self.logger.debug(
1406 logging_text + "RO_action_id={} already deleted".format(action_id)
1407 )
1408 elif e.http_code == 409: # conflict
1409 failed_detail.append("delete conflict: {}".format(e))
1410 self.logger.debug(
1411 logging_text
1412 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1413 )
1414 else:
1415 failed_detail.append("delete error: {}".format(e))
1416 self.logger.error(
1417 logging_text
1418 + "RO_action_id={} delete error: {}".format(action_id, e)
1419 )
1420 except Exception as e:
1421 failed_detail.append("delete error: {}".format(e))
1422 self.logger.error(
1423 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1424 )
1425
1426 if failed_detail:
1427 stage[2] = "Error deleting from VIM"
1428 else:
1429 stage[2] = "Deleted from VIM"
1430 db_nsr_update["detailed-status"] = " ".join(stage)
1431 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1432 self._write_op_status(nslcmop_id, stage)
1433
1434 if failed_detail:
1435 raise LcmException("; ".join(failed_detail))
1436 return
1437
1438 async def instantiate_RO(
1439 self,
1440 logging_text,
1441 nsr_id,
1442 nsd,
1443 db_nsr,
1444 db_nslcmop,
1445 db_vnfrs,
1446 db_vnfds,
1447 n2vc_key_list,
1448 stage,
1449 ):
1450 """
1451 Instantiate at RO
1452 :param logging_text: preffix text to use at logging
1453 :param nsr_id: nsr identity
1454 :param nsd: database content of ns descriptor
1455 :param db_nsr: database content of ns record
1456 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1457 :param db_vnfrs:
1458 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1459 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1460 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1461 :return: None or exception
1462 """
1463 try:
1464 start_deploy = time()
1465 ns_params = db_nslcmop.get("operationParams")
1466 if ns_params and ns_params.get("timeout_ns_deploy"):
1467 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1468 else:
1469 timeout_ns_deploy = self.timeout.ns_deploy
1470
1471 # Check for and optionally request placement optimization. Database will be updated if placement activated
1472 stage[2] = "Waiting for Placement."
1473 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1474 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1475 for vnfr in db_vnfrs.values():
1476 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1477 break
1478 else:
1479 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1480
1481 return await self._instantiate_ng_ro(
1482 logging_text,
1483 nsr_id,
1484 nsd,
1485 db_nsr,
1486 db_nslcmop,
1487 db_vnfrs,
1488 db_vnfds,
1489 n2vc_key_list,
1490 stage,
1491 start_deploy,
1492 timeout_ns_deploy,
1493 )
1494 except Exception as e:
1495 stage[2] = "ERROR deploying at VIM"
1496 self.set_vnfr_at_error(db_vnfrs, str(e))
1497 self.logger.error(
1498 "Error deploying at VIM {}".format(e),
1499 exc_info=not isinstance(
1500 e,
1501 (
1502 ROclient.ROClientException,
1503 LcmException,
1504 DbException,
1505 NgRoException,
1506 ),
1507 ),
1508 )
1509 raise
1510
1511 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1512 """
1513 Wait for kdu to be up, get ip address
1514 :param logging_text: prefix use for logging
1515 :param nsr_id:
1516 :param vnfr_id:
1517 :param kdu_name:
1518 :return: IP address, K8s services
1519 """
1520
1521 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1522 nb_tries = 0
1523
1524 while nb_tries < 360:
1525 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1526 kdur = next(
1527 (
1528 x
1529 for x in get_iterable(db_vnfr, "kdur")
1530 if x.get("kdu-name") == kdu_name
1531 ),
1532 None,
1533 )
1534 if not kdur:
1535 raise LcmException(
1536 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1537 )
1538 if kdur.get("status"):
1539 if kdur["status"] in ("READY", "ENABLED"):
1540 return kdur.get("ip-address"), kdur.get("services")
1541 else:
1542 raise LcmException(
1543 "target KDU={} is in error state".format(kdu_name)
1544 )
1545
1546 await asyncio.sleep(10, loop=self.loop)
1547 nb_tries += 1
1548 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1549
1550 async def wait_vm_up_insert_key_ro(
1551 self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
1552 ):
1553 """
1554 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1555 :param logging_text: prefix use for logging
1556 :param nsr_id:
1557 :param vnfr_id:
1558 :param vdu_id:
1559 :param vdu_index:
1560 :param pub_key: public ssh key to inject, None to skip
1561 :param user: user to apply the public ssh key
1562 :return: IP address
1563 """
1564
1565 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1566 ip_address = None
1567 target_vdu_id = None
1568 ro_retries = 0
1569
1570 while True:
1571
1572 ro_retries += 1
1573 if ro_retries >= 360: # 1 hour
1574 raise LcmException(
1575 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
1576 )
1577
1578 await asyncio.sleep(10, loop=self.loop)
1579
1580 # get ip address
1581 if not target_vdu_id:
1582 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1583
1584 if not vdu_id: # for the VNF case
1585 if db_vnfr.get("status") == "ERROR":
1586 raise LcmException(
1587 "Cannot inject ssh-key because target VNF is in error state"
1588 )
1589 ip_address = db_vnfr.get("ip-address")
1590 if not ip_address:
1591 continue
1592 vdur = next(
1593 (
1594 x
1595 for x in get_iterable(db_vnfr, "vdur")
1596 if x.get("ip-address") == ip_address
1597 ),
1598 None,
1599 )
1600 else: # VDU case
1601 vdur = next(
1602 (
1603 x
1604 for x in get_iterable(db_vnfr, "vdur")
1605 if x.get("vdu-id-ref") == vdu_id
1606 and x.get("count-index") == vdu_index
1607 ),
1608 None,
1609 )
1610
1611 if (
1612 not vdur and len(db_vnfr.get("vdur", ())) == 1
1613 ): # If only one, this should be the target vdu
1614 vdur = db_vnfr["vdur"][0]
1615 if not vdur:
1616 raise LcmException(
1617 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1618 vnfr_id, vdu_id, vdu_index
1619 )
1620 )
1621 # New generation RO stores information at "vim_info"
1622 ng_ro_status = None
1623 target_vim = None
1624 if vdur.get("vim_info"):
1625 target_vim = next(
1626 t for t in vdur["vim_info"]
1627 ) # there should be only one key
1628 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1629 if (
1630 vdur.get("pdu-type")
1631 or vdur.get("status") == "ACTIVE"
1632 or ng_ro_status == "ACTIVE"
1633 ):
1634 ip_address = vdur.get("ip-address")
1635 if not ip_address:
1636 continue
1637 target_vdu_id = vdur["vdu-id-ref"]
1638 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1639 raise LcmException(
1640 "Cannot inject ssh-key because target VM is in error state"
1641 )
1642
1643 if not target_vdu_id:
1644 continue
1645
1646 # inject public key into machine
1647 if pub_key and user:
1648 self.logger.debug(logging_text + "Inserting RO key")
1649 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1650 if vdur.get("pdu-type"):
1651 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1652 return ip_address
1653 try:
1654 target = {
1655 "action": {
1656 "action": "inject_ssh_key",
1657 "key": pub_key,
1658 "user": user,
1659 },
1660 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1661 }
1662 desc = await self.RO.deploy(nsr_id, target)
1663 action_id = desc["action_id"]
1664 await self._wait_ng_ro(
1665 nsr_id, action_id, timeout=600, operation="instantiation"
1666 )
1667 break
1668 except NgRoException as e:
1669 raise LcmException(
1670 "Reaching max tries injecting key. Error: {}".format(e)
1671 )
1672 else:
1673 break
1674
1675 return ip_address
1676
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id whose configurationStatus list is polled
        :param vca_deployed_list: list of deployed VCAs; only the entry at
            vca_index is inspected to determine the dependency level
        :param vca_index: index of the VCA whose dependencies must be READY
        :raises LcmException: a dependent charm is BROKEN, or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): "timeout" counts 10-second poll iterations, so the real
        # wait is up to 300 * 10 s (~50 min), not 300 s — confirm if intended
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level VCA (no member-vnf-index) depends on all others;
                # VNF-level VCA depends only on entries of the same VNF
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # a dependency is still in progress: stop scanning and retry
                        break
            else:
                # loop completed without break: all dependencies READY
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1714
1715 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1716 vca_id = None
1717 if db_vnfr:
1718 vca_id = deep_get(db_vnfr, ("vca-id",))
1719 elif db_nsr:
1720 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1721 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1722 return vca_id
1723
1724 async def instantiate_N2VC(
1725 self,
1726 logging_text,
1727 vca_index,
1728 nsi_id,
1729 db_nsr,
1730 db_vnfr,
1731 vdu_id,
1732 kdu_name,
1733 vdu_index,
1734 config_descriptor,
1735 deploy_params,
1736 base_folder,
1737 nslcmop_id,
1738 stage,
1739 vca_type,
1740 vca_name,
1741 ee_config_descriptor,
1742 ):
1743 nsr_id = db_nsr["_id"]
1744 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1745 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1746 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1747 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1748 db_dict = {
1749 "collection": "nsrs",
1750 "filter": {"_id": nsr_id},
1751 "path": db_update_entry,
1752 }
1753 step = ""
1754 try:
1755
1756 element_type = "NS"
1757 element_under_configuration = nsr_id
1758
1759 vnfr_id = None
1760 if db_vnfr:
1761 vnfr_id = db_vnfr["_id"]
1762 osm_config["osm"]["vnf_id"] = vnfr_id
1763
1764 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1765
1766 if vca_type == "native_charm":
1767 index_number = 0
1768 else:
1769 index_number = vdu_index or 0
1770
1771 if vnfr_id:
1772 element_type = "VNF"
1773 element_under_configuration = vnfr_id
1774 namespace += ".{}-{}".format(vnfr_id, index_number)
1775 if vdu_id:
1776 namespace += ".{}-{}".format(vdu_id, index_number)
1777 element_type = "VDU"
1778 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1779 osm_config["osm"]["vdu_id"] = vdu_id
1780 elif kdu_name:
1781 namespace += ".{}".format(kdu_name)
1782 element_type = "KDU"
1783 element_under_configuration = kdu_name
1784 osm_config["osm"]["kdu_name"] = kdu_name
1785
1786 # Get artifact path
1787 if base_folder["pkg-dir"]:
1788 artifact_path = "{}/{}/{}/{}".format(
1789 base_folder["folder"],
1790 base_folder["pkg-dir"],
1791 "charms"
1792 if vca_type
1793 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1794 else "helm-charts",
1795 vca_name,
1796 )
1797 else:
1798 artifact_path = "{}/Scripts/{}/{}/".format(
1799 base_folder["folder"],
1800 "charms"
1801 if vca_type
1802 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1803 else "helm-charts",
1804 vca_name,
1805 )
1806
1807 self.logger.debug("Artifact path > {}".format(artifact_path))
1808
1809 # get initial_config_primitive_list that applies to this element
1810 initial_config_primitive_list = config_descriptor.get(
1811 "initial-config-primitive"
1812 )
1813
1814 self.logger.debug(
1815 "Initial config primitive list > {}".format(
1816 initial_config_primitive_list
1817 )
1818 )
1819
1820 # add config if not present for NS charm
1821 ee_descriptor_id = ee_config_descriptor.get("id")
1822 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1823 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1824 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1825 )
1826
1827 self.logger.debug(
1828 "Initial config primitive list #2 > {}".format(
1829 initial_config_primitive_list
1830 )
1831 )
1832 # n2vc_redesign STEP 3.1
1833 # find old ee_id if exists
1834 ee_id = vca_deployed.get("ee_id")
1835
1836 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1837 # create or register execution environment in VCA
1838 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1839
1840 self._write_configuration_status(
1841 nsr_id=nsr_id,
1842 vca_index=vca_index,
1843 status="CREATING",
1844 element_under_configuration=element_under_configuration,
1845 element_type=element_type,
1846 )
1847
1848 step = "create execution environment"
1849 self.logger.debug(logging_text + step)
1850
1851 ee_id = None
1852 credentials = None
1853 if vca_type == "k8s_proxy_charm":
1854 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1855 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1856 namespace=namespace,
1857 artifact_path=artifact_path,
1858 db_dict=db_dict,
1859 vca_id=vca_id,
1860 )
1861 elif vca_type == "helm" or vca_type == "helm-v3":
1862 ee_id, credentials = await self.vca_map[
1863 vca_type
1864 ].create_execution_environment(
1865 namespace=namespace,
1866 reuse_ee_id=ee_id,
1867 db_dict=db_dict,
1868 config=osm_config,
1869 artifact_path=artifact_path,
1870 chart_model=vca_name,
1871 vca_type=vca_type,
1872 )
1873 else:
1874 ee_id, credentials = await self.vca_map[
1875 vca_type
1876 ].create_execution_environment(
1877 namespace=namespace,
1878 reuse_ee_id=ee_id,
1879 db_dict=db_dict,
1880 vca_id=vca_id,
1881 )
1882
1883 elif vca_type == "native_charm":
1884 step = "Waiting to VM being up and getting IP address"
1885 self.logger.debug(logging_text + step)
1886 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1887 logging_text,
1888 nsr_id,
1889 vnfr_id,
1890 vdu_id,
1891 vdu_index,
1892 user=None,
1893 pub_key=None,
1894 )
1895 credentials = {"hostname": rw_mgmt_ip}
1896 # get username
1897 username = deep_get(
1898 config_descriptor, ("config-access", "ssh-access", "default-user")
1899 )
1900 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1901 # merged. Meanwhile let's get username from initial-config-primitive
1902 if not username and initial_config_primitive_list:
1903 for config_primitive in initial_config_primitive_list:
1904 for param in config_primitive.get("parameter", ()):
1905 if param["name"] == "ssh-username":
1906 username = param["value"]
1907 break
1908 if not username:
1909 raise LcmException(
1910 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1911 "'config-access.ssh-access.default-user'"
1912 )
1913 credentials["username"] = username
1914 # n2vc_redesign STEP 3.2
1915
1916 self._write_configuration_status(
1917 nsr_id=nsr_id,
1918 vca_index=vca_index,
1919 status="REGISTERING",
1920 element_under_configuration=element_under_configuration,
1921 element_type=element_type,
1922 )
1923
1924 step = "register execution environment {}".format(credentials)
1925 self.logger.debug(logging_text + step)
1926 ee_id = await self.vca_map[vca_type].register_execution_environment(
1927 credentials=credentials,
1928 namespace=namespace,
1929 db_dict=db_dict,
1930 vca_id=vca_id,
1931 )
1932
1933 # for compatibility with MON/POL modules, the need model and application name at database
1934 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1935 ee_id_parts = ee_id.split(".")
1936 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1937 if len(ee_id_parts) >= 2:
1938 model_name = ee_id_parts[0]
1939 application_name = ee_id_parts[1]
1940 db_nsr_update[db_update_entry + "model"] = model_name
1941 db_nsr_update[db_update_entry + "application"] = application_name
1942
1943 # n2vc_redesign STEP 3.3
1944 step = "Install configuration Software"
1945
1946 self._write_configuration_status(
1947 nsr_id=nsr_id,
1948 vca_index=vca_index,
1949 status="INSTALLING SW",
1950 element_under_configuration=element_under_configuration,
1951 element_type=element_type,
1952 other_update=db_nsr_update,
1953 )
1954
1955 # TODO check if already done
1956 self.logger.debug(logging_text + step)
1957 config = None
1958 if vca_type == "native_charm":
1959 config_primitive = next(
1960 (p for p in initial_config_primitive_list if p["name"] == "config"),
1961 None,
1962 )
1963 if config_primitive:
1964 config = self._map_primitive_params(
1965 config_primitive, {}, deploy_params
1966 )
1967 num_units = 1
1968 if vca_type == "lxc_proxy_charm":
1969 if element_type == "NS":
1970 num_units = db_nsr.get("config-units") or 1
1971 elif element_type == "VNF":
1972 num_units = db_vnfr.get("config-units") or 1
1973 elif element_type == "VDU":
1974 for v in db_vnfr["vdur"]:
1975 if vdu_id == v["vdu-id-ref"]:
1976 num_units = v.get("config-units") or 1
1977 break
1978 if vca_type != "k8s_proxy_charm":
1979 await self.vca_map[vca_type].install_configuration_sw(
1980 ee_id=ee_id,
1981 artifact_path=artifact_path,
1982 db_dict=db_dict,
1983 config=config,
1984 num_units=num_units,
1985 vca_id=vca_id,
1986 vca_type=vca_type,
1987 )
1988
1989 # write in db flag of configuration_sw already installed
1990 self.update_db_2(
1991 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1992 )
1993
1994 # add relations for this VCA (wait for other peers related with this VCA)
1995 is_relation_added = await self._add_vca_relations(
1996 logging_text=logging_text,
1997 nsr_id=nsr_id,
1998 vca_type=vca_type,
1999 vca_index=vca_index,
2000 )
2001
2002 if not is_relation_added:
2003 raise LcmException("Relations could not be added to VCA.")
2004
2005 # if SSH access is required, then get execution environment SSH public
2006 # if native charm we have waited already to VM be UP
2007 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2008 pub_key = None
2009 user = None
2010 # self.logger.debug("get ssh key block")
2011 if deep_get(
2012 config_descriptor, ("config-access", "ssh-access", "required")
2013 ):
2014 # self.logger.debug("ssh key needed")
2015 # Needed to inject a ssh key
2016 user = deep_get(
2017 config_descriptor,
2018 ("config-access", "ssh-access", "default-user"),
2019 )
2020 step = "Install configuration Software, getting public ssh key"
2021 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2022 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2023 )
2024
2025 step = "Insert public key into VM user={} ssh_key={}".format(
2026 user, pub_key
2027 )
2028 else:
2029 # self.logger.debug("no need to get ssh key")
2030 step = "Waiting to VM being up and getting IP address"
2031 self.logger.debug(logging_text + step)
2032
2033 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2034 rw_mgmt_ip = None
2035
2036 # n2vc_redesign STEP 5.1
2037 # wait for RO (ip-address) Insert pub_key into VM
2038 if vnfr_id:
2039 if kdu_name:
2040 rw_mgmt_ip, services = await self.wait_kdu_up(
2041 logging_text, nsr_id, vnfr_id, kdu_name
2042 )
2043 vnfd = self.db.get_one(
2044 "vnfds_revisions",
2045 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2046 )
2047 kdu = get_kdu(vnfd, kdu_name)
2048 kdu_services = [
2049 service["name"] for service in get_kdu_services(kdu)
2050 ]
2051 exposed_services = []
2052 for service in services:
2053 if any(s in service["name"] for s in kdu_services):
2054 exposed_services.append(service)
2055 await self.vca_map[vca_type].exec_primitive(
2056 ee_id=ee_id,
2057 primitive_name="config",
2058 params_dict={
2059 "osm-config": json.dumps(
2060 OsmConfigBuilder(
2061 k8s={"services": exposed_services}
2062 ).build()
2063 )
2064 },
2065 vca_id=vca_id,
2066 )
2067
2068 # This verification is needed in order to avoid trying to add a public key
2069 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2070 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2071 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2072 # or it is a KNF)
2073 elif db_vnfr.get("vdur"):
2074 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2075 logging_text,
2076 nsr_id,
2077 vnfr_id,
2078 vdu_id,
2079 vdu_index,
2080 user=user,
2081 pub_key=pub_key,
2082 )
2083
2084 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2085
2086 # store rw_mgmt_ip in deploy params for later replacement
2087 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2088
2089 # n2vc_redesign STEP 6 Execute initial config primitive
2090 step = "execute initial config primitive"
2091
2092 # wait for dependent primitives execution (NS -> VNF -> VDU)
2093 if initial_config_primitive_list:
2094 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2095
2096 # stage, in function of element type: vdu, kdu, vnf or ns
2097 my_vca = vca_deployed_list[vca_index]
2098 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2099 # VDU or KDU
2100 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2101 elif my_vca.get("member-vnf-index"):
2102 # VNF
2103 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2104 else:
2105 # NS
2106 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2107
2108 self._write_configuration_status(
2109 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2110 )
2111
2112 self._write_op_status(op_id=nslcmop_id, stage=stage)
2113
2114 check_if_terminated_needed = True
2115 for initial_config_primitive in initial_config_primitive_list:
2116 # adding information on the vca_deployed if it is a NS execution environment
2117 if not vca_deployed["member-vnf-index"]:
2118 deploy_params["ns_config_info"] = json.dumps(
2119 self._get_ns_config_info(nsr_id)
2120 )
2121 # TODO check if already done
2122 primitive_params_ = self._map_primitive_params(
2123 initial_config_primitive, {}, deploy_params
2124 )
2125
2126 step = "execute primitive '{}' params '{}'".format(
2127 initial_config_primitive["name"], primitive_params_
2128 )
2129 self.logger.debug(logging_text + step)
2130 await self.vca_map[vca_type].exec_primitive(
2131 ee_id=ee_id,
2132 primitive_name=initial_config_primitive["name"],
2133 params_dict=primitive_params_,
2134 db_dict=db_dict,
2135 vca_id=vca_id,
2136 vca_type=vca_type,
2137 )
2138 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2139 if check_if_terminated_needed:
2140 if config_descriptor.get("terminate-config-primitive"):
2141 self.update_db_2(
2142 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2143 )
2144 check_if_terminated_needed = False
2145
2146 # TODO register in database that primitive is done
2147
2148 # STEP 7 Configure metrics
2149 if vca_type == "helm" or vca_type == "helm-v3":
2150 # TODO: review for those cases where the helm chart is a reference and
2151 # is not part of the NF package
2152 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2153 ee_id=ee_id,
2154 artifact_path=artifact_path,
2155 ee_config_descriptor=ee_config_descriptor,
2156 vnfr_id=vnfr_id,
2157 nsr_id=nsr_id,
2158 target_ip=rw_mgmt_ip,
2159 )
2160 if prometheus_jobs:
2161 self.update_db_2(
2162 "nsrs",
2163 nsr_id,
2164 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2165 )
2166
2167 for job in prometheus_jobs:
2168 self.db.set_one(
2169 "prometheus_jobs",
2170 {"job_name": job["job_name"]},
2171 job,
2172 upsert=True,
2173 fail_on_empty=False,
2174 )
2175
2176 step = "instantiated at VCA"
2177 self.logger.debug(logging_text + step)
2178
2179 self._write_configuration_status(
2180 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2181 )
2182
2183 except Exception as e: # TODO not use Exception but N2VC exception
2184 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2185 if not isinstance(
2186 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2187 ):
2188 self.logger.error(
2189 "Exception while {} : {}".format(step, e), exc_info=True
2190 )
2191 self._write_configuration_status(
2192 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2193 )
2194 raise LcmException("{}. {}".format(step, e)) from e
2195
2196 def _write_ns_status(
2197 self,
2198 nsr_id: str,
2199 ns_state: str,
2200 current_operation: str,
2201 current_operation_id: str,
2202 error_description: str = None,
2203 error_detail: str = None,
2204 other_update: dict = None,
2205 ):
2206 """
2207 Update db_nsr fields.
2208 :param nsr_id:
2209 :param ns_state:
2210 :param current_operation:
2211 :param current_operation_id:
2212 :param error_description:
2213 :param error_detail:
2214 :param other_update: Other required changes at database if provided, will be cleared
2215 :return:
2216 """
2217 try:
2218 db_dict = other_update or {}
2219 db_dict[
2220 "_admin.nslcmop"
2221 ] = current_operation_id # for backward compatibility
2222 db_dict["_admin.current-operation"] = current_operation_id
2223 db_dict["_admin.operation-type"] = (
2224 current_operation if current_operation != "IDLE" else None
2225 )
2226 db_dict["currentOperation"] = current_operation
2227 db_dict["currentOperationID"] = current_operation_id
2228 db_dict["errorDescription"] = error_description
2229 db_dict["errorDetail"] = error_detail
2230
2231 if ns_state:
2232 db_dict["nsState"] = ns_state
2233 self.update_db_2("nsrs", nsr_id, db_dict)
2234 except DbException as e:
2235 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2236
2237 def _write_op_status(
2238 self,
2239 op_id: str,
2240 stage: list = None,
2241 error_message: str = None,
2242 queuePosition: int = 0,
2243 operation_state: str = None,
2244 other_update: dict = None,
2245 ):
2246 try:
2247 db_dict = other_update or {}
2248 db_dict["queuePosition"] = queuePosition
2249 if isinstance(stage, list):
2250 db_dict["stage"] = stage[0]
2251 db_dict["detailed-status"] = " ".join(stage)
2252 elif stage is not None:
2253 db_dict["stage"] = str(stage)
2254
2255 if error_message is not None:
2256 db_dict["errorMessage"] = error_message
2257 if operation_state is not None:
2258 db_dict["operationState"] = operation_state
2259 db_dict["statusEnteredTime"] = time()
2260 self.update_db_2("nslcmops", op_id, db_dict)
2261 except DbException as e:
2262 self.logger.warn(
2263 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2264 )
2265
2266 def _write_all_config_status(self, db_nsr: dict, status: str):
2267 try:
2268 nsr_id = db_nsr["_id"]
2269 # configurationStatus
2270 config_status = db_nsr.get("configurationStatus")
2271 if config_status:
2272 db_nsr_update = {
2273 "configurationStatus.{}.status".format(index): status
2274 for index, v in enumerate(config_status)
2275 if v
2276 }
2277 # update status
2278 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2279
2280 except DbException as e:
2281 self.logger.warn(
2282 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2283 )
2284
2285 def _write_configuration_status(
2286 self,
2287 nsr_id: str,
2288 vca_index: int,
2289 status: str = None,
2290 element_under_configuration: str = None,
2291 element_type: str = None,
2292 other_update: dict = None,
2293 ):
2294
2295 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2296 # .format(vca_index, status))
2297
2298 try:
2299 db_path = "configurationStatus.{}.".format(vca_index)
2300 db_dict = other_update or {}
2301 if status:
2302 db_dict[db_path + "status"] = status
2303 if element_under_configuration:
2304 db_dict[
2305 db_path + "elementUnderConfiguration"
2306 ] = element_under_configuration
2307 if element_type:
2308 db_dict[db_path + "elementType"] = element_type
2309 self.update_db_2("nsrs", nsr_id, db_dict)
2310 except DbException as e:
2311 self.logger.warn(
2312 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2313 status, nsr_id, vca_index, e
2314 )
2315 )
2316
2317 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2318 """
2319 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2320 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2321 Database is used because the result can be obtained from a different LCM worker in case of HA.
2322 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2323 :param db_nslcmop: database content of nslcmop
2324 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2325 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2326 computed 'vim-account-id'
2327 """
2328 modified = False
2329 nslcmop_id = db_nslcmop["_id"]
2330 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2331 if placement_engine == "PLA":
2332 self.logger.debug(
2333 logging_text + "Invoke and wait for placement optimization"
2334 )
2335 await self.msg.aiowrite(
2336 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2337 )
2338 db_poll_interval = 5
2339 wait = db_poll_interval * 10
2340 pla_result = None
2341 while not pla_result and wait >= 0:
2342 await asyncio.sleep(db_poll_interval)
2343 wait -= db_poll_interval
2344 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2345 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2346
2347 if not pla_result:
2348 raise LcmException(
2349 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2350 )
2351
2352 for pla_vnf in pla_result["vnf"]:
2353 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2354 if not pla_vnf.get("vimAccountId") or not vnfr:
2355 continue
2356 modified = True
2357 self.db.set_one(
2358 "vnfrs",
2359 {"_id": vnfr["_id"]},
2360 {"vim-account-id": pla_vnf["vimAccountId"]},
2361 )
2362 # Modifies db_vnfrs
2363 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2364 return modified
2365
2366 def update_nsrs_with_pla_result(self, params):
2367 try:
2368 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2369 self.update_db_2(
2370 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2371 )
2372 except Exception as e:
2373 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2374
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a Network Service.

        Reads the operation and NS records from database, deploys KDUs, starts
        the VIM deployment (instantiate_RO) as a background task, deploys the
        execution environments/charms (N2VC) for NS, VNF, VDU and KDU levels,
        waits for all pending tasks and finally writes the resulting status to
        the nsrs/nslcmops records and notifies via kafka. Errors are reported
        at database instead of being raised.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None
        """

        # Try to lock HA task here: only one LCM worker runs this operation
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        timeout_ns_deploy = self.timeout.ns_deploy

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                # decode per-kdu additionalParams (stored JSON-encoded)
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so "vnfd_id not in
                # db_vnfds" compares a string against dicts and is always True;
                # the vnfd is re-read for every vnfr sharing the same vnfd.
                # Harmless but redundant — confirm and deduplicate by id.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.public_key:
                n2vc_key_list.append(self.vca_config.public_key)

            # VIM deployment runs concurrently with the N2VC deployments below;
            # its completion is awaited in the finally clause via _wait_for_tasks
            stage[1] = "Deploying NS at VIM."
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            # create namespace and certificate if any helm based EE is present in the NS
            if check_helm_ee_in_ns(db_vnfds):
                # TODO: create EE namespace
                # create TLS certificates
                await self.vca_map["helm-v3"].create_tls_certificate(
                    secret_name="ee-tls-{}".format(nsr_id),
                    dns_prefix="*",
                    nsr_id=nsr_id,
                    usage="server auth",
                )

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if the vnfd declares one
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one charm deployment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                # NOTE: rebinds "exc"; the outer value was already consumed by
                # the "if exc" check above, so no behavior change
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )

            # notify the final operation state via kafka (best effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2869
2870 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2871 if vnfd_id not in cached_vnfds:
2872 cached_vnfds[vnfd_id] = self.db.get_one(
2873 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2874 )
2875 return cached_vnfds[vnfd_id]
2876
2877 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2878 if vnf_profile_id not in cached_vnfrs:
2879 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2880 "vnfrs",
2881 {
2882 "member-vnf-index-ref": vnf_profile_id,
2883 "nsr-id-ref": nsr_id,
2884 },
2885 )
2886 return cached_vnfrs[vnf_profile_id]
2887
2888 def _is_deployed_vca_in_relation(
2889 self, vca: DeployedVCA, relation: Relation
2890 ) -> bool:
2891 found = False
2892 for endpoint in (relation.provider, relation.requirer):
2893 if endpoint["kdu-resource-profile-id"]:
2894 continue
2895 found = (
2896 vca.vnf_profile_id == endpoint.vnf_profile_id
2897 and vca.vdu_profile_id == endpoint.vdu_profile_id
2898 and vca.execution_environment_ref == endpoint.execution_environment_ref
2899 )
2900 if found:
2901 break
2902 return found
2903
2904 def _update_ee_relation_data_with_implicit_data(
2905 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2906 ):
2907 ee_relation_data = safe_get_ee_relation(
2908 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2909 )
2910 ee_relation_level = EELevel.get_level(ee_relation_data)
2911 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2912 "execution-environment-ref"
2913 ]:
2914 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2915 vnfd_id = vnf_profile["vnfd-id"]
2916 project = nsd["_admin"]["projects_read"][0]
2917 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2918 entity_id = (
2919 vnfd_id
2920 if ee_relation_level == EELevel.VNF
2921 else ee_relation_data["vdu-profile-id"]
2922 )
2923 ee = get_juju_ee_ref(db_vnfd, entity_id)
2924 if not ee:
2925 raise Exception(
2926 f"not execution environments found for ee_relation {ee_relation_data}"
2927 )
2928 ee_relation_data["execution-environment-ref"] = ee["id"]
2929 return ee_relation_data
2930
2931 def _get_ns_relations(
2932 self,
2933 nsr_id: str,
2934 nsd: Dict[str, Any],
2935 vca: DeployedVCA,
2936 cached_vnfds: Dict[str, Any],
2937 ) -> List[Relation]:
2938 relations = []
2939 db_ns_relations = get_ns_configuration_relation_list(nsd)
2940 for r in db_ns_relations:
2941 provider_dict = None
2942 requirer_dict = None
2943 if all(key in r for key in ("provider", "requirer")):
2944 provider_dict = r["provider"]
2945 requirer_dict = r["requirer"]
2946 elif "entities" in r:
2947 provider_id = r["entities"][0]["id"]
2948 provider_dict = {
2949 "nsr-id": nsr_id,
2950 "endpoint": r["entities"][0]["endpoint"],
2951 }
2952 if provider_id != nsd["id"]:
2953 provider_dict["vnf-profile-id"] = provider_id
2954 requirer_id = r["entities"][1]["id"]
2955 requirer_dict = {
2956 "nsr-id": nsr_id,
2957 "endpoint": r["entities"][1]["endpoint"],
2958 }
2959 if requirer_id != nsd["id"]:
2960 requirer_dict["vnf-profile-id"] = requirer_id
2961 else:
2962 raise Exception(
2963 "provider/requirer or entities must be included in the relation."
2964 )
2965 relation_provider = self._update_ee_relation_data_with_implicit_data(
2966 nsr_id, nsd, provider_dict, cached_vnfds
2967 )
2968 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2969 nsr_id, nsd, requirer_dict, cached_vnfds
2970 )
2971 provider = EERelation(relation_provider)
2972 requirer = EERelation(relation_requirer)
2973 relation = Relation(r["name"], provider, requirer)
2974 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2975 if vca_in_relation:
2976 relations.append(relation)
2977 return relations
2978
2979 def _get_vnf_relations(
2980 self,
2981 nsr_id: str,
2982 nsd: Dict[str, Any],
2983 vca: DeployedVCA,
2984 cached_vnfds: Dict[str, Any],
2985 ) -> List[Relation]:
2986 relations = []
2987 if vca.target_element == "ns":
2988 self.logger.debug("VCA is a NS charm, not a VNF.")
2989 return relations
2990 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2991 vnf_profile_id = vnf_profile["id"]
2992 vnfd_id = vnf_profile["vnfd-id"]
2993 project = nsd["_admin"]["projects_read"][0]
2994 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2995 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2996 for r in db_vnf_relations:
2997 provider_dict = None
2998 requirer_dict = None
2999 if all(key in r for key in ("provider", "requirer")):
3000 provider_dict = r["provider"]
3001 requirer_dict = r["requirer"]
3002 elif "entities" in r:
3003 provider_id = r["entities"][0]["id"]
3004 provider_dict = {
3005 "nsr-id": nsr_id,
3006 "vnf-profile-id": vnf_profile_id,
3007 "endpoint": r["entities"][0]["endpoint"],
3008 }
3009 if provider_id != vnfd_id:
3010 provider_dict["vdu-profile-id"] = provider_id
3011 requirer_id = r["entities"][1]["id"]
3012 requirer_dict = {
3013 "nsr-id": nsr_id,
3014 "vnf-profile-id": vnf_profile_id,
3015 "endpoint": r["entities"][1]["endpoint"],
3016 }
3017 if requirer_id != vnfd_id:
3018 requirer_dict["vdu-profile-id"] = requirer_id
3019 else:
3020 raise Exception(
3021 "provider/requirer or entities must be included in the relation."
3022 )
3023 relation_provider = self._update_ee_relation_data_with_implicit_data(
3024 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3025 )
3026 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3027 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3028 )
3029 provider = EERelation(relation_provider)
3030 requirer = EERelation(relation_requirer)
3031 relation = Relation(r["name"], provider, requirer)
3032 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3033 if vca_in_relation:
3034 relations.append(relation)
3035 return relations
3036
3037 def _get_kdu_resource_data(
3038 self,
3039 ee_relation: EERelation,
3040 db_nsr: Dict[str, Any],
3041 cached_vnfds: Dict[str, Any],
3042 ) -> DeployedK8sResource:
3043 nsd = get_nsd(db_nsr)
3044 vnf_profiles = get_vnf_profiles(nsd)
3045 vnfd_id = find_in_list(
3046 vnf_profiles,
3047 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3048 )["vnfd-id"]
3049 project = nsd["_admin"]["projects_read"][0]
3050 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3051 kdu_resource_profile = get_kdu_resource_profile(
3052 db_vnfd, ee_relation.kdu_resource_profile_id
3053 )
3054 kdu_name = kdu_resource_profile["kdu-name"]
3055 deployed_kdu, _ = get_deployed_kdu(
3056 db_nsr.get("_admin", ()).get("deployed", ()),
3057 kdu_name,
3058 ee_relation.vnf_profile_id,
3059 )
3060 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3061 return deployed_kdu
3062
3063 def _get_deployed_component(
3064 self,
3065 ee_relation: EERelation,
3066 db_nsr: Dict[str, Any],
3067 cached_vnfds: Dict[str, Any],
3068 ) -> DeployedComponent:
3069 nsr_id = db_nsr["_id"]
3070 deployed_component = None
3071 ee_level = EELevel.get_level(ee_relation)
3072 if ee_level == EELevel.NS:
3073 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3074 if vca:
3075 deployed_component = DeployedVCA(nsr_id, vca)
3076 elif ee_level == EELevel.VNF:
3077 vca = get_deployed_vca(
3078 db_nsr,
3079 {
3080 "vdu_id": None,
3081 "member-vnf-index": ee_relation.vnf_profile_id,
3082 "ee_descriptor_id": ee_relation.execution_environment_ref,
3083 },
3084 )
3085 if vca:
3086 deployed_component = DeployedVCA(nsr_id, vca)
3087 elif ee_level == EELevel.VDU:
3088 vca = get_deployed_vca(
3089 db_nsr,
3090 {
3091 "vdu_id": ee_relation.vdu_profile_id,
3092 "member-vnf-index": ee_relation.vnf_profile_id,
3093 "ee_descriptor_id": ee_relation.execution_environment_ref,
3094 },
3095 )
3096 if vca:
3097 deployed_component = DeployedVCA(nsr_id, vca)
3098 elif ee_level == EELevel.KDU:
3099 kdu_resource_data = self._get_kdu_resource_data(
3100 ee_relation, db_nsr, cached_vnfds
3101 )
3102 if kdu_resource_data:
3103 deployed_component = DeployedK8sResource(kdu_resource_data)
3104 return deployed_component
3105
3106 async def _add_relation(
3107 self,
3108 relation: Relation,
3109 vca_type: str,
3110 db_nsr: Dict[str, Any],
3111 cached_vnfds: Dict[str, Any],
3112 cached_vnfrs: Dict[str, Any],
3113 ) -> bool:
3114 deployed_provider = self._get_deployed_component(
3115 relation.provider, db_nsr, cached_vnfds
3116 )
3117 deployed_requirer = self._get_deployed_component(
3118 relation.requirer, db_nsr, cached_vnfds
3119 )
3120 if (
3121 deployed_provider
3122 and deployed_requirer
3123 and deployed_provider.config_sw_installed
3124 and deployed_requirer.config_sw_installed
3125 ):
3126 provider_db_vnfr = (
3127 self._get_vnfr(
3128 relation.provider.nsr_id,
3129 relation.provider.vnf_profile_id,
3130 cached_vnfrs,
3131 )
3132 if relation.provider.vnf_profile_id
3133 else None
3134 )
3135 requirer_db_vnfr = (
3136 self._get_vnfr(
3137 relation.requirer.nsr_id,
3138 relation.requirer.vnf_profile_id,
3139 cached_vnfrs,
3140 )
3141 if relation.requirer.vnf_profile_id
3142 else None
3143 )
3144 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3145 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3146 provider_relation_endpoint = RelationEndpoint(
3147 deployed_provider.ee_id,
3148 provider_vca_id,
3149 relation.provider.endpoint,
3150 )
3151 requirer_relation_endpoint = RelationEndpoint(
3152 deployed_requirer.ee_id,
3153 requirer_vca_id,
3154 relation.requirer.endpoint,
3155 )
3156 try:
3157 await self.vca_map[vca_type].add_relation(
3158 provider=provider_relation_endpoint,
3159 requirer=requirer_relation_endpoint,
3160 )
3161 except N2VCException as exception:
3162 self.logger.error(exception)
3163 raise LcmException(exception)
3164 return True
3165 return False
3166
3167 async def _add_vca_relations(
3168 self,
3169 logging_text,
3170 nsr_id,
3171 vca_type: str,
3172 vca_index: int,
3173 timeout: int = 3600,
3174 ) -> bool:
3175
3176 # steps:
3177 # 1. find all relations for this VCA
3178 # 2. wait for other peers related
3179 # 3. add relations
3180
3181 try:
3182 # STEP 1: find all relations for this VCA
3183
3184 # read nsr record
3185 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3186 nsd = get_nsd(db_nsr)
3187
3188 # this VCA data
3189 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3190 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3191
3192 cached_vnfds = {}
3193 cached_vnfrs = {}
3194 relations = []
3195 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3196 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3197
3198 # if no relations, terminate
3199 if not relations:
3200 self.logger.debug(logging_text + " No relations")
3201 return True
3202
3203 self.logger.debug(logging_text + " adding relations {}".format(relations))
3204
3205 # add all relations
3206 start = time()
3207 while True:
3208 # check timeout
3209 now = time()
3210 if now - start >= timeout:
3211 self.logger.error(logging_text + " : timeout adding relations")
3212 return False
3213
3214 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3215 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3216
3217 # for each relation, find the VCA's related
3218 for relation in relations.copy():
3219 added = await self._add_relation(
3220 relation,
3221 vca_type,
3222 db_nsr,
3223 cached_vnfds,
3224 cached_vnfrs,
3225 )
3226 if added:
3227 relations.remove(relation)
3228
3229 if not relations:
3230 self.logger.debug("Relations added")
3231 break
3232 await asyncio.sleep(5.0)
3233
3234 return True
3235
3236 except Exception as e:
3237 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3238 return False
3239
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and update the nsr/vnfr records.

        Generates (or reuses) the kdu-instance name, records it in the nsr,
        installs the KDU, stores its services and management IP in the vnfr,
        and finally runs any initial-config-primitive of the KDU that is not
        handled by a juju execution environment.

        :param nsr_id: nsr record _id
        :param nsr_db_path: path of this KDU inside the nsr record
            ("_admin.deployed.K8s.<index>")
        :param vnfr_data: vnfr record content (its _id is used for updates)
        :param kdu_index: index of this kdur inside the vnfr "kdur" list
        :param kdud: kdu descriptor from the vnfd
        :param vnfd: vnf descriptor content
        :param k8s_instance_info: dict with k8scluster-type/-uuid, kdu-name,
            kdu-model, namespace, kdu-deployment-name
        :param k8params: parameters passed to the install operation
        :param timeout: timeout in seconds for install and for each primitive
        :param vca_id: VCA id forwarded to the k8s connector
        :return: the kdu_instance name
        :raises Exception: re-raises any install/primitive error after writing
            it to the nsr detailed-status and marking the kdur as ERROR
        """
        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Honor an explicit kdu-deployment-name; otherwise let the
            # connector derive a unique instance name.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                # Services flagged as mgmt-service in the descriptor provide
                # the KDU (and possibly the VNF) management IP.
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # Deployed service names are prefixed with the
                        # descriptor service name.
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this
                        # descriptor mgmt-service name.
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives directly through the k8s
            # connector only when no juju execution environment handles them.
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # Primitives run in ascending "seq" order.
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3437
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Deploy every KDU declared in the vnfrs of this NS.

        For each kdur: resolves the K8s cluster uuid (initializing helm-v3
        info for old clusters when needed), synchronizes helm repos once per
        cluster, records the K8s instance info in the nsr record and spawns
        one _install_kdu task per KDU, registered in the LCM task registry.

        :param logging_text: prefix for log messages
        :param nsr_id: nsr record _id
        :param nslcmop_id: current operation id, used to register tasks
        :param db_vnfrs: vnfr records (only the values are iterated here)
        :param db_vnfds: list of vnfd records
        :param task_instantiation_info: dict annotated with each created task
        :raises LcmException: on any error other than cancellation
        """
        # Launch kdus if present in the descriptor

        # Cache of cluster-id -> connector uuid, per cluster type.
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Resolve (and cache) the connector-level uuid of a K8s cluster,
            # waiting for any in-flight k8scluster task first.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    # Determine the KDU model and the cluster type it needs.
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (only once per cluster and helm flavor)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # Install runs asynchronously; the task is registered so
                    # it can be tracked/cancelled with the operation.
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3709
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create/reuse a VCA slot in _admin.deployed.VCA and launch one
        instantiate_N2VC task per execution environment of descriptor_config.

        Each created task is registered in the LCM task registry and annotated
        in task_instantiation_info; when no matching deployed VCA exists, a
        new entry is appended to the nsr record first.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the execution environment kind.
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Reuse the deployed VCA entry that matches this EE, if any;
            # the for/else creates a new one when no match is found.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3871
3872 @staticmethod
3873 def _create_nslcmop(nsr_id, operation, params):
3874 """
3875 Creates a ns-lcm-opp content to be stored at database.
3876 :param nsr_id: internal id of the instance
3877 :param operation: instantiate, terminate, scale, action, ...
3878 :param params: user parameters for the operation
3879 :return: dictionary following SOL005 format
3880 """
3881 # Raise exception if invalid arguments
3882 if not (nsr_id and operation and params):
3883 raise LcmException(
3884 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3885 )
3886 now = time()
3887 _id = str(uuid4())
3888 nslcmop = {
3889 "id": _id,
3890 "_id": _id,
3891 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3892 "operationState": "PROCESSING",
3893 "statusEnteredTime": now,
3894 "nsInstanceId": nsr_id,
3895 "lcmOperationType": operation,
3896 "startTime": now,
3897 "isAutomaticInvocation": False,
3898 "operationParams": params,
3899 "isCancelPending": False,
3900 "links": {
3901 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3902 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3903 },
3904 }
3905 return nslcmop
3906
3907 def _format_additional_params(self, params):
3908 params = params or {}
3909 for key, value in params.items():
3910 if str(value).startswith("!!yaml "):
3911 params[key] = yaml.safe_load(value[7:])
3912 return params
3913
3914 def _get_terminate_primitive_params(self, seq, vnf_index):
3915 primitive = seq.get("name")
3916 primitive_params = {}
3917 params = {
3918 "member_vnf_index": vnf_index,
3919 "primitive": primitive,
3920 "primitive_params": primitive_params,
3921 }
3922 desc_params = {}
3923 return self._map_primitive_params(seq, params, desc_params)
3924
3925 # sub-operations
3926
3927 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3928 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3929 if op.get("operationState") == "COMPLETED":
3930 # b. Skip sub-operation
3931 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3932 return self.SUBOPERATION_STATUS_SKIP
3933 else:
3934 # c. retry executing sub-operation
3935 # The sub-operation exists, and operationState != 'COMPLETED'
3936 # Update operationState = 'PROCESSING' to indicate a retry.
3937 operationState = "PROCESSING"
3938 detailed_status = "In progress"
3939 self._update_suboperation_status(
3940 db_nslcmop, op_index, operationState, detailed_status
3941 )
3942 # Return the sub-operation index
3943 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3944 # with arguments extracted from the sub-operation
3945 return op_index
3946
3947 # Find a sub-operation where all keys in a matching dictionary must match
3948 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3949 def _find_suboperation(self, db_nslcmop, match):
3950 if db_nslcmop and match:
3951 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3952 for i, op in enumerate(op_list):
3953 if all(op.get(k) == match[k] for k in match):
3954 return i
3955 return self.SUBOPERATION_STATUS_NOT_FOUND
3956
3957 # Update status for a sub-operation given its index
3958 def _update_suboperation_status(
3959 self, db_nslcmop, op_index, operationState, detailed_status
3960 ):
3961 # Update DB for HA tasks
3962 q_filter = {"_id": db_nslcmop["_id"]}
3963 update_dict = {
3964 "_admin.operations.{}.operationState".format(op_index): operationState,
3965 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3966 }
3967 self.db.set_one(
3968 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3969 )
3970
3971 # Add sub-operation, return the index of the added sub-operation
3972 # Optionally, set operationState, detailed-status, and operationType
3973 # Status and type are currently set for 'scale' sub-operations:
3974 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3975 # 'detailed-status' : status message
3976 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3977 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3978 def _add_suboperation(
3979 self,
3980 db_nslcmop,
3981 vnf_index,
3982 vdu_id,
3983 vdu_count_index,
3984 vdu_name,
3985 primitive,
3986 mapped_primitive_params,
3987 operationState=None,
3988 detailed_status=None,
3989 operationType=None,
3990 RO_nsr_id=None,
3991 RO_scaling_info=None,
3992 ):
3993 if not db_nslcmop:
3994 return self.SUBOPERATION_STATUS_NOT_FOUND
3995 # Get the "_admin.operations" list, if it exists
3996 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3997 op_list = db_nslcmop_admin.get("operations")
3998 # Create or append to the "_admin.operations" list
3999 new_op = {
4000 "member_vnf_index": vnf_index,
4001 "vdu_id": vdu_id,
4002 "vdu_count_index": vdu_count_index,
4003 "primitive": primitive,
4004 "primitive_params": mapped_primitive_params,
4005 }
4006 if operationState:
4007 new_op["operationState"] = operationState
4008 if detailed_status:
4009 new_op["detailed-status"] = detailed_status
4010 if operationType:
4011 new_op["lcmOperationType"] = operationType
4012 if RO_nsr_id:
4013 new_op["RO_nsr_id"] = RO_nsr_id
4014 if RO_scaling_info:
4015 new_op["RO_scaling_info"] = RO_scaling_info
4016 if not op_list:
4017 # No existing operations, create key 'operations' with current operation as first list element
4018 db_nslcmop_admin.update({"operations": [new_op]})
4019 op_list = db_nslcmop_admin.get("operations")
4020 else:
4021 # Existing operations, append operation to list
4022 op_list.append(new_op)
4023
4024 db_nslcmop_update = {"_admin.operations": op_list}
4025 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4026 op_index = len(op_list) - 1
4027 return op_index
4028
4029 # Helper methods for scale() sub-operations
4030
4031 # pre-scale/post-scale:
4032 # Check for 3 different cases:
4033 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4034 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4035 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4036 def _check_or_add_scale_suboperation(
4037 self,
4038 db_nslcmop,
4039 vnf_index,
4040 vnf_config_primitive,
4041 primitive_params,
4042 operationType,
4043 RO_nsr_id=None,
4044 RO_scaling_info=None,
4045 ):
4046 # Find this sub-operation
4047 if RO_nsr_id and RO_scaling_info:
4048 operationType = "SCALE-RO"
4049 match = {
4050 "member_vnf_index": vnf_index,
4051 "RO_nsr_id": RO_nsr_id,
4052 "RO_scaling_info": RO_scaling_info,
4053 }
4054 else:
4055 match = {
4056 "member_vnf_index": vnf_index,
4057 "primitive": vnf_config_primitive,
4058 "primitive_params": primitive_params,
4059 "lcmOperationType": operationType,
4060 }
4061 op_index = self._find_suboperation(db_nslcmop, match)
4062 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4063 # a. New sub-operation
4064 # The sub-operation does not exist, add it.
4065 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4066 # The following parameters are set to None for all kind of scaling:
4067 vdu_id = None
4068 vdu_count_index = None
4069 vdu_name = None
4070 if RO_nsr_id and RO_scaling_info:
4071 vnf_config_primitive = None
4072 primitive_params = None
4073 else:
4074 RO_nsr_id = None
4075 RO_scaling_info = None
4076 # Initial status for sub-operation
4077 operationState = "PROCESSING"
4078 detailed_status = "In progress"
4079 # Add sub-operation for pre/post-scaling (zero or more operations)
4080 self._add_suboperation(
4081 db_nslcmop,
4082 vnf_index,
4083 vdu_id,
4084 vdu_count_index,
4085 vdu_name,
4086 vnf_config_primitive,
4087 primitive_params,
4088 operationState,
4089 detailed_status,
4090 operationType,
4091 RO_nsr_id,
4092 RO_scaling_info,
4093 )
4094 return self.SUBOPERATION_STATUS_NEW
4095 else:
4096 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4097 # or op_index (operationState != 'COMPLETED')
4098 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4099
4100 # Function to return execution_environment id
4101
4102 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4103 # TODO vdu_index_count
4104 for vca in vca_deployed_list:
4105 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4106 return vca["ee_id"]
4107
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for every log message of this task
        :param db_nslcmop: nslcmop database record (sub-operations are registered into it)
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (controller) id to use; None selects the default one
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # Default for records created before the "type" field existed (proxy charm)
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # Only run when primitives exist and this VCA was flagged as needing them
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4213
4214 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4215 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4216 namespace = "." + db_nsr["_id"]
4217 try:
4218 await self.n2vc.delete_namespace(
4219 namespace=namespace,
4220 total_timeout=self.timeout.charm_delete,
4221 vca_id=vca_id,
4222 )
4223 except N2VCNotFound: # already deleted. Skip
4224 pass
4225 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4226
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate an NS instance: run terminate primitives, delete execution
        environments, KDUs and the RO/VIM deployment, then persist the final
        status in the nsrs/nslcmops records and notify via kafka.

        :param nsr_id: NS record id to terminate
        :param nslcmop_id: NS LCM operation record id tracking this termination
        :return: None (progress and result are written to the database)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # maps asyncio task -> human-readable description
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy: the deployed info is consulted while DB updates happen in parallel
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, fetching each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # Pick the config descriptor at the right level: ns, vdu, kdu or vnf
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4556
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """
        Wait for a set of asyncio tasks, collecting errors and reporting progress.

        :param logging_text: prefix for every log message
        :param created_tasks_info: dict mapping asyncio task -> human-readable description
        :param timeout: overall timeout in seconds for the whole set of tasks
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is updated in place
        :param nslcmop_id: operation id used for progress updates
        :param nsr_id: when provided, errors are also written to the nsr record
        :return: list of error-detail strings (empty if every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # Remaining time of the global deadline, not a fresh timeout per round
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # Mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # Known/expected exception types get a short log line;
                    # anything else logs the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4633
4634 @staticmethod
4635 def _map_primitive_params(primitive_desc, params, instantiation_params):
4636 """
4637 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4638 The default-value is used. If it is between < > it look for a value at instantiation_params
4639 :param primitive_desc: portion of VNFD/NSD that describes primitive
4640 :param params: Params provided by user
4641 :param instantiation_params: Instantiation params provided by user
4642 :return: a dictionary with the calculated params
4643 """
4644 calculated_params = {}
4645 for parameter in primitive_desc.get("parameter", ()):
4646 param_name = parameter["name"]
4647 if param_name in params:
4648 calculated_params[param_name] = params[param_name]
4649 elif "default-value" in parameter or "value" in parameter:
4650 if "value" in parameter:
4651 calculated_params[param_name] = parameter["value"]
4652 else:
4653 calculated_params[param_name] = parameter["default-value"]
4654 if (
4655 isinstance(calculated_params[param_name], str)
4656 and calculated_params[param_name].startswith("<")
4657 and calculated_params[param_name].endswith(">")
4658 ):
4659 if calculated_params[param_name][1:-1] in instantiation_params:
4660 calculated_params[param_name] = instantiation_params[
4661 calculated_params[param_name][1:-1]
4662 ]
4663 else:
4664 raise LcmException(
4665 "Parameter {} needed to execute primitive {} not provided".format(
4666 calculated_params[param_name], primitive_desc["name"]
4667 )
4668 )
4669 else:
4670 raise LcmException(
4671 "Parameter {} needed to execute primitive {} not provided".format(
4672 param_name, primitive_desc["name"]
4673 )
4674 )
4675
4676 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4677 calculated_params[param_name] = yaml.safe_dump(
4678 calculated_params[param_name], default_flow_style=True, width=256
4679 )
4680 elif isinstance(calculated_params[param_name], str) and calculated_params[
4681 param_name
4682 ].startswith("!!yaml "):
4683 calculated_params[param_name] = calculated_params[param_name][7:]
4684 if parameter.get("data-type") == "INTEGER":
4685 try:
4686 calculated_params[param_name] = int(calculated_params[param_name])
4687 except ValueError: # error converting string to int
4688 raise LcmException(
4689 "Parameter {} of primitive {} must be integer".format(
4690 param_name, primitive_desc["name"]
4691 )
4692 )
4693 elif parameter.get("data-type") == "BOOLEAN":
4694 calculated_params[param_name] = not (
4695 (str(calculated_params[param_name])).lower() == "false"
4696 )
4697
4698 # add always ns_config_info if primitive name is config
4699 if primitive_desc["name"] == "config":
4700 if "ns_config_info" in instantiation_params:
4701 calculated_params["ns_config_info"] = instantiation_params[
4702 "ns_config_info"
4703 ]
4704 return calculated_params
4705
4706 def _look_for_deployed_vca(
4707 self,
4708 deployed_vca,
4709 member_vnf_index,
4710 vdu_id,
4711 vdu_count_index,
4712 kdu_name=None,
4713 ee_descriptor_id=None,
4714 ):
4715 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4716 for vca in deployed_vca:
4717 if not vca:
4718 continue
4719 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4720 continue
4721 if (
4722 vdu_count_index is not None
4723 and vdu_count_index != vca["vdu_count_index"]
4724 ):
4725 continue
4726 if kdu_name and kdu_name != vca["kdu_name"]:
4727 continue
4728 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4729 continue
4730 break
4731 else:
4732 # vca_deployed not found
4733 raise LcmException(
4734 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4735 " is not deployed".format(
4736 member_vnf_index,
4737 vdu_id,
4738 vdu_count_index,
4739 kdu_name,
4740 ee_descriptor_id,
4741 )
4742 )
4743 # get ee_id
4744 ee_id = vca.get("ee_id")
4745 vca_type = vca.get(
4746 "type", "lxc_proxy_charm"
4747 ) # default value for backward compatibility - proxy charm
4748 if not ee_id:
4749 raise LcmException(
4750 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4751 "execution environment".format(
4752 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4753 )
4754 )
4755 return ee_id, vca_type
4756
    async def _ns_execute_primitive(
        self,
        ee_id,
        primitive,
        primitive_params,
        retries=0,
        retries_interval=30,
        timeout=None,
        vca_type=None,
        db_dict=None,
        vca_id: str = None,
    ) -> (str, str):
        """
        Execute a primitive on an execution environment, retrying on failure.

        :param ee_id: execution environment id where the primitive runs
        :param primitive: primitive name; "config" gets its params wrapped under "params"
        :param primitive_params: dict of parameters for the primitive
        :param retries: number of extra attempts after the first failure
        :param retries_interval: seconds to sleep between attempts
        :param timeout: per-attempt timeout; defaults to self.timeout.primitive
        :param vca_type: key of self.vca_map to use; defaults to "lxc_proxy_charm"
        :param db_dict: database info passed through to the connector for status updates
        :param vca_id: VCA (controller) id to use; None selects the default one
        :return: tuple (state, detail) where state is "COMPLETED", "FAILED" or "FAIL"
        """
        try:
            # "config" is a reserved primitive whose params must be nested under "params"
            if primitive == "config":
                primitive_params = {"params": primitive_params}

            vca_type = vca_type or "lxc_proxy_charm"

            # First attempt plus 'retries' additional ones
            while retries >= 0:
                try:
                    output = await asyncio.wait_for(
                        self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=primitive,
                            params_dict=primitive_params,
                            progress_timeout=self.timeout.progress_primitive,
                            total_timeout=self.timeout.primitive,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        ),
                        timeout=timeout or self.timeout.primitive,
                    )
                    # execution was OK
                    break
                except asyncio.CancelledError:
                    raise
                except Exception as e:
                    retries -= 1
                    if retries >= 0:
                        self.logger.debug(
                            "Error executing action {} on {} -> {}".format(
                                primitive, ee_id, e
                            )
                        )
                        # wait and retry
                        # NOTE(review): the 'loop' kwarg was removed from asyncio.sleep
                        # in Python 3.10 — confirm target interpreter version
                        await asyncio.sleep(retries_interval, loop=self.loop)
                    else:
                        # Out of retries: report the last error to the caller
                        if isinstance(e, asyncio.TimeoutError):
                            e = N2VCException(
                                message="Timed out waiting for action to complete"
                            )
                        return "FAILED", getattr(e, "message", repr(e))

            return "COMPLETED", output

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            return "FAIL", "Error executing action {}: {}".format(primitive, e)
4817
4818 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4819 """
4820 Updating the vca_status with latest juju information in nsrs record
4821 :param: nsr_id: Id of the nsr
4822 :param: nslcmop_id: Id of the nslcmop
4823 :return: None
4824 """
4825
4826 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4827 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4828 vca_id = self.get_vca_id({}, db_nsr)
4829 if db_nsr["_admin"]["deployed"]["K8s"]:
4830 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4831 cluster_uuid, kdu_instance, cluster_type = (
4832 k8s["k8scluster-uuid"],
4833 k8s["kdu-instance"],
4834 k8s["k8scluster-type"],
4835 )
4836 await self._on_update_k8s_db(
4837 cluster_uuid=cluster_uuid,
4838 kdu_instance=kdu_instance,
4839 filter={"_id": nsr_id},
4840 vca_id=vca_id,
4841 cluster_type=cluster_type,
4842 )
4843 else:
4844 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4845 table, filter = "nsrs", {"_id": nsr_id}
4846 path = "_admin.deployed.VCA.{}.".format(vca_index)
4847 await self._on_update_n2vc_db(table, filter, path, {})
4848
4849 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4850 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4851
4852 async def action(self, nsr_id, nslcmop_id):
4853 # Try to lock HA task here
4854 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4855 if not task_is_locked_by_me:
4856 return
4857
4858 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4859 self.logger.debug(logging_text + "Enter")
4860 # get all needed from database
4861 db_nsr = None
4862 db_nslcmop = None
4863 db_nsr_update = {}
4864 db_nslcmop_update = {}
4865 nslcmop_operation_state = None
4866 error_description_nslcmop = None
4867 exc = None
4868 step = ""
4869 try:
4870 # wait for any previous tasks in process
4871 step = "Waiting for previous operations to terminate"
4872 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4873
4874 self._write_ns_status(
4875 nsr_id=nsr_id,
4876 ns_state=None,
4877 current_operation="RUNNING ACTION",
4878 current_operation_id=nslcmop_id,
4879 )
4880
4881 step = "Getting information from database"
4882 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4883 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4884 if db_nslcmop["operationParams"].get("primitive_params"):
4885 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4886 db_nslcmop["operationParams"]["primitive_params"]
4887 )
4888
4889 nsr_deployed = db_nsr["_admin"].get("deployed")
4890 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4891 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4892 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4893 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4894 primitive = db_nslcmop["operationParams"]["primitive"]
4895 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4896 timeout_ns_action = db_nslcmop["operationParams"].get(
4897 "timeout_ns_action", self.timeout.primitive
4898 )
4899
4900 if vnf_index:
4901 step = "Getting vnfr from database"
4902 db_vnfr = self.db.get_one(
4903 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4904 )
4905 if db_vnfr.get("kdur"):
4906 kdur_list = []
4907 for kdur in db_vnfr["kdur"]:
4908 if kdur.get("additionalParams"):
4909 kdur["additionalParams"] = json.loads(
4910 kdur["additionalParams"]
4911 )
4912 kdur_list.append(kdur)
4913 db_vnfr["kdur"] = kdur_list
4914 step = "Getting vnfd from database"
4915 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4916
4917 # Sync filesystem before running a primitive
4918 self.fs.sync(db_vnfr["vnfd-id"])
4919 else:
4920 step = "Getting nsd from database"
4921 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4922
4923 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4924 # for backward compatibility
4925 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4926 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4927 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4928 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4929
4930 # look for primitive
4931 config_primitive_desc = descriptor_configuration = None
4932 if vdu_id:
4933 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4934 elif kdu_name:
4935 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4936 elif vnf_index:
4937 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4938 else:
4939 descriptor_configuration = db_nsd.get("ns-configuration")
4940
4941 if descriptor_configuration and descriptor_configuration.get(
4942 "config-primitive"
4943 ):
4944 for config_primitive in descriptor_configuration["config-primitive"]:
4945 if config_primitive["name"] == primitive:
4946 config_primitive_desc = config_primitive
4947 break
4948
4949 if not config_primitive_desc:
4950 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4951 raise LcmException(
4952 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4953 primitive
4954 )
4955 )
4956 primitive_name = primitive
4957 ee_descriptor_id = None
4958 else:
4959 primitive_name = config_primitive_desc.get(
4960 "execution-environment-primitive", primitive
4961 )
4962 ee_descriptor_id = config_primitive_desc.get(
4963 "execution-environment-ref"
4964 )
4965
4966 if vnf_index:
4967 if vdu_id:
4968 vdur = next(
4969 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4970 )
4971 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4972 elif kdu_name:
4973 kdur = next(
4974 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4975 )
4976 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4977 else:
4978 desc_params = parse_yaml_strings(
4979 db_vnfr.get("additionalParamsForVnf")
4980 )
4981 else:
4982 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4983 if kdu_name and get_configuration(db_vnfd, kdu_name):
4984 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4985 actions = set()
4986 for primitive in kdu_configuration.get("initial-config-primitive", []):
4987 actions.add(primitive["name"])
4988 for primitive in kdu_configuration.get("config-primitive", []):
4989 actions.add(primitive["name"])
4990 kdu = find_in_list(
4991 nsr_deployed["K8s"],
4992 lambda kdu: kdu_name == kdu["kdu-name"]
4993 and kdu["member-vnf-index"] == vnf_index,
4994 )
4995 kdu_action = (
4996 True
4997 if primitive_name in actions
4998 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
4999 else False
5000 )
5001
5002 # TODO check if ns is in a proper status
5003 if kdu_name and (
5004 primitive_name in ("upgrade", "rollback", "status") or kdu_action
5005 ):
5006 # kdur and desc_params already set from before
5007 if primitive_params:
5008 desc_params.update(primitive_params)
5009 # TODO Check if we will need something at vnf level
5010 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
5011 if (
5012 kdu_name == kdu["kdu-name"]
5013 and kdu["member-vnf-index"] == vnf_index
5014 ):
5015 break
5016 else:
5017 raise LcmException(
5018 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
5019 )
5020
5021 if kdu.get("k8scluster-type") not in self.k8scluster_map:
5022 msg = "unknown k8scluster-type '{}'".format(
5023 kdu.get("k8scluster-type")
5024 )
5025 raise LcmException(msg)
5026
5027 db_dict = {
5028 "collection": "nsrs",
5029 "filter": {"_id": nsr_id},
5030 "path": "_admin.deployed.K8s.{}".format(index),
5031 }
5032 self.logger.debug(
5033 logging_text
5034 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5035 )
5036 step = "Executing kdu {}".format(primitive_name)
5037 if primitive_name == "upgrade":
5038 if desc_params.get("kdu_model"):
5039 kdu_model = desc_params.get("kdu_model")
5040 del desc_params["kdu_model"]
5041 else:
5042 kdu_model = kdu.get("kdu-model")
5043 parts = kdu_model.split(sep=":")
5044 if len(parts) == 2:
5045 kdu_model = parts[0]
5046 if desc_params.get("kdu_atomic_upgrade"):
5047 atomic_upgrade = desc_params.get(
5048 "kdu_atomic_upgrade"
5049 ).lower() in ("yes", "true", "1")
5050 del desc_params["kdu_atomic_upgrade"]
5051 else:
5052 atomic_upgrade = True
5053
5054 detailed_status = await asyncio.wait_for(
5055 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5056 cluster_uuid=kdu.get("k8scluster-uuid"),
5057 kdu_instance=kdu.get("kdu-instance"),
5058 atomic=atomic_upgrade,
5059 kdu_model=kdu_model,
5060 params=desc_params,
5061 db_dict=db_dict,
5062 timeout=timeout_ns_action,
5063 ),
5064 timeout=timeout_ns_action + 10,
5065 )
5066 self.logger.debug(
5067 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5068 )
5069 elif primitive_name == "rollback":
5070 detailed_status = await asyncio.wait_for(
5071 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5072 cluster_uuid=kdu.get("k8scluster-uuid"),
5073 kdu_instance=kdu.get("kdu-instance"),
5074 db_dict=db_dict,
5075 ),
5076 timeout=timeout_ns_action,
5077 )
5078 elif primitive_name == "status":
5079 detailed_status = await asyncio.wait_for(
5080 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5081 cluster_uuid=kdu.get("k8scluster-uuid"),
5082 kdu_instance=kdu.get("kdu-instance"),
5083 vca_id=vca_id,
5084 ),
5085 timeout=timeout_ns_action,
5086 )
5087 else:
5088 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5089 kdu["kdu-name"], nsr_id
5090 )
5091 params = self._map_primitive_params(
5092 config_primitive_desc, primitive_params, desc_params
5093 )
5094
5095 detailed_status = await asyncio.wait_for(
5096 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5097 cluster_uuid=kdu.get("k8scluster-uuid"),
5098 kdu_instance=kdu_instance,
5099 primitive_name=primitive_name,
5100 params=params,
5101 db_dict=db_dict,
5102 timeout=timeout_ns_action,
5103 vca_id=vca_id,
5104 ),
5105 timeout=timeout_ns_action,
5106 )
5107
5108 if detailed_status:
5109 nslcmop_operation_state = "COMPLETED"
5110 else:
5111 detailed_status = ""
5112 nslcmop_operation_state = "FAILED"
5113 else:
5114 ee_id, vca_type = self._look_for_deployed_vca(
5115 nsr_deployed["VCA"],
5116 member_vnf_index=vnf_index,
5117 vdu_id=vdu_id,
5118 vdu_count_index=vdu_count_index,
5119 ee_descriptor_id=ee_descriptor_id,
5120 )
5121 for vca_index, vca_deployed in enumerate(
5122 db_nsr["_admin"]["deployed"]["VCA"]
5123 ):
5124 if vca_deployed.get("member-vnf-index") == vnf_index:
5125 db_dict = {
5126 "collection": "nsrs",
5127 "filter": {"_id": nsr_id},
5128 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5129 }
5130 break
5131 (
5132 nslcmop_operation_state,
5133 detailed_status,
5134 ) = await self._ns_execute_primitive(
5135 ee_id,
5136 primitive=primitive_name,
5137 primitive_params=self._map_primitive_params(
5138 config_primitive_desc, primitive_params, desc_params
5139 ),
5140 timeout=timeout_ns_action,
5141 vca_type=vca_type,
5142 db_dict=db_dict,
5143 vca_id=vca_id,
5144 )
5145
5146 db_nslcmop_update["detailed-status"] = detailed_status
5147 error_description_nslcmop = (
5148 detailed_status if nslcmop_operation_state == "FAILED" else ""
5149 )
5150 self.logger.debug(
5151 logging_text
5152 + "Done with result {} {}".format(
5153 nslcmop_operation_state, detailed_status
5154 )
5155 )
5156 return # database update is called inside finally
5157
5158 except (DbException, LcmException, N2VCException, K8sException) as e:
5159 self.logger.error(logging_text + "Exit Exception {}".format(e))
5160 exc = e
5161 except asyncio.CancelledError:
5162 self.logger.error(
5163 logging_text + "Cancelled Exception while '{}'".format(step)
5164 )
5165 exc = "Operation was cancelled"
5166 except asyncio.TimeoutError:
5167 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5168 exc = "Timeout"
5169 except Exception as e:
5170 exc = traceback.format_exc()
5171 self.logger.critical(
5172 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5173 exc_info=True,
5174 )
5175 finally:
5176 if exc:
5177 db_nslcmop_update[
5178 "detailed-status"
5179 ] = (
5180 detailed_status
5181 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5182 nslcmop_operation_state = "FAILED"
5183 if db_nsr:
5184 self._write_ns_status(
5185 nsr_id=nsr_id,
5186 ns_state=db_nsr[
5187 "nsState"
5188 ], # TODO check if degraded. For the moment use previous status
5189 current_operation="IDLE",
5190 current_operation_id=None,
5191 # error_description=error_description_nsr,
5192 # error_detail=error_detail,
5193 other_update=db_nsr_update,
5194 )
5195
5196 self._write_op_status(
5197 op_id=nslcmop_id,
5198 stage="",
5199 error_message=error_description_nslcmop,
5200 operation_state=nslcmop_operation_state,
5201 other_update=db_nslcmop_update,
5202 )
5203
5204 if nslcmop_operation_state:
5205 try:
5206 await self.msg.aiowrite(
5207 "ns",
5208 "actioned",
5209 {
5210 "nsr_id": nsr_id,
5211 "nslcmop_id": nslcmop_id,
5212 "operationState": nslcmop_operation_state,
5213 },
5214 loop=self.loop,
5215 )
5216 except Exception as e:
5217 self.logger.error(
5218 logging_text + "kafka_write notification Exception {}".format(e)
5219 )
5220 self.logger.debug(logging_text + "Exit")
5221 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5222 return nslcmop_operation_state, detailed_status
5223
5224 async def terminate_vdus(
5225 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5226 ):
5227 """This method terminates VDUs
5228
5229 Args:
5230 db_vnfr: VNF instance record
5231 member_vnf_index: VNF index to identify the VDUs to be removed
5232 db_nsr: NS instance record
5233 update_db_nslcmops: Nslcmop update record
5234 """
5235 vca_scaling_info = []
5236 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5237 scaling_info["scaling_direction"] = "IN"
5238 scaling_info["vdu-delete"] = {}
5239 scaling_info["kdu-delete"] = {}
5240 db_vdur = db_vnfr.get("vdur")
5241 vdur_list = copy(db_vdur)
5242 count_index = 0
5243 for index, vdu in enumerate(vdur_list):
5244 vca_scaling_info.append(
5245 {
5246 "osm_vdu_id": vdu["vdu-id-ref"],
5247 "member-vnf-index": member_vnf_index,
5248 "type": "delete",
5249 "vdu_index": count_index,
5250 }
5251 )
5252 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5253 scaling_info["vdu"].append(
5254 {
5255 "name": vdu.get("name") or vdu.get("vdu-name"),
5256 "vdu_id": vdu["vdu-id-ref"],
5257 "interface": [],
5258 }
5259 )
5260 for interface in vdu["interfaces"]:
5261 scaling_info["vdu"][index]["interface"].append(
5262 {
5263 "name": interface["name"],
5264 "ip_address": interface["ip-address"],
5265 "mac_address": interface.get("mac-address"),
5266 }
5267 )
5268 self.logger.info("NS update scaling info{}".format(scaling_info))
5269 stage[2] = "Terminating VDUs"
5270 if scaling_info.get("vdu-delete"):
5271 # scale_process = "RO"
5272 if self.ro_config.ng:
5273 await self._scale_ng_ro(
5274 logging_text,
5275 db_nsr,
5276 update_db_nslcmops,
5277 db_vnfr,
5278 scaling_info,
5279 stage,
5280 )
5281
5282 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5283 """This method is to Remove VNF instances from NS.
5284
5285 Args:
5286 nsr_id: NS instance id
5287 nslcmop_id: nslcmop id of update
5288 vnf_instance_id: id of the VNF instance to be removed
5289
5290 Returns:
5291 result: (str, str) COMPLETED/FAILED, details
5292 """
5293 try:
5294 db_nsr_update = {}
5295 logging_text = "Task ns={} update ".format(nsr_id)
5296 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5297 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5298 if check_vnfr_count > 1:
5299 stage = ["", "", ""]
5300 step = "Getting nslcmop from database"
5301 self.logger.debug(
5302 step + " after having waited for previous tasks to be completed"
5303 )
5304 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5305 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5306 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5307 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5308 """ db_vnfr = self.db.get_one(
5309 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5310
5311 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5312 await self.terminate_vdus(
5313 db_vnfr,
5314 member_vnf_index,
5315 db_nsr,
5316 update_db_nslcmops,
5317 stage,
5318 logging_text,
5319 )
5320
5321 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5322 constituent_vnfr.remove(db_vnfr.get("_id"))
5323 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5324 "constituent-vnfr-ref"
5325 )
5326 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5327 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5328 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5329 return "COMPLETED", "Done"
5330 else:
5331 step = "Terminate VNF Failed with"
5332 raise LcmException(
5333 "{} Cannot terminate the last VNF in this NS.".format(
5334 vnf_instance_id
5335 )
5336 )
5337 except (LcmException, asyncio.CancelledError):
5338 raise
5339 except Exception as e:
5340 self.logger.debug("Error removing VNF {}".format(e))
5341 return "FAILED", "Error removing VNF {}".format(e)
5342
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        First terminates the existing VDUs through RO, then rewrites the VNF
        record (connection points, VDUR taken from the operation's "newVdur"
        parameter, revision) and finally requests a scale-OUT through RO to
        instantiate the new resources.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            # NOTE(review): count_index is never incremented, so every VDU is
            # (re)created at instance index 0 — confirm this is intended for
            # single-instance VDUs
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the VNF connection points from the descriptor's
            # external connection-point definitions (ext-cpd)
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # The new VDUR list is provided by the caller via operationParams
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # Re-read the VNF record so the scale-OUT below sees the update
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                    # NOTE(review): cloud_init_list is populated but never
                    # consumed afterwards; the _parse_cloud_init call is kept
                    # because it may raise on template errors — confirm
                    # whether the rendered result should be passed to RO
                    cloud_init_list = []
                    if cloud_init_text:
                        # TODO Information of its own ip is not available because db_vnfr is not updated.
                        additional_params["OSM"] = get_osm_params(
                            updated_db_vnfr, vdud["id"], 1
                        )
                        cloud_init_list.append(
                            self._parse_cloud_init(
                                cloud_init_text,
                                additional_params,
                                db_vnfd["id"],
                                vdud["id"],
                            )
                        )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5468
5469 async def _ns_charm_upgrade(
5470 self,
5471 ee_id,
5472 charm_id,
5473 charm_type,
5474 path,
5475 timeout: float = None,
5476 ) -> (str, str):
5477 """This method upgrade charms in VNF instances
5478
5479 Args:
5480 ee_id: Execution environment id
5481 path: Local path to the charm
5482 charm_id: charm-id
5483 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5484 timeout: (Float) Timeout for the ns update operation
5485
5486 Returns:
5487 result: (str, str) COMPLETED/FAILED, details
5488 """
5489 try:
5490 charm_type = charm_type or "lxc_proxy_charm"
5491 output = await self.vca_map[charm_type].upgrade_charm(
5492 ee_id=ee_id,
5493 path=path,
5494 charm_id=charm_id,
5495 charm_type=charm_type,
5496 timeout=timeout or self.timeout.ns_update,
5497 )
5498
5499 if output:
5500 return "COMPLETED", output
5501
5502 except (LcmException, asyncio.CancelledError):
5503 raise
5504
5505 except Exception as e:
5506
5507 self.logger.debug("Error upgrading charm {}".format(path))
5508
5509 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5510
5511 async def update(self, nsr_id, nslcmop_id):
5512 """Update NS according to different update types
5513
5514 This method performs upgrade of VNF instances then updates the revision
5515 number in VNF record
5516
5517 Args:
5518 nsr_id: Network service will be updated
5519 nslcmop_id: ns lcm operation id
5520
5521 Returns:
5522 It may raise DbException, LcmException, N2VCException, K8sException
5523
5524 """
5525 # Try to lock HA task here
5526 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5527 if not task_is_locked_by_me:
5528 return
5529
5530 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5531 self.logger.debug(logging_text + "Enter")
5532
5533 # Set the required variables to be filled up later
5534 db_nsr = None
5535 db_nslcmop_update = {}
5536 vnfr_update = {}
5537 nslcmop_operation_state = None
5538 db_nsr_update = {}
5539 error_description_nslcmop = ""
5540 exc = None
5541 change_type = "updated"
5542 detailed_status = ""
5543 member_vnf_index = None
5544
5545 try:
5546 # wait for any previous tasks in process
5547 step = "Waiting for previous operations to terminate"
5548 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5549 self._write_ns_status(
5550 nsr_id=nsr_id,
5551 ns_state=None,
5552 current_operation="UPDATING",
5553 current_operation_id=nslcmop_id,
5554 )
5555
5556 step = "Getting nslcmop from database"
5557 db_nslcmop = self.db.get_one(
5558 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5559 )
5560 update_type = db_nslcmop["operationParams"]["updateType"]
5561
5562 step = "Getting nsr from database"
5563 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5564 old_operational_status = db_nsr["operational-status"]
5565 db_nsr_update["operational-status"] = "updating"
5566 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5567 nsr_deployed = db_nsr["_admin"].get("deployed")
5568
5569 if update_type == "CHANGE_VNFPKG":
5570
5571 # Get the input parameters given through update request
5572 vnf_instance_id = db_nslcmop["operationParams"][
5573 "changeVnfPackageData"
5574 ].get("vnfInstanceId")
5575
5576 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5577 "vnfdId"
5578 )
5579 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5580
5581 step = "Getting vnfr from database"
5582 db_vnfr = self.db.get_one(
5583 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5584 )
5585
5586 step = "Getting vnfds from database"
5587 # Latest VNFD
5588 latest_vnfd = self.db.get_one(
5589 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5590 )
5591 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5592
5593 # Current VNFD
5594 current_vnf_revision = db_vnfr.get("revision", 1)
5595 current_vnfd = self.db.get_one(
5596 "vnfds_revisions",
5597 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5598 fail_on_empty=False,
5599 )
5600 # Charm artifact paths will be filled up later
5601 (
5602 current_charm_artifact_path,
5603 target_charm_artifact_path,
5604 charm_artifact_paths,
5605 helm_artifacts,
5606 ) = ([], [], [], [])
5607
5608 step = "Checking if revision has changed in VNFD"
5609 if current_vnf_revision != latest_vnfd_revision:
5610
5611 change_type = "policy_updated"
5612
5613 # There is new revision of VNFD, update operation is required
5614 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5615 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5616
5617 step = "Removing the VNFD packages if they exist in the local path"
5618 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5619 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5620
5621 step = "Get the VNFD packages from FSMongo"
5622 self.fs.sync(from_path=latest_vnfd_path)
5623 self.fs.sync(from_path=current_vnfd_path)
5624
5625 step = (
5626 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5627 )
5628 current_base_folder = current_vnfd["_admin"]["storage"]
5629 latest_base_folder = latest_vnfd["_admin"]["storage"]
5630
5631 for vca_index, vca_deployed in enumerate(
5632 get_iterable(nsr_deployed, "VCA")
5633 ):
5634 vnf_index = db_vnfr.get("member-vnf-index-ref")
5635
5636 # Getting charm-id and charm-type
5637 if vca_deployed.get("member-vnf-index") == vnf_index:
5638 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5639 vca_type = vca_deployed.get("type")
5640 vdu_count_index = vca_deployed.get("vdu_count_index")
5641
5642 # Getting ee-id
5643 ee_id = vca_deployed.get("ee_id")
5644
5645 step = "Getting descriptor config"
5646 if current_vnfd.get("kdu"):
5647
5648 search_key = "kdu_name"
5649 else:
5650 search_key = "vnfd_id"
5651
5652 entity_id = vca_deployed.get(search_key)
5653
5654 descriptor_config = get_configuration(
5655 current_vnfd, entity_id
5656 )
5657
5658 if "execution-environment-list" in descriptor_config:
5659 ee_list = descriptor_config.get(
5660 "execution-environment-list", []
5661 )
5662 else:
5663 ee_list = []
5664
5665 # There could be several charm used in the same VNF
5666 for ee_item in ee_list:
5667 if ee_item.get("juju"):
5668
5669 step = "Getting charm name"
5670 charm_name = ee_item["juju"].get("charm")
5671
5672 step = "Setting Charm artifact paths"
5673 current_charm_artifact_path.append(
5674 get_charm_artifact_path(
5675 current_base_folder,
5676 charm_name,
5677 vca_type,
5678 current_vnf_revision,
5679 )
5680 )
5681 target_charm_artifact_path.append(
5682 get_charm_artifact_path(
5683 latest_base_folder,
5684 charm_name,
5685 vca_type,
5686 latest_vnfd_revision,
5687 )
5688 )
5689 elif ee_item.get("helm-chart"):
5690 # add chart to list and all parameters
5691 step = "Getting helm chart name"
5692 chart_name = ee_item.get("helm-chart")
5693 if (
5694 ee_item.get("helm-version")
5695 and ee_item.get("helm-version") == "v2"
5696 ):
5697 vca_type = "helm"
5698 else:
5699 vca_type = "helm-v3"
5700 step = "Setting Helm chart artifact paths"
5701
5702 helm_artifacts.append(
5703 {
5704 "current_artifact_path": get_charm_artifact_path(
5705 current_base_folder,
5706 chart_name,
5707 vca_type,
5708 current_vnf_revision,
5709 ),
5710 "target_artifact_path": get_charm_artifact_path(
5711 latest_base_folder,
5712 chart_name,
5713 vca_type,
5714 latest_vnfd_revision,
5715 ),
5716 "ee_id": ee_id,
5717 "vca_index": vca_index,
5718 "vdu_index": vdu_count_index,
5719 }
5720 )
5721
5722 charm_artifact_paths = zip(
5723 current_charm_artifact_path, target_charm_artifact_path
5724 )
5725
5726 step = "Checking if software version has changed in VNFD"
5727 if find_software_version(current_vnfd) != find_software_version(
5728 latest_vnfd
5729 ):
5730
5731 step = "Checking if existing VNF has charm"
5732 for current_charm_path, target_charm_path in list(
5733 charm_artifact_paths
5734 ):
5735 if current_charm_path:
5736 raise LcmException(
5737 "Software version change is not supported as VNF instance {} has charm.".format(
5738 vnf_instance_id
5739 )
5740 )
5741
5742 # There is no change in the charm package, then redeploy the VNF
5743 # based on new descriptor
5744 step = "Redeploying VNF"
5745 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5746 (result, detailed_status) = await self._ns_redeploy_vnf(
5747 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5748 )
5749 if result == "FAILED":
5750 nslcmop_operation_state = result
5751 error_description_nslcmop = detailed_status
5752 db_nslcmop_update["detailed-status"] = detailed_status
5753 self.logger.debug(
5754 logging_text
5755 + " step {} Done with result {} {}".format(
5756 step, nslcmop_operation_state, detailed_status
5757 )
5758 )
5759
5760 else:
5761 step = "Checking if any charm package has changed or not"
5762 for current_charm_path, target_charm_path in list(
5763 charm_artifact_paths
5764 ):
5765 if (
5766 current_charm_path
5767 and target_charm_path
5768 and self.check_charm_hash_changed(
5769 current_charm_path, target_charm_path
5770 )
5771 ):
5772
5773 step = "Checking whether VNF uses juju bundle"
5774 if check_juju_bundle_existence(current_vnfd):
5775
5776 raise LcmException(
5777 "Charm upgrade is not supported for the instance which"
5778 " uses juju-bundle: {}".format(
5779 check_juju_bundle_existence(current_vnfd)
5780 )
5781 )
5782
5783 step = "Upgrading Charm"
5784 (
5785 result,
5786 detailed_status,
5787 ) = await self._ns_charm_upgrade(
5788 ee_id=ee_id,
5789 charm_id=vca_id,
5790 charm_type=vca_type,
5791 path=self.fs.path + target_charm_path,
5792 timeout=timeout_seconds,
5793 )
5794
5795 if result == "FAILED":
5796 nslcmop_operation_state = result
5797 error_description_nslcmop = detailed_status
5798
5799 db_nslcmop_update["detailed-status"] = detailed_status
5800 self.logger.debug(
5801 logging_text
5802 + " step {} Done with result {} {}".format(
5803 step, nslcmop_operation_state, detailed_status
5804 )
5805 )
5806
5807 step = "Updating policies"
5808 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5809 result = "COMPLETED"
5810 detailed_status = "Done"
5811 db_nslcmop_update["detailed-status"] = "Done"
5812
5813 # helm base EE
5814 for item in helm_artifacts:
5815 if not (
5816 item["current_artifact_path"]
5817 and item["target_artifact_path"]
5818 and self.check_charm_hash_changed(
5819 item["current_artifact_path"],
5820 item["target_artifact_path"],
5821 )
5822 ):
5823 continue
5824 db_update_entry = "_admin.deployed.VCA.{}.".format(
5825 item["vca_index"]
5826 )
5827 vnfr_id = db_vnfr["_id"]
5828 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
5829 db_dict = {
5830 "collection": "nsrs",
5831 "filter": {"_id": nsr_id},
5832 "path": db_update_entry,
5833 }
5834 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
5835 await self.vca_map[vca_type].upgrade_execution_environment(
5836 namespace=namespace,
5837 helm_id=helm_id,
5838 db_dict=db_dict,
5839 config=osm_config,
5840 artifact_path=item["target_artifact_path"],
5841 vca_type=vca_type,
5842 )
5843 vnf_id = db_vnfr.get("vnfd-ref")
5844 config_descriptor = get_configuration(latest_vnfd, vnf_id)
5845 self.logger.debug("get ssh key block")
5846 rw_mgmt_ip = None
5847 if deep_get(
5848 config_descriptor,
5849 ("config-access", "ssh-access", "required"),
5850 ):
5851 # Needed to inject a ssh key
5852 user = deep_get(
5853 config_descriptor,
5854 ("config-access", "ssh-access", "default-user"),
5855 )
5856 step = (
5857 "Install configuration Software, getting public ssh key"
5858 )
5859 pub_key = await self.vca_map[
5860 vca_type
5861 ].get_ee_ssh_public__key(
5862 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
5863 )
5864
5865 step = (
5866 "Insert public key into VM user={} ssh_key={}".format(
5867 user, pub_key
5868 )
5869 )
5870 self.logger.debug(logging_text + step)
5871
5872 # wait for RO (ip-address) Insert pub_key into VM
5873 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
5874 logging_text,
5875 nsr_id,
5876 vnfr_id,
5877 None,
5878 item["vdu_index"],
5879 user=user,
5880 pub_key=pub_key,
5881 )
5882
5883 initial_config_primitive_list = config_descriptor.get(
5884 "initial-config-primitive"
5885 )
5886 config_primitive = next(
5887 (
5888 p
5889 for p in initial_config_primitive_list
5890 if p["name"] == "config"
5891 ),
5892 None,
5893 )
5894 if not config_primitive:
5895 continue
5896
5897 deploy_params = {"OSM": get_osm_params(db_vnfr)}
5898 if rw_mgmt_ip:
5899 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
5900 if db_vnfr.get("additionalParamsForVnf"):
5901 deploy_params.update(
5902 parse_yaml_strings(
5903 db_vnfr["additionalParamsForVnf"].copy()
5904 )
5905 )
5906 primitive_params_ = self._map_primitive_params(
5907 config_primitive, {}, deploy_params
5908 )
5909
5910 step = "execute primitive '{}' params '{}'".format(
5911 config_primitive["name"], primitive_params_
5912 )
5913 self.logger.debug(logging_text + step)
5914 await self.vca_map[vca_type].exec_primitive(
5915 ee_id=ee_id,
5916 primitive_name=config_primitive["name"],
5917 params_dict=primitive_params_,
5918 db_dict=db_dict,
5919 vca_id=vca_id,
5920 vca_type=vca_type,
5921 )
5922
5923 step = "Updating policies"
5924 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5925 detailed_status = "Done"
5926 db_nslcmop_update["detailed-status"] = "Done"
5927
5928 # If nslcmop_operation_state is None, so any operation is not failed.
5929 if not nslcmop_operation_state:
5930 nslcmop_operation_state = "COMPLETED"
5931
5932 # If update CHANGE_VNFPKG nslcmop_operation is successful
5933 # vnf revision need to be updated
5934 vnfr_update["revision"] = latest_vnfd_revision
5935 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5936
5937 self.logger.debug(
5938 logging_text
5939 + " task Done with result {} {}".format(
5940 nslcmop_operation_state, detailed_status
5941 )
5942 )
5943 elif update_type == "REMOVE_VNF":
5944 # This part is included in https://osm.etsi.org/gerrit/11876
5945 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5946 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5947 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5948 step = "Removing VNF"
5949 (result, detailed_status) = await self.remove_vnf(
5950 nsr_id, nslcmop_id, vnf_instance_id
5951 )
5952 if result == "FAILED":
5953 nslcmop_operation_state = result
5954 error_description_nslcmop = detailed_status
5955 db_nslcmop_update["detailed-status"] = detailed_status
5956 change_type = "vnf_terminated"
5957 if not nslcmop_operation_state:
5958 nslcmop_operation_state = "COMPLETED"
5959 self.logger.debug(
5960 logging_text
5961 + " task Done with result {} {}".format(
5962 nslcmop_operation_state, detailed_status
5963 )
5964 )
5965
5966 elif update_type == "OPERATE_VNF":
5967 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
5968 "vnfInstanceId"
5969 ]
5970 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
5971 "changeStateTo"
5972 ]
5973 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
5974 "additionalParam"
5975 ]
5976 (result, detailed_status) = await self.rebuild_start_stop(
5977 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5978 )
5979 if result == "FAILED":
5980 nslcmop_operation_state = result
5981 error_description_nslcmop = detailed_status
5982 db_nslcmop_update["detailed-status"] = detailed_status
5983 if not nslcmop_operation_state:
5984 nslcmop_operation_state = "COMPLETED"
5985 self.logger.debug(
5986 logging_text
5987 + " task Done with result {} {}".format(
5988 nslcmop_operation_state, detailed_status
5989 )
5990 )
5991
5992 # If nslcmop_operation_state is None, so any operation is not failed.
5993 # All operations are executed in overall.
5994 if not nslcmop_operation_state:
5995 nslcmop_operation_state = "COMPLETED"
5996 db_nsr_update["operational-status"] = old_operational_status
5997
5998 except (DbException, LcmException, N2VCException, K8sException) as e:
5999 self.logger.error(logging_text + "Exit Exception {}".format(e))
6000 exc = e
6001 except asyncio.CancelledError:
6002 self.logger.error(
6003 logging_text + "Cancelled Exception while '{}'".format(step)
6004 )
6005 exc = "Operation was cancelled"
6006 except asyncio.TimeoutError:
6007 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6008 exc = "Timeout"
6009 except Exception as e:
6010 exc = traceback.format_exc()
6011 self.logger.critical(
6012 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6013 exc_info=True,
6014 )
6015 finally:
6016 if exc:
6017 db_nslcmop_update[
6018 "detailed-status"
6019 ] = (
6020 detailed_status
6021 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6022 nslcmop_operation_state = "FAILED"
6023 db_nsr_update["operational-status"] = old_operational_status
6024 if db_nsr:
6025 self._write_ns_status(
6026 nsr_id=nsr_id,
6027 ns_state=db_nsr["nsState"],
6028 current_operation="IDLE",
6029 current_operation_id=None,
6030 other_update=db_nsr_update,
6031 )
6032
6033 self._write_op_status(
6034 op_id=nslcmop_id,
6035 stage="",
6036 error_message=error_description_nslcmop,
6037 operation_state=nslcmop_operation_state,
6038 other_update=db_nslcmop_update,
6039 )
6040
6041 if nslcmop_operation_state:
6042 try:
6043 msg = {
6044 "nsr_id": nsr_id,
6045 "nslcmop_id": nslcmop_id,
6046 "operationState": nslcmop_operation_state,
6047 }
6048 if (
6049 change_type in ("vnf_terminated", "policy_updated")
6050 and member_vnf_index
6051 ):
6052 msg.update({"vnf_member_index": member_vnf_index})
6053 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6054 except Exception as e:
6055 self.logger.error(
6056 logging_text + "kafka_write notification Exception {}".format(e)
6057 )
6058 self.logger.debug(logging_text + "Exit")
6059 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6060 return nslcmop_operation_state, detailed_status
6061
6062 async def scale(self, nsr_id, nslcmop_id):
6063 # Try to lock HA task here
6064 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6065 if not task_is_locked_by_me:
6066 return
6067
6068 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6069 stage = ["", "", ""]
6070 tasks_dict_info = {}
6071 # ^ stage, step, VIM progress
6072 self.logger.debug(logging_text + "Enter")
6073 # get all needed from database
6074 db_nsr = None
6075 db_nslcmop_update = {}
6076 db_nsr_update = {}
6077 exc = None
6078 # in case of error, indicates what part of scale was failed to put nsr at error status
6079 scale_process = None
6080 old_operational_status = ""
6081 old_config_status = ""
6082 nsi_id = None
6083 try:
6084 # wait for any previous tasks in process
6085 step = "Waiting for previous operations to terminate"
6086 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6087 self._write_ns_status(
6088 nsr_id=nsr_id,
6089 ns_state=None,
6090 current_operation="SCALING",
6091 current_operation_id=nslcmop_id,
6092 )
6093
6094 step = "Getting nslcmop from database"
6095 self.logger.debug(
6096 step + " after having waited for previous tasks to be completed"
6097 )
6098 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6099
6100 step = "Getting nsr from database"
6101 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6102 old_operational_status = db_nsr["operational-status"]
6103 old_config_status = db_nsr["config-status"]
6104
6105 step = "Parsing scaling parameters"
6106 db_nsr_update["operational-status"] = "scaling"
6107 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6108 nsr_deployed = db_nsr["_admin"].get("deployed")
6109
6110 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6111 "scaleByStepData"
6112 ]["member-vnf-index"]
6113 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6114 "scaleByStepData"
6115 ]["scaling-group-descriptor"]
6116 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6117 # for backward compatibility
6118 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6119 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6120 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6121 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6122
6123 step = "Getting vnfr from database"
6124 db_vnfr = self.db.get_one(
6125 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6126 )
6127
6128 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6129
6130 step = "Getting vnfd from database"
6131 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6132
6133 base_folder = db_vnfd["_admin"]["storage"]
6134
6135 step = "Getting scaling-group-descriptor"
6136 scaling_descriptor = find_in_list(
6137 get_scaling_aspect(db_vnfd),
6138 lambda scale_desc: scale_desc["name"] == scaling_group,
6139 )
6140 if not scaling_descriptor:
6141 raise LcmException(
6142 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6143 "at vnfd:scaling-group-descriptor".format(scaling_group)
6144 )
6145
6146 step = "Sending scale order to VIM"
6147 # TODO check if ns is in a proper status
6148 nb_scale_op = 0
6149 if not db_nsr["_admin"].get("scaling-group"):
6150 self.update_db_2(
6151 "nsrs",
6152 nsr_id,
6153 {
6154 "_admin.scaling-group": [
6155 {"name": scaling_group, "nb-scale-op": 0}
6156 ]
6157 },
6158 )
6159 admin_scale_index = 0
6160 else:
6161 for admin_scale_index, admin_scale_info in enumerate(
6162 db_nsr["_admin"]["scaling-group"]
6163 ):
6164 if admin_scale_info["name"] == scaling_group:
6165 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6166 break
6167 else: # not found, set index one plus last element and add new entry with the name
6168 admin_scale_index += 1
6169 db_nsr_update[
6170 "_admin.scaling-group.{}.name".format(admin_scale_index)
6171 ] = scaling_group
6172
6173 vca_scaling_info = []
6174 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6175 if scaling_type == "SCALE_OUT":
6176 if "aspect-delta-details" not in scaling_descriptor:
6177 raise LcmException(
6178 "Aspect delta details not fount in scaling descriptor {}".format(
6179 scaling_descriptor["name"]
6180 )
6181 )
6182 # count if max-instance-count is reached
6183 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6184
6185 scaling_info["scaling_direction"] = "OUT"
6186 scaling_info["vdu-create"] = {}
6187 scaling_info["kdu-create"] = {}
6188 for delta in deltas:
6189 for vdu_delta in delta.get("vdu-delta", {}):
6190 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6191 # vdu_index also provides the number of instance of the targeted vdu
6192 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6193 cloud_init_text = self._get_vdu_cloud_init_content(
6194 vdud, db_vnfd
6195 )
6196 if cloud_init_text:
6197 additional_params = (
6198 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6199 or {}
6200 )
6201 cloud_init_list = []
6202
6203 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6204 max_instance_count = 10
6205 if vdu_profile and "max-number-of-instances" in vdu_profile:
6206 max_instance_count = vdu_profile.get(
6207 "max-number-of-instances", 10
6208 )
6209
6210 default_instance_num = get_number_of_instances(
6211 db_vnfd, vdud["id"]
6212 )
6213 instances_number = vdu_delta.get("number-of-instances", 1)
6214 nb_scale_op += instances_number
6215
6216 new_instance_count = nb_scale_op + default_instance_num
6217 # Control if new count is over max and vdu count is less than max.
6218 # Then assign new instance count
6219 if new_instance_count > max_instance_count > vdu_count:
6220 instances_number = new_instance_count - max_instance_count
6221 else:
6222 instances_number = instances_number
6223
6224 if new_instance_count > max_instance_count:
6225 raise LcmException(
6226 "reached the limit of {} (max-instance-count) "
6227 "scaling-out operations for the "
6228 "scaling-group-descriptor '{}'".format(
6229 nb_scale_op, scaling_group
6230 )
6231 )
6232 for x in range(vdu_delta.get("number-of-instances", 1)):
6233 if cloud_init_text:
6234 # TODO Information of its own ip is not available because db_vnfr is not updated.
6235 additional_params["OSM"] = get_osm_params(
6236 db_vnfr, vdu_delta["id"], vdu_index + x
6237 )
6238 cloud_init_list.append(
6239 self._parse_cloud_init(
6240 cloud_init_text,
6241 additional_params,
6242 db_vnfd["id"],
6243 vdud["id"],
6244 )
6245 )
6246 vca_scaling_info.append(
6247 {
6248 "osm_vdu_id": vdu_delta["id"],
6249 "member-vnf-index": vnf_index,
6250 "type": "create",
6251 "vdu_index": vdu_index + x,
6252 }
6253 )
6254 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6255 for kdu_delta in delta.get("kdu-resource-delta", {}):
6256 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6257 kdu_name = kdu_profile["kdu-name"]
6258 resource_name = kdu_profile.get("resource-name", "")
6259
6260 # Might have different kdus in the same delta
6261 # Should have list for each kdu
6262 if not scaling_info["kdu-create"].get(kdu_name, None):
6263 scaling_info["kdu-create"][kdu_name] = []
6264
6265 kdur = get_kdur(db_vnfr, kdu_name)
6266 if kdur.get("helm-chart"):
6267 k8s_cluster_type = "helm-chart-v3"
6268 self.logger.debug("kdur: {}".format(kdur))
6269 if (
6270 kdur.get("helm-version")
6271 and kdur.get("helm-version") == "v2"
6272 ):
6273 k8s_cluster_type = "helm-chart"
6274 elif kdur.get("juju-bundle"):
6275 k8s_cluster_type = "juju-bundle"
6276 else:
6277 raise LcmException(
6278 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6279 "juju-bundle. Maybe an old NBI version is running".format(
6280 db_vnfr["member-vnf-index-ref"], kdu_name
6281 )
6282 )
6283
6284 max_instance_count = 10
6285 if kdu_profile and "max-number-of-instances" in kdu_profile:
6286 max_instance_count = kdu_profile.get(
6287 "max-number-of-instances", 10
6288 )
6289
6290 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6291 deployed_kdu, _ = get_deployed_kdu(
6292 nsr_deployed, kdu_name, vnf_index
6293 )
6294 if deployed_kdu is None:
6295 raise LcmException(
6296 "KDU '{}' for vnf '{}' not deployed".format(
6297 kdu_name, vnf_index
6298 )
6299 )
6300 kdu_instance = deployed_kdu.get("kdu-instance")
6301 instance_num = await self.k8scluster_map[
6302 k8s_cluster_type
6303 ].get_scale_count(
6304 resource_name,
6305 kdu_instance,
6306 vca_id=vca_id,
6307 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6308 kdu_model=deployed_kdu.get("kdu-model"),
6309 )
6310 kdu_replica_count = instance_num + kdu_delta.get(
6311 "number-of-instances", 1
6312 )
6313
6314 # Control if new count is over max and instance_num is less than max.
6315 # Then assign max instance number to kdu replica count
6316 if kdu_replica_count > max_instance_count > instance_num:
6317 kdu_replica_count = max_instance_count
6318 if kdu_replica_count > max_instance_count:
6319 raise LcmException(
6320 "reached the limit of {} (max-instance-count) "
6321 "scaling-out operations for the "
6322 "scaling-group-descriptor '{}'".format(
6323 instance_num, scaling_group
6324 )
6325 )
6326
6327 for x in range(kdu_delta.get("number-of-instances", 1)):
6328 vca_scaling_info.append(
6329 {
6330 "osm_kdu_id": kdu_name,
6331 "member-vnf-index": vnf_index,
6332 "type": "create",
6333 "kdu_index": instance_num + x - 1,
6334 }
6335 )
6336 scaling_info["kdu-create"][kdu_name].append(
6337 {
6338 "member-vnf-index": vnf_index,
6339 "type": "create",
6340 "k8s-cluster-type": k8s_cluster_type,
6341 "resource-name": resource_name,
6342 "scale": kdu_replica_count,
6343 }
6344 )
6345 elif scaling_type == "SCALE_IN":
6346 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6347
6348 scaling_info["scaling_direction"] = "IN"
6349 scaling_info["vdu-delete"] = {}
6350 scaling_info["kdu-delete"] = {}
6351
6352 for delta in deltas:
6353 for vdu_delta in delta.get("vdu-delta", {}):
6354 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6355 min_instance_count = 0
6356 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6357 if vdu_profile and "min-number-of-instances" in vdu_profile:
6358 min_instance_count = vdu_profile["min-number-of-instances"]
6359
6360 default_instance_num = get_number_of_instances(
6361 db_vnfd, vdu_delta["id"]
6362 )
6363 instance_num = vdu_delta.get("number-of-instances", 1)
6364 nb_scale_op -= instance_num
6365
6366 new_instance_count = nb_scale_op + default_instance_num
6367
6368 if new_instance_count < min_instance_count < vdu_count:
6369 instances_number = min_instance_count - new_instance_count
6370 else:
6371 instances_number = instance_num
6372
6373 if new_instance_count < min_instance_count:
6374 raise LcmException(
6375 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6376 "scaling-group-descriptor '{}'".format(
6377 nb_scale_op, scaling_group
6378 )
6379 )
6380 for x in range(vdu_delta.get("number-of-instances", 1)):
6381 vca_scaling_info.append(
6382 {
6383 "osm_vdu_id": vdu_delta["id"],
6384 "member-vnf-index": vnf_index,
6385 "type": "delete",
6386 "vdu_index": vdu_index - 1 - x,
6387 }
6388 )
6389 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6390 for kdu_delta in delta.get("kdu-resource-delta", {}):
6391 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6392 kdu_name = kdu_profile["kdu-name"]
6393 resource_name = kdu_profile.get("resource-name", "")
6394
6395 if not scaling_info["kdu-delete"].get(kdu_name, None):
6396 scaling_info["kdu-delete"][kdu_name] = []
6397
6398 kdur = get_kdur(db_vnfr, kdu_name)
6399 if kdur.get("helm-chart"):
6400 k8s_cluster_type = "helm-chart-v3"
6401 self.logger.debug("kdur: {}".format(kdur))
6402 if (
6403 kdur.get("helm-version")
6404 and kdur.get("helm-version") == "v2"
6405 ):
6406 k8s_cluster_type = "helm-chart"
6407 elif kdur.get("juju-bundle"):
6408 k8s_cluster_type = "juju-bundle"
6409 else:
6410 raise LcmException(
6411 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6412 "juju-bundle. Maybe an old NBI version is running".format(
6413 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6414 )
6415 )
6416
6417 min_instance_count = 0
6418 if kdu_profile and "min-number-of-instances" in kdu_profile:
6419 min_instance_count = kdu_profile["min-number-of-instances"]
6420
6421 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6422 deployed_kdu, _ = get_deployed_kdu(
6423 nsr_deployed, kdu_name, vnf_index
6424 )
6425 if deployed_kdu is None:
6426 raise LcmException(
6427 "KDU '{}' for vnf '{}' not deployed".format(
6428 kdu_name, vnf_index
6429 )
6430 )
6431 kdu_instance = deployed_kdu.get("kdu-instance")
6432 instance_num = await self.k8scluster_map[
6433 k8s_cluster_type
6434 ].get_scale_count(
6435 resource_name,
6436 kdu_instance,
6437 vca_id=vca_id,
6438 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6439 kdu_model=deployed_kdu.get("kdu-model"),
6440 )
6441 kdu_replica_count = instance_num - kdu_delta.get(
6442 "number-of-instances", 1
6443 )
6444
6445 if kdu_replica_count < min_instance_count < instance_num:
6446 kdu_replica_count = min_instance_count
6447 if kdu_replica_count < min_instance_count:
6448 raise LcmException(
6449 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6450 "scaling-group-descriptor '{}'".format(
6451 instance_num, scaling_group
6452 )
6453 )
6454
6455 for x in range(kdu_delta.get("number-of-instances", 1)):
6456 vca_scaling_info.append(
6457 {
6458 "osm_kdu_id": kdu_name,
6459 "member-vnf-index": vnf_index,
6460 "type": "delete",
6461 "kdu_index": instance_num - x - 1,
6462 }
6463 )
6464 scaling_info["kdu-delete"][kdu_name].append(
6465 {
6466 "member-vnf-index": vnf_index,
6467 "type": "delete",
6468 "k8s-cluster-type": k8s_cluster_type,
6469 "resource-name": resource_name,
6470 "scale": kdu_replica_count,
6471 }
6472 )
6473
6474 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6475 vdu_delete = copy(scaling_info.get("vdu-delete"))
6476 if scaling_info["scaling_direction"] == "IN":
6477 for vdur in reversed(db_vnfr["vdur"]):
6478 if vdu_delete.get(vdur["vdu-id-ref"]):
6479 vdu_delete[vdur["vdu-id-ref"]] -= 1
6480 scaling_info["vdu"].append(
6481 {
6482 "name": vdur.get("name") or vdur.get("vdu-name"),
6483 "vdu_id": vdur["vdu-id-ref"],
6484 "interface": [],
6485 }
6486 )
6487 for interface in vdur["interfaces"]:
6488 scaling_info["vdu"][-1]["interface"].append(
6489 {
6490 "name": interface["name"],
6491 "ip_address": interface["ip-address"],
6492 "mac_address": interface.get("mac-address"),
6493 }
6494 )
6495 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6496
6497 # PRE-SCALE BEGIN
6498 step = "Executing pre-scale vnf-config-primitive"
6499 if scaling_descriptor.get("scaling-config-action"):
6500 for scaling_config_action in scaling_descriptor[
6501 "scaling-config-action"
6502 ]:
6503 if (
6504 scaling_config_action.get("trigger") == "pre-scale-in"
6505 and scaling_type == "SCALE_IN"
6506 ) or (
6507 scaling_config_action.get("trigger") == "pre-scale-out"
6508 and scaling_type == "SCALE_OUT"
6509 ):
6510 vnf_config_primitive = scaling_config_action[
6511 "vnf-config-primitive-name-ref"
6512 ]
6513 step = db_nslcmop_update[
6514 "detailed-status"
6515 ] = "executing pre-scale scaling-config-action '{}'".format(
6516 vnf_config_primitive
6517 )
6518
6519 # look for primitive
6520 for config_primitive in (
6521 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6522 ).get("config-primitive", ()):
6523 if config_primitive["name"] == vnf_config_primitive:
6524 break
6525 else:
6526 raise LcmException(
6527 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6528 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6529 "primitive".format(scaling_group, vnf_config_primitive)
6530 )
6531
6532 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6533 if db_vnfr.get("additionalParamsForVnf"):
6534 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6535
6536 scale_process = "VCA"
6537 db_nsr_update["config-status"] = "configuring pre-scaling"
6538 primitive_params = self._map_primitive_params(
6539 config_primitive, {}, vnfr_params
6540 )
6541
6542 # Pre-scale retry check: Check if this sub-operation has been executed before
6543 op_index = self._check_or_add_scale_suboperation(
6544 db_nslcmop,
6545 vnf_index,
6546 vnf_config_primitive,
6547 primitive_params,
6548 "PRE-SCALE",
6549 )
6550 if op_index == self.SUBOPERATION_STATUS_SKIP:
6551 # Skip sub-operation
6552 result = "COMPLETED"
6553 result_detail = "Done"
6554 self.logger.debug(
6555 logging_text
6556 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6557 vnf_config_primitive, result, result_detail
6558 )
6559 )
6560 else:
6561 if op_index == self.SUBOPERATION_STATUS_NEW:
6562 # New sub-operation: Get index of this sub-operation
6563 op_index = (
6564 len(db_nslcmop.get("_admin", {}).get("operations"))
6565 - 1
6566 )
6567 self.logger.debug(
6568 logging_text
6569 + "vnf_config_primitive={} New sub-operation".format(
6570 vnf_config_primitive
6571 )
6572 )
6573 else:
6574 # retry: Get registered params for this existing sub-operation
6575 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6576 op_index
6577 ]
6578 vnf_index = op.get("member_vnf_index")
6579 vnf_config_primitive = op.get("primitive")
6580 primitive_params = op.get("primitive_params")
6581 self.logger.debug(
6582 logging_text
6583 + "vnf_config_primitive={} Sub-operation retry".format(
6584 vnf_config_primitive
6585 )
6586 )
6587 # Execute the primitive, either with new (first-time) or registered (reintent) args
6588 ee_descriptor_id = config_primitive.get(
6589 "execution-environment-ref"
6590 )
6591 primitive_name = config_primitive.get(
6592 "execution-environment-primitive", vnf_config_primitive
6593 )
6594 ee_id, vca_type = self._look_for_deployed_vca(
6595 nsr_deployed["VCA"],
6596 member_vnf_index=vnf_index,
6597 vdu_id=None,
6598 vdu_count_index=None,
6599 ee_descriptor_id=ee_descriptor_id,
6600 )
6601 result, result_detail = await self._ns_execute_primitive(
6602 ee_id,
6603 primitive_name,
6604 primitive_params,
6605 vca_type=vca_type,
6606 vca_id=vca_id,
6607 )
6608 self.logger.debug(
6609 logging_text
6610 + "vnf_config_primitive={} Done with result {} {}".format(
6611 vnf_config_primitive, result, result_detail
6612 )
6613 )
6614 # Update operationState = COMPLETED | FAILED
6615 self._update_suboperation_status(
6616 db_nslcmop, op_index, result, result_detail
6617 )
6618
6619 if result == "FAILED":
6620 raise LcmException(result_detail)
6621 db_nsr_update["config-status"] = old_config_status
6622 scale_process = None
6623 # PRE-SCALE END
6624
6625 db_nsr_update[
6626 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6627 ] = nb_scale_op
6628 db_nsr_update[
6629 "_admin.scaling-group.{}.time".format(admin_scale_index)
6630 ] = time()
6631
6632 # SCALE-IN VCA - BEGIN
6633 if vca_scaling_info:
6634 step = db_nslcmop_update[
6635 "detailed-status"
6636 ] = "Deleting the execution environments"
6637 scale_process = "VCA"
6638 for vca_info in vca_scaling_info:
6639 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6640 member_vnf_index = str(vca_info["member-vnf-index"])
6641 self.logger.debug(
6642 logging_text + "vdu info: {}".format(vca_info)
6643 )
6644 if vca_info.get("osm_vdu_id"):
6645 vdu_id = vca_info["osm_vdu_id"]
6646 vdu_index = int(vca_info["vdu_index"])
6647 stage[
6648 1
6649 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6650 member_vnf_index, vdu_id, vdu_index
6651 )
6652 stage[2] = step = "Scaling in VCA"
6653 self._write_op_status(op_id=nslcmop_id, stage=stage)
6654 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6655 config_update = db_nsr["configurationStatus"]
6656 for vca_index, vca in enumerate(vca_update):
6657 if (
6658 (vca or vca.get("ee_id"))
6659 and vca["member-vnf-index"] == member_vnf_index
6660 and vca["vdu_count_index"] == vdu_index
6661 ):
6662 if vca.get("vdu_id"):
6663 config_descriptor = get_configuration(
6664 db_vnfd, vca.get("vdu_id")
6665 )
6666 elif vca.get("kdu_name"):
6667 config_descriptor = get_configuration(
6668 db_vnfd, vca.get("kdu_name")
6669 )
6670 else:
6671 config_descriptor = get_configuration(
6672 db_vnfd, db_vnfd["id"]
6673 )
6674 operation_params = (
6675 db_nslcmop.get("operationParams") or {}
6676 )
6677 exec_terminate_primitives = not operation_params.get(
6678 "skip_terminate_primitives"
6679 ) and vca.get("needed_terminate")
6680 task = asyncio.ensure_future(
6681 asyncio.wait_for(
6682 self.destroy_N2VC(
6683 logging_text,
6684 db_nslcmop,
6685 vca,
6686 config_descriptor,
6687 vca_index,
6688 destroy_ee=True,
6689 exec_primitives=exec_terminate_primitives,
6690 scaling_in=True,
6691 vca_id=vca_id,
6692 ),
6693 timeout=self.timeout.charm_delete,
6694 )
6695 )
6696 tasks_dict_info[task] = "Terminating VCA {}".format(
6697 vca.get("ee_id")
6698 )
6699 del vca_update[vca_index]
6700 del config_update[vca_index]
6701 # wait for pending tasks of terminate primitives
6702 if tasks_dict_info:
6703 self.logger.debug(
6704 logging_text
6705 + "Waiting for tasks {}".format(
6706 list(tasks_dict_info.keys())
6707 )
6708 )
6709 error_list = await self._wait_for_tasks(
6710 logging_text,
6711 tasks_dict_info,
6712 min(
6713 self.timeout.charm_delete, self.timeout.ns_terminate
6714 ),
6715 stage,
6716 nslcmop_id,
6717 )
6718 tasks_dict_info.clear()
6719 if error_list:
6720 raise LcmException("; ".join(error_list))
6721
6722 db_vca_and_config_update = {
6723 "_admin.deployed.VCA": vca_update,
6724 "configurationStatus": config_update,
6725 }
6726 self.update_db_2(
6727 "nsrs", db_nsr["_id"], db_vca_and_config_update
6728 )
6729 scale_process = None
6730 # SCALE-IN VCA - END
6731
6732 # SCALE RO - BEGIN
6733 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6734 scale_process = "RO"
6735 if self.ro_config.ng:
6736 await self._scale_ng_ro(
6737 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6738 )
6739 scaling_info.pop("vdu-create", None)
6740 scaling_info.pop("vdu-delete", None)
6741
6742 scale_process = None
6743 # SCALE RO - END
6744
6745 # SCALE KDU - BEGIN
6746 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6747 scale_process = "KDU"
6748 await self._scale_kdu(
6749 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6750 )
6751 scaling_info.pop("kdu-create", None)
6752 scaling_info.pop("kdu-delete", None)
6753
6754 scale_process = None
6755 # SCALE KDU - END
6756
6757 if db_nsr_update:
6758 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6759
6760 # SCALE-UP VCA - BEGIN
6761 if vca_scaling_info:
6762 step = db_nslcmop_update[
6763 "detailed-status"
6764 ] = "Creating new execution environments"
6765 scale_process = "VCA"
6766 for vca_info in vca_scaling_info:
6767 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6768 member_vnf_index = str(vca_info["member-vnf-index"])
6769 self.logger.debug(
6770 logging_text + "vdu info: {}".format(vca_info)
6771 )
6772 vnfd_id = db_vnfr["vnfd-ref"]
6773 if vca_info.get("osm_vdu_id"):
6774 vdu_index = int(vca_info["vdu_index"])
6775 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6776 if db_vnfr.get("additionalParamsForVnf"):
6777 deploy_params.update(
6778 parse_yaml_strings(
6779 db_vnfr["additionalParamsForVnf"].copy()
6780 )
6781 )
6782 descriptor_config = get_configuration(
6783 db_vnfd, db_vnfd["id"]
6784 )
6785 if descriptor_config:
6786 vdu_id = None
6787 vdu_name = None
6788 kdu_name = None
6789 self._deploy_n2vc(
6790 logging_text=logging_text
6791 + "member_vnf_index={} ".format(member_vnf_index),
6792 db_nsr=db_nsr,
6793 db_vnfr=db_vnfr,
6794 nslcmop_id=nslcmop_id,
6795 nsr_id=nsr_id,
6796 nsi_id=nsi_id,
6797 vnfd_id=vnfd_id,
6798 vdu_id=vdu_id,
6799 kdu_name=kdu_name,
6800 member_vnf_index=member_vnf_index,
6801 vdu_index=vdu_index,
6802 vdu_name=vdu_name,
6803 deploy_params=deploy_params,
6804 descriptor_config=descriptor_config,
6805 base_folder=base_folder,
6806 task_instantiation_info=tasks_dict_info,
6807 stage=stage,
6808 )
6809 vdu_id = vca_info["osm_vdu_id"]
6810 vdur = find_in_list(
6811 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6812 )
6813 descriptor_config = get_configuration(db_vnfd, vdu_id)
6814 if vdur.get("additionalParams"):
6815 deploy_params_vdu = parse_yaml_strings(
6816 vdur["additionalParams"]
6817 )
6818 else:
6819 deploy_params_vdu = deploy_params
6820 deploy_params_vdu["OSM"] = get_osm_params(
6821 db_vnfr, vdu_id, vdu_count_index=vdu_index
6822 )
6823 if descriptor_config:
6824 vdu_name = None
6825 kdu_name = None
6826 stage[
6827 1
6828 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6829 member_vnf_index, vdu_id, vdu_index
6830 )
6831 stage[2] = step = "Scaling out VCA"
6832 self._write_op_status(op_id=nslcmop_id, stage=stage)
6833 self._deploy_n2vc(
6834 logging_text=logging_text
6835 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6836 member_vnf_index, vdu_id, vdu_index
6837 ),
6838 db_nsr=db_nsr,
6839 db_vnfr=db_vnfr,
6840 nslcmop_id=nslcmop_id,
6841 nsr_id=nsr_id,
6842 nsi_id=nsi_id,
6843 vnfd_id=vnfd_id,
6844 vdu_id=vdu_id,
6845 kdu_name=kdu_name,
6846 member_vnf_index=member_vnf_index,
6847 vdu_index=vdu_index,
6848 vdu_name=vdu_name,
6849 deploy_params=deploy_params_vdu,
6850 descriptor_config=descriptor_config,
6851 base_folder=base_folder,
6852 task_instantiation_info=tasks_dict_info,
6853 stage=stage,
6854 )
6855 # SCALE-UP VCA - END
6856 scale_process = None
6857
6858 # POST-SCALE BEGIN
6859 # execute primitive service POST-SCALING
6860 step = "Executing post-scale vnf-config-primitive"
6861 if scaling_descriptor.get("scaling-config-action"):
6862 for scaling_config_action in scaling_descriptor[
6863 "scaling-config-action"
6864 ]:
6865 if (
6866 scaling_config_action.get("trigger") == "post-scale-in"
6867 and scaling_type == "SCALE_IN"
6868 ) or (
6869 scaling_config_action.get("trigger") == "post-scale-out"
6870 and scaling_type == "SCALE_OUT"
6871 ):
6872 vnf_config_primitive = scaling_config_action[
6873 "vnf-config-primitive-name-ref"
6874 ]
6875 step = db_nslcmop_update[
6876 "detailed-status"
6877 ] = "executing post-scale scaling-config-action '{}'".format(
6878 vnf_config_primitive
6879 )
6880
6881 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6882 if db_vnfr.get("additionalParamsForVnf"):
6883 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6884
6885 # look for primitive
6886 for config_primitive in (
6887 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6888 ).get("config-primitive", ()):
6889 if config_primitive["name"] == vnf_config_primitive:
6890 break
6891 else:
6892 raise LcmException(
6893 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6894 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6895 "config-primitive".format(
6896 scaling_group, vnf_config_primitive
6897 )
6898 )
6899 scale_process = "VCA"
6900 db_nsr_update["config-status"] = "configuring post-scaling"
6901 primitive_params = self._map_primitive_params(
6902 config_primitive, {}, vnfr_params
6903 )
6904
6905 # Post-scale retry check: Check if this sub-operation has been executed before
6906 op_index = self._check_or_add_scale_suboperation(
6907 db_nslcmop,
6908 vnf_index,
6909 vnf_config_primitive,
6910 primitive_params,
6911 "POST-SCALE",
6912 )
6913 if op_index == self.SUBOPERATION_STATUS_SKIP:
6914 # Skip sub-operation
6915 result = "COMPLETED"
6916 result_detail = "Done"
6917 self.logger.debug(
6918 logging_text
6919 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6920 vnf_config_primitive, result, result_detail
6921 )
6922 )
6923 else:
6924 if op_index == self.SUBOPERATION_STATUS_NEW:
6925 # New sub-operation: Get index of this sub-operation
6926 op_index = (
6927 len(db_nslcmop.get("_admin", {}).get("operations"))
6928 - 1
6929 )
6930 self.logger.debug(
6931 logging_text
6932 + "vnf_config_primitive={} New sub-operation".format(
6933 vnf_config_primitive
6934 )
6935 )
6936 else:
6937 # retry: Get registered params for this existing sub-operation
6938 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6939 op_index
6940 ]
6941 vnf_index = op.get("member_vnf_index")
6942 vnf_config_primitive = op.get("primitive")
6943 primitive_params = op.get("primitive_params")
6944 self.logger.debug(
6945 logging_text
6946 + "vnf_config_primitive={} Sub-operation retry".format(
6947 vnf_config_primitive
6948 )
6949 )
6950 # Execute the primitive, either with new (first-time) or registered (reintent) args
6951 ee_descriptor_id = config_primitive.get(
6952 "execution-environment-ref"
6953 )
6954 primitive_name = config_primitive.get(
6955 "execution-environment-primitive", vnf_config_primitive
6956 )
6957 ee_id, vca_type = self._look_for_deployed_vca(
6958 nsr_deployed["VCA"],
6959 member_vnf_index=vnf_index,
6960 vdu_id=None,
6961 vdu_count_index=None,
6962 ee_descriptor_id=ee_descriptor_id,
6963 )
6964 result, result_detail = await self._ns_execute_primitive(
6965 ee_id,
6966 primitive_name,
6967 primitive_params,
6968 vca_type=vca_type,
6969 vca_id=vca_id,
6970 )
6971 self.logger.debug(
6972 logging_text
6973 + "vnf_config_primitive={} Done with result {} {}".format(
6974 vnf_config_primitive, result, result_detail
6975 )
6976 )
6977 # Update operationState = COMPLETED | FAILED
6978 self._update_suboperation_status(
6979 db_nslcmop, op_index, result, result_detail
6980 )
6981
6982 if result == "FAILED":
6983 raise LcmException(result_detail)
6984 db_nsr_update["config-status"] = old_config_status
6985 scale_process = None
6986 # POST-SCALE END
6987
6988 db_nsr_update[
6989 "detailed-status"
6990 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6991 db_nsr_update["operational-status"] = (
6992 "running"
6993 if old_operational_status == "failed"
6994 else old_operational_status
6995 )
6996 db_nsr_update["config-status"] = old_config_status
6997 return
6998 except (
6999 ROclient.ROClientException,
7000 DbException,
7001 LcmException,
7002 NgRoException,
7003 ) as e:
7004 self.logger.error(logging_text + "Exit Exception {}".format(e))
7005 exc = e
7006 except asyncio.CancelledError:
7007 self.logger.error(
7008 logging_text + "Cancelled Exception while '{}'".format(step)
7009 )
7010 exc = "Operation was cancelled"
7011 except Exception as e:
7012 exc = traceback.format_exc()
7013 self.logger.critical(
7014 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7015 exc_info=True,
7016 )
7017 finally:
7018 self._write_ns_status(
7019 nsr_id=nsr_id,
7020 ns_state=None,
7021 current_operation="IDLE",
7022 current_operation_id=None,
7023 )
7024 if tasks_dict_info:
7025 stage[1] = "Waiting for instantiate pending tasks."
7026 self.logger.debug(logging_text + stage[1])
7027 exc = await self._wait_for_tasks(
7028 logging_text,
7029 tasks_dict_info,
7030 self.timeout.ns_deploy,
7031 stage,
7032 nslcmop_id,
7033 nsr_id=nsr_id,
7034 )
7035 if exc:
7036 db_nslcmop_update[
7037 "detailed-status"
7038 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7039 nslcmop_operation_state = "FAILED"
7040 if db_nsr:
7041 db_nsr_update["operational-status"] = old_operational_status
7042 db_nsr_update["config-status"] = old_config_status
7043 db_nsr_update["detailed-status"] = ""
7044 if scale_process:
7045 if "VCA" in scale_process:
7046 db_nsr_update["config-status"] = "failed"
7047 if "RO" in scale_process:
7048 db_nsr_update["operational-status"] = "failed"
7049 db_nsr_update[
7050 "detailed-status"
7051 ] = "FAILED scaling nslcmop={} {}: {}".format(
7052 nslcmop_id, step, exc
7053 )
7054 else:
7055 error_description_nslcmop = None
7056 nslcmop_operation_state = "COMPLETED"
7057 db_nslcmop_update["detailed-status"] = "Done"
7058
7059 self._write_op_status(
7060 op_id=nslcmop_id,
7061 stage="",
7062 error_message=error_description_nslcmop,
7063 operation_state=nslcmop_operation_state,
7064 other_update=db_nslcmop_update,
7065 )
7066 if db_nsr:
7067 self._write_ns_status(
7068 nsr_id=nsr_id,
7069 ns_state=None,
7070 current_operation="IDLE",
7071 current_operation_id=None,
7072 other_update=db_nsr_update,
7073 )
7074
7075 if nslcmop_operation_state:
7076 try:
7077 msg = {
7078 "nsr_id": nsr_id,
7079 "nslcmop_id": nslcmop_id,
7080 "operationState": nslcmop_operation_state,
7081 }
7082 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7083 except Exception as e:
7084 self.logger.error(
7085 logging_text + "kafka_write notification Exception {}".format(e)
7086 )
7087 self.logger.debug(logging_text + "Exit")
7088 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7089
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs of a NS at the K8s cluster.

        For each KDU in scaling_info, runs (in this order):
        1. the descriptor's terminate-config-primitives (only for "delete" and
           only when no juju execution environment handles them),
        2. the actual K8s scale operation,
        3. the descriptor's initial-config-primitives (only for "create",
           same juju-EE condition).

        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity, used to address the nsrs DB record
        :param nsr_deployed: nsr _admin.deployed content (K8s deployment info)
        :param db_vnfd: vnfd database content of the vnf being scaled
        :param vca_id: VCA identity forwarded to the K8s connector
        :param scaling_info: dict with "kdu-create" or "kdu-delete" entries,
            each mapping kdu_name -> list of per-instance scaling records
        """
        # Only one of kdu-create / kdu-delete is expected per invocation
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # DB location where the K8s connector reports progress
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    # Run terminate primitives directly against the KDU only
                    # when no juju execution environment is declared for it
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # Primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # Outer wait_for guards against a connector that
                            # ignores its own total_timeout
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    total_timeout=self.timeout.primitive,
                                    vca_id=vca_id,
                                ),
                                timeout=self.timeout.primitive
                                * self.timeout.primitive_outer_factor,
                            )

                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance=kdu_instance,
                        scale=scale,
                        resource_name=kdu_scaling_info["resource-name"],
                        total_timeout=self.timeout.scale_on_error,
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout.scale_on_error
                    * self.timeout.scale_on_error_outer_factor,
                )

                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # Primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                # NOTE(review): hard-coded 600 s, unlike the
                                # terminate path which uses self.timeout.primitive
                                # and its outer factor — confirm if intentional
                                timeout=600,
                            )
7199
7200 async def _scale_ng_ro(
7201 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7202 ):
7203 nsr_id = db_nslcmop["nsInstanceId"]
7204 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7205 db_vnfrs = {}
7206
7207 # read from db: vnfd's for every vnf
7208 db_vnfds = []
7209
7210 # for each vnf in ns, read vnfd
7211 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7212 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7213 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7214 # if we haven't this vnfd, read it from db
7215 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7216 # read from db
7217 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7218 db_vnfds.append(vnfd)
7219 n2vc_key = self.n2vc.get_public_key()
7220 n2vc_key_list = [n2vc_key]
7221 self.scale_vnfr(
7222 db_vnfr,
7223 vdu_scaling_info.get("vdu-create"),
7224 vdu_scaling_info.get("vdu-delete"),
7225 mark_delete=True,
7226 )
7227 # db_vnfr has been updated, update db_vnfrs to use it
7228 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7229 await self._instantiate_ng_ro(
7230 logging_text,
7231 nsr_id,
7232 db_nsd,
7233 db_nsr,
7234 db_nslcmop,
7235 db_vnfrs,
7236 db_vnfds,
7237 n2vc_key_list,
7238 stage=stage,
7239 start_deploy=time(),
7240 timeout_ns_deploy=self.timeout.ns_deploy,
7241 )
7242 if vdu_scaling_info.get("vdu-delete"):
7243 self.scale_vnfr(
7244 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7245 )
7246
7247 async def extract_prometheus_scrape_jobs(
7248 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7249 ):
7250 # look if exist a file called 'prometheus*.j2' and
7251 artifact_content = self.fs.dir_ls(artifact_path)
7252 job_file = next(
7253 (
7254 f
7255 for f in artifact_content
7256 if f.startswith("prometheus") and f.endswith(".j2")
7257 ),
7258 None,
7259 )
7260 if not job_file:
7261 return
7262 with self.fs.file_open((artifact_path, job_file), "r") as f:
7263 job_data = f.read()
7264
7265 # TODO get_service
7266 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7267 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7268 host_port = "80"
7269 vnfr_id = vnfr_id.replace("-", "")
7270 variables = {
7271 "JOB_NAME": vnfr_id,
7272 "TARGET_IP": target_ip,
7273 "EXPORTER_POD_IP": host_name,
7274 "EXPORTER_POD_PORT": host_port,
7275 }
7276 job_list = parse_job(job_data, variables)
7277 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7278 for job in job_list:
7279 if (
7280 not isinstance(job.get("job_name"), str)
7281 or vnfr_id not in job["job_name"]
7282 ):
7283 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7284 job["nsr_id"] = nsr_id
7285 job["vnfr_id"] = vnfr_id
7286 return job_list
7287
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild a single VDU instance through RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the operation being run
        :param vnf_id: vnfr _id of the target vnf
        :param additional_param: dict with at least "vdu_id" and "count-index"
            identifying the target VDU instance
        :param operation_type: one of the RO operate actions (e.g. start/stop/rebuild)
        :return: tuple (operation state, detail message):
            ("COMPLETED", "Done") or ("FAILED", "Error in operate VNF ...")
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            # Select the vdur's of the requested vdu, then the exact count-index
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # First key of vim_info identifies the target VIM
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            # Block until RO finishes (or times out) the requested action
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # Only reached through one of the except branches above (each sets exc)
        return "FAILED", "Error in operate VNF {}".format(exc)
7374
7375 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7376 """
7377 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7378
7379 :param: vim_account_id: VIM Account ID
7380
7381 :return: (cloud_name, cloud_credential)
7382 """
7383 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7384 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7385
7386 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7387 """
7388 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7389
7390 :param: vim_account_id: VIM Account ID
7391
7392 :return: (cloud_name, cloud_credential)
7393 """
7394 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7395 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7396
    async def migrate(self, nsr_id, nslcmop_id):
        """
        Migrate VNFs and VDUs instances in a NS

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of migrate

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} migrate ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never written to the
        # nsrs collection in this method — verify whether that is intentional
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="MIGRATING",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            migrate_params = db_nslcmop.get("operationParams")

            # RO target payload is the operation params as provided by the user
            target = {}
            target.update(migrate_params)
            desc = await self.RO.migrate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # Block until RO completes (or times out) the migration
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.migrate,
                operation="migrate",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # Always clear the "MIGRATING" state, even on failure/cancellation
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            # Notify result on the kafka bus; a notification failure is logged
            # but does not change the operation outcome
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7499
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS

        Sends the heal order to RO (heal_RO) and then redeploys/reconfigures
        the N2VC execution environments of each target VDU (_heal_n2vc).

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # Keep the previous statuses to restore them on failure
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # No explicit vdu list: heal every existing vdur
                        # (translated: "new code to build the dictionary")
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): if no vdur matches, target_instance
                            # stays None and .get() below raises — confirm a
                            # match is always guaranteed here
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # Wait for all N2VC tasks launched by _heal_n2vc before reporting
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # Restore the statuses saved at the beginning of the heal
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # Notify result on the kafka bus; a notification failure is logged
            # but does not change the operation outcome
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7785
    async def heal_RO(
        self,
        logging_text,
        nsr_id,
        db_nslcmop,
        stage,
    ):
        """
        Heal at RO
        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param db_nslcmop: database content of ns operation, in this case, 'heal'
        :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
        :return: None or exception
        """

        # Memoized lookup of VIM accounts by id.
        # NOTE(review): defined but not called anywhere in this method — verify
        # whether it is still needed
        def get_vim_account(vim_account_id):
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        try:
            start_heal = time()
            ns_params = db_nslcmop.get("operationParams")
            # User-provided timeout overrides the configured default
            if ns_params and ns_params.get("timeout_ns_heal"):
                timeout_ns_heal = ns_params["timeout_ns_heal"]
            else:
                timeout_ns_heal = self.timeout.ns_heal

            db_vims = {}

            nslcmop_id = db_nslcmop["_id"]
            target = {
                "action_id": nslcmop_id,
            }
            self.logger.warning(
                "db_nslcmop={} and timeout_ns_heal={}".format(
                    db_nslcmop, timeout_ns_heal
                )
            )
            target.update(db_nslcmop.get("operationParams", {}))

            self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
            desc = await self.RO.recreate(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_heal,
                timeout_ns_heal,
                stage,
                operation="healing",
            )

            # Updating NSR
            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "running",
                "detailed-status": " ".join(stage),
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
            self.logger.debug(
                logging_text + "ns healed at RO. RO_id={}".format(action_id)
            )

        except Exception as e:
            stage[2] = "ERROR healing at VIM"
            # self.set_vnfr_at_error(db_vnfrs, str(e))
            # Log with traceback only for unexpected exception types
            self.logger.error(
                "Error healing at VIM {}".format(e),
                exc_info=not isinstance(
                    e,
                    (
                        ROclient.ROClientException,
                        LcmException,
                        DbException,
                        NgRoException,
                    ),
                ),
            )
            raise
7873
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch a heal_N2VC task per execution environment of the descriptor.

        For each juju/helm EE item in descriptor_config, finds (or creates) the
        matching entry in <nsrs>._admin.deployed.VCA, then schedules heal_N2VC
        as an asyncio task, registering it in lcm_tasks and in
        task_instantiation_info so the caller can await it.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive the VCA type from the EE descriptor (juju vs helm-chart)
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing VCA record matching this EE; the for/else
            # falls through to creation when none is found
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8035
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach and re-configure one execution environment after a heal.

        Heal-time counterpart of the instantiate N2VC flow: for native charms it
        waits for the healed VM, registers the execution environment and installs
        the configuration software; for proxy/helm EEs it waits for RO to finish
        healing and for the VM/KDU to be reachable; finally, when the operation
        parameter ``run-day1`` is set, it re-executes the initial config
        primitives.

        :param logging_text: prefix prepended to every log message
        :param vca_index: index of this EE inside ``_admin.deployed.VCA`` of the nsr
        :param nsi_id: network slice instance id, or falsy when not in a slice
        :param db_nsr: nsr record as read from the database
        :param db_vnfr: vnfr record, or None for a NS-level execution environment
        :param vdu_id: VDU id when the EE targets a VDU, else None
        :param kdu_name: KDU name when the EE targets a KDU, else None
        :param vdu_index: VDU count index (ignored for native charms)
        :param config_descriptor: descriptor section holding config primitives and
            config-access information
        :param deploy_params: primitive parameters; mutated in place with
            ``rw_mgmt_ip`` (and ``ns_config_info`` for NS-level EEs)
        :param base_folder: package folder info used to build the artifact path
        :param stage: list of stage strings; ``stage[0]`` is updated in place
        :param vca_type: one of native_charm / lxc_proxy_charm / k8s_proxy_charm /
            helm / helm-v3
        :param vca_name: charm or helm-chart name inside the package
        :param ee_config_descriptor: execution-environment item of the descriptor
        :raises LcmException: wraps any failure; the configurationStatus entry is
            set to BROKEN before re-raising
        """
        nsr_id = db_nsr["_id"]
        # dotted prefix for partial updates of this VCA entry in the nsr document
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" tracks the current action so the except clause can report it
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the optional "config" primitive feeds the charm configuration
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # known/expected exception types are already meaningful; anything
            # else gets a full traceback before being wrapped below
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8445
8446 async def _wait_heal_ro(
8447 self,
8448 nsr_id,
8449 timeout=600,
8450 ):
8451 start_time = time()
8452 while time() <= start_time + timeout:
8453 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8454 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8455 "operational-status"
8456 ]
8457 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8458 if operational_status_ro != "healing":
8459 break
8460 await asyncio.sleep(15, loop=self.loop)
8461 else: # timeout_ns_deploy
8462 raise NgRoException("Timeout waiting ns to deploy")
8463
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Delegates the actual resize to RO and waits for the RO action to
        complete; always writes the final operation state back to the
        nslcmop record and notifies via kafka on exit.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation; nothing to do here
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # the operation parameters are forwarded as-is as the RO target
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the verticalscale action finished (or timeout)
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected failure: keep the full traceback in the op record
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # notify subscribers (e.g. NBI) about the operation result;
                # a kafka failure is logged but must not mask the outcome
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")