Fix Bug 2087: Use projects_read when searching for VNFD by non-unique id
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 )
65 from osm_lcm.data_utils.nsd import (
66 get_ns_configuration_relation_list,
67 get_vnf_profile,
68 get_vnf_profiles,
69 )
70 from osm_lcm.data_utils.vnfd import (
71 get_kdu,
72 get_kdu_services,
73 get_relation_list,
74 get_vdu_list,
75 get_vdu_profile,
76 get_ee_sorted_initial_config_primitive_list,
77 get_ee_sorted_terminate_config_primitive_list,
78 get_kdu_list,
79 get_virtual_link_profiles,
80 get_vdu,
81 get_configuration,
82 get_vdu_index,
83 get_scaling_aspect,
84 get_number_of_instances,
85 get_juju_ee_ref,
86 get_kdu_resource_profile,
87 find_software_version,
88 check_helm_ee_in_ns,
89 )
90 from osm_lcm.data_utils.list_utils import find_in_list
91 from osm_lcm.data_utils.vnfr import (
92 get_osm_params,
93 get_vdur_index,
94 get_kdur,
95 get_volumes_from_instantiation_params,
96 )
97 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
98 from osm_lcm.data_utils.database.vim_account import VimAccountDB
99 from n2vc.definitions import RelationEndpoint
100 from n2vc.k8s_helm_conn import K8sHelmConnector
101 from n2vc.k8s_helm3_conn import K8sHelm3Connector
102 from n2vc.k8s_juju_conn import K8sJujuConnector
103
104 from osm_common.dbbase import DbException
105 from osm_common.fsbase import FsException
106
107 from osm_lcm.data_utils.database.database import Database
108 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
109 from osm_lcm.data_utils.wim import (
110 get_sdn_ports,
111 get_target_wim_attrs,
112 select_feasible_wim_account,
113 )
114
115 from n2vc.n2vc_juju_conn import N2VCJujuConnector
116 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
117
118 from osm_lcm.lcm_helm_conn import LCMHelmConn
119 from osm_lcm.osm_config import OsmConfigBuilder
120 from osm_lcm.prometheus import parse_job
121
122 from copy import copy, deepcopy
123 from time import time
124 from uuid import uuid4
125
126 from random import randint
127
128 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
129
130
class NsLcm(LcmBase):
    """Network Service lifecycle manager (instantiate, scale, update, terminate)."""

    # Sentinel return values distinguishing sub-operation lookup outcomes
    SUBOPERATION_STATUS_NOT_FOUND = -1  # no matching sub-operation found
    SUBOPERATION_STATUS_NEW = -2  # a new sub-operation is required
    SUBOPERATION_STATUS_SKIP = -3  # sub-operation can be skipped
    # Human-readable label used when reporting the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
136
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message-bus handler, forwarded to LcmBase
        :param lcm_tasks: registry tracking the asyncio tasks of each NS operation
        :param config: LcmCfg object carrying timeout, RO and VCA sub-configurations
        :param loop: asyncio event loop shared by all connectors created here
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # database/filesystem singletons are initialised elsewhere; just grab handles
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector (juju charms); reports changes via _on_update_n2vc_db
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environment connector
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # k8s connectors: helm v2, helm v3 and juju bundles
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju k8s connector reports status changes via _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # KDU deployment-type string -> k8s connector instance
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # VCA (charm) type string -> connector used to deploy/execute it
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # LCM operation type -> RO coroutine used to poll that operation's status
        # (healing uses the dedicated recreate_status endpoint)
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
224
225 @staticmethod
226 def increment_ip_mac(ip_mac, vm_index=1):
227 if not isinstance(ip_mac, str):
228 return ip_mac
229 try:
230 # try with ipv4 look for last dot
231 i = ip_mac.rfind(".")
232 if i > 0:
233 i += 1
234 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
235 # try with ipv6 or mac look for last colon. Operate in hex
236 i = ip_mac.rfind(":")
237 if i > 0:
238 i += 1
239 # format in hex, len can be 2 for mac or 4 for ipv6
240 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
241 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
242 )
243 except Exception:
244 pass
245 return None
246
247 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
248
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback invoked when N2VC reports juju changes for an NS.

        Refreshes "vcaStatus" in the nsr record, attempts to sync the
        configurationStatus entry addressed by *path*, and flips nsState
        between READY and DEGRADED according to juju machine/application
        health. Errors other than cancellation/timeout are logged and
        swallowed.

        :param table: notification table (unused; the nsr is always read from "nsrs")
        :param filter: database filter; its "_id" is the nsr id
        :param path: dotted path whose last component is the VCA index in _admin.deployed.VCA
        :param updated_data: changed data (unused here)
        :param vca_id: optional VCA account id forwarded to n2vc
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted component of the path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key, so these
                # assignments raise KeyError and are swallowed by the except below;
                # verify whether a dotted key like "configurationStatus.{i}.status"
                # was intended
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
366
367 async def _on_update_k8s_db(
368 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
369 ):
370 """
371 Updating vca status in NSR record
372 :param cluster_uuid: UUID of a k8s cluster
373 :param kdu_instance: The unique name of the KDU instance
374 :param filter: To get nsr_id
375 :cluster_type: The cluster type (juju, k8s)
376 :return: none
377 """
378
379 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
380 # .format(cluster_uuid, kdu_instance, filter))
381
382 nsr_id = filter.get("_id")
383 try:
384 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
385 cluster_uuid=cluster_uuid,
386 kdu_instance=kdu_instance,
387 yaml_format=False,
388 complete_status=True,
389 vca_id=vca_id,
390 )
391
392 # vcaStatus
393 db_dict = dict()
394 db_dict["vcaStatus"] = {nsr_id: vca_status}
395
396 self.logger.debug(
397 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
398 )
399
400 # write to database
401 self.update_db_2("nsrs", nsr_id, db_dict)
402 except (asyncio.CancelledError, asyncio.TimeoutError):
403 raise
404 except Exception as e:
405 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
406
407 @staticmethod
408 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
409 try:
410 env = Environment(
411 undefined=StrictUndefined,
412 autoescape=select_autoescape(default_for_string=True, default=True),
413 )
414 template = env.from_string(cloud_init_text)
415 return template.render(additional_params or {})
416 except UndefinedError as e:
417 raise LcmException(
418 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
419 "file, must be provided in the instantiation parameters inside the "
420 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
421 )
422 except (TemplateError, TemplateNotFound) as e:
423 raise LcmException(
424 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
425 vnfd_id, vdu_id, e
426 )
427 )
428
429 def _get_vdu_cloud_init_content(self, vdu, vnfd):
430 cloud_init_content = cloud_init_file = None
431 try:
432 if vdu.get("cloud-init-file"):
433 base_folder = vnfd["_admin"]["storage"]
434 if base_folder["pkg-dir"]:
435 cloud_init_file = "{}/{}/cloud_init/{}".format(
436 base_folder["folder"],
437 base_folder["pkg-dir"],
438 vdu["cloud-init-file"],
439 )
440 else:
441 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
442 base_folder["folder"],
443 vdu["cloud-init-file"],
444 )
445 with self.fs.file_open(cloud_init_file, "r") as ci_file:
446 cloud_init_content = ci_file.read()
447 elif vdu.get("cloud-init"):
448 cloud_init_content = vdu["cloud-init"]
449
450 return cloud_init_content
451 except FsException as e:
452 raise LcmException(
453 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
454 vnfd["id"], vdu["id"], cloud_init_file, e
455 )
456 )
457
458 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
459 vdur = next(
460 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
461 )
462 additional_params = vdur.get("additionalParams")
463 return parse_yaml_strings(additional_params)
464
465 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
466 """
467 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
468 :param vnfd: input vnfd
469 :param new_id: overrides vnf id if provided
470 :param additionalParams: Instantiation params for VNFs provided
471 :param nsrId: Id of the NSR
472 :return: copy of vnfd
473 """
474 vnfd_RO = deepcopy(vnfd)
475 # remove unused by RO configuration, monitoring, scaling and internal keys
476 vnfd_RO.pop("_id", None)
477 vnfd_RO.pop("_admin", None)
478 vnfd_RO.pop("monitoring-param", None)
479 vnfd_RO.pop("scaling-group-descriptor", None)
480 vnfd_RO.pop("kdu", None)
481 vnfd_RO.pop("k8s-cluster", None)
482 if new_id:
483 vnfd_RO["id"] = new_id
484
485 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
486 for vdu in get_iterable(vnfd_RO, "vdu"):
487 vdu.pop("cloud-init-file", None)
488 vdu.pop("cloud-init", None)
489 return vnfd_RO
490
491 @staticmethod
492 def ip_profile_2_RO(ip_profile):
493 RO_ip_profile = deepcopy(ip_profile)
494 if "dns-server" in RO_ip_profile:
495 if isinstance(RO_ip_profile["dns-server"], list):
496 RO_ip_profile["dns-address"] = []
497 for ds in RO_ip_profile.pop("dns-server"):
498 RO_ip_profile["dns-address"].append(ds["address"])
499 else:
500 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
501 if RO_ip_profile.get("ip-version") == "ipv4":
502 RO_ip_profile["ip-version"] = "IPv4"
503 if RO_ip_profile.get("ip-version") == "ipv6":
504 RO_ip_profile["ip-version"] = "IPv6"
505 if "dhcp-params" in RO_ip_profile:
506 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
507 return RO_ip_profile
508
509 def _get_ro_vim_id_for_vim_account(self, vim_account):
510 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
511 if db_vim["_admin"]["operationalState"] != "ENABLED":
512 raise LcmException(
513 "VIM={} is not available. operationalState={}".format(
514 vim_account, db_vim["_admin"]["operationalState"]
515 )
516 )
517 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
518 return RO_vim_id
519
520 def get_ro_wim_id_for_wim_account(self, wim_account):
521 if isinstance(wim_account, str):
522 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
523 if db_wim["_admin"]["operationalState"] != "ENABLED":
524 raise LcmException(
525 "WIM={} is not available. operationalState={}".format(
526 wim_account, db_wim["_admin"]["operationalState"]
527 )
528 )
529 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
530 return RO_wim_id
531 else:
532 return wim_account
533
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Apply a scale operation to the vdur list of a vnfr.

        Scale-out clones the last matching vdur (or the saved
        "vdur-template" when the count previously reached 0), adjusting
        count-index, id and fixed ip/mac addresses per replica. Scale-in
        either marks vdurs as DELETING (*mark_delete*) or pulls them from
        the database one by one; when scaling down to 0 instances the last
        vdur is preserved as "vdur-template" for later scale-outs. The
        "vdur" list of *db_vnfr* is refreshed from the database at the end.

        :param db_vnfr: vnfr content (modified in place)
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: only mark vdurs as DELETING instead of removing them
        :raises LcmException: on scale-out when neither a vdur nor a template exists
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the last (highest count-index) vdur of this vdu
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each replica gets a fresh _id, BUILD status and shifted addresses
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        if iface.get("fixed-ip"):
                            # keep fixed addressing scheme, shifted per replica
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the last vdu_count matching vdurs as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        # build the push payload (None when there is nothing to push)
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
645
646 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
647 """
648 Updates database nsr with the RO info for the created vld
649 :param ns_update_nsr: dictionary to be filled with the updated info
650 :param db_nsr: content of db_nsr. This is also modified
651 :param nsr_desc_RO: nsr descriptor from RO
652 :return: Nothing, LcmException is raised on errors
653 """
654
655 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
656 for net_RO in get_iterable(nsr_desc_RO, "nets"):
657 if vld["id"] != net_RO.get("ns_net_osm_id"):
658 continue
659 vld["vim-id"] = net_RO.get("vim_net_id")
660 vld["name"] = net_RO.get("vim_name")
661 vld["status"] = net_RO.get("status")
662 vld["status-detailed"] = net_RO.get("error_msg")
663 ns_update_nsr["vld.{}".format(vld_index)] = vld
664 break
665 else:
666 raise LcmException(
667 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
668 )
669
670 def set_vnfr_at_error(self, db_vnfrs, error_text):
671 try:
672 for db_vnfr in db_vnfrs.values():
673 vnfr_update = {"status": "ERROR"}
674 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
675 if "status" not in vdur:
676 vdur["status"] = "ERROR"
677 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
678 if error_text:
679 vdur["status-detailed"] = str(error_text)
680 vnfr_update[
681 "vdur.{}.status-detailed".format(vdu_index)
682 ] = "ERROR"
683 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
684 except DbException as e:
685 self.logger.error("Cannot update vnf. {}".format(e))
686
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                # find the RO vnf entry matching this member index
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ';' -- keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # vdur_RO_count_index counts already-seen RO vms for this vdu
                    # id so that replicas are paired by count-index order
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # physical DUs are not deployed by RO: skip
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        # copy VIM data for this vm into the vdur
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            # match interfaces by RO internal name
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    # internal vnf networks matched by osm id
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
783
784 def _get_ns_config_info(self, nsr_id):
785 """
786 Generates a mapping between vnf,vdu elements and the N2VC id
787 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
788 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
789 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
790 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
791 """
792 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
793 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
794 mapping = {}
795 ns_config_info = {"osm-config-mapping": mapping}
796 for vca in vca_deployed_list:
797 if not vca["member-vnf-index"]:
798 continue
799 if not vca["vdu_id"]:
800 mapping[vca["member-vnf-index"]] = vca["application"]
801 else:
802 mapping[
803 "{}.{}.{}".format(
804 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
805 )
806 ] = vca["application"]
807 return ns_config_info
808
809 async def _instantiate_ng_ro(
810 self,
811 logging_text,
812 nsr_id,
813 nsd,
814 db_nsr,
815 db_nslcmop,
816 db_vnfrs,
817 db_vnfds,
818 n2vc_key_list,
819 stage,
820 start_deploy,
821 timeout_ns_deploy,
822 ):
823
824 db_vims = {}
825
826 def get_vim_account(vim_account_id):
827 nonlocal db_vims
828 if vim_account_id in db_vims:
829 return db_vims[vim_account_id]
830 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
831 db_vims[vim_account_id] = db_vim
832 return db_vim
833
834 # modify target_vld info with instantiation parameters
835 def parse_vld_instantiation_params(
836 target_vim, target_vld, vld_params, target_sdn
837 ):
838 if vld_params.get("ip-profile"):
839 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
840 "ip-profile"
841 ]
842 if vld_params.get("provider-network"):
843 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
844 "provider-network"
845 ]
846 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
847 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
848 "provider-network"
849 ]["sdn-ports"]
850
851 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
852 # if wim_account_id is specified in vld_params, validate if it is feasible.
853 wim_account_id, db_wim = select_feasible_wim_account(
854 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
855 )
856
857 if wim_account_id:
858 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
859 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
860 # update vld_params with correct WIM account Id
861 vld_params["wimAccountId"] = wim_account_id
862
863 target_wim = "wim:{}".format(wim_account_id)
864 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
865 sdn_ports = get_sdn_ports(vld_params, db_wim)
866 if len(sdn_ports) > 0:
867 target_vld["vim_info"][target_wim] = target_wim_attrs
868 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
869
870 self.logger.debug(
871 "Target VLD with WIM data: {:s}".format(str(target_vld))
872 )
873
874 for param in ("vim-network-name", "vim-network-id"):
875 if vld_params.get(param):
876 if isinstance(vld_params[param], dict):
877 for vim, vim_net in vld_params[param].items():
878 other_target_vim = "vim:" + vim
879 populate_dict(
880 target_vld["vim_info"],
881 (other_target_vim, param.replace("-", "_")),
882 vim_net,
883 )
884 else: # isinstance str
885 target_vld["vim_info"][target_vim][
886 param.replace("-", "_")
887 ] = vld_params[param]
888 if vld_params.get("common_id"):
889 target_vld["common_id"] = vld_params.get("common_id")
890
891 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
892 def update_ns_vld_target(target, ns_params):
893 for vnf_params in ns_params.get("vnf", ()):
894 if vnf_params.get("vimAccountId"):
895 target_vnf = next(
896 (
897 vnfr
898 for vnfr in db_vnfrs.values()
899 if vnf_params["member-vnf-index"]
900 == vnfr["member-vnf-index-ref"]
901 ),
902 None,
903 )
904 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
905 if not vdur:
906 return
907 for a_index, a_vld in enumerate(target["ns"]["vld"]):
908 target_vld = find_in_list(
909 get_iterable(vdur, "interfaces"),
910 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
911 )
912
913 vld_params = find_in_list(
914 get_iterable(ns_params, "vld"),
915 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
916 )
917 if target_vld:
918
919 if vnf_params.get("vimAccountId") not in a_vld.get(
920 "vim_info", {}
921 ):
922 target_vim_network_list = [
923 v for _, v in a_vld.get("vim_info").items()
924 ]
925 target_vim_network_name = next(
926 (
927 item.get("vim_network_name", "")
928 for item in target_vim_network_list
929 ),
930 "",
931 )
932
933 target["ns"]["vld"][a_index].get("vim_info").update(
934 {
935 "vim:{}".format(vnf_params["vimAccountId"]): {
936 "vim_network_name": target_vim_network_name,
937 }
938 }
939 )
940
941 if vld_params:
942 for param in ("vim-network-name", "vim-network-id"):
943 if vld_params.get(param) and isinstance(
944 vld_params[param], dict
945 ):
946 for vim, vim_net in vld_params[
947 param
948 ].items():
949 other_target_vim = "vim:" + vim
950 populate_dict(
951 target["ns"]["vld"][a_index].get(
952 "vim_info"
953 ),
954 (
955 other_target_vim,
956 param.replace("-", "_"),
957 ),
958 vim_net,
959 )
960
961 nslcmop_id = db_nslcmop["_id"]
962 target = {
963 "name": db_nsr["name"],
964 "ns": {"vld": []},
965 "vnf": [],
966 "image": deepcopy(db_nsr["image"]),
967 "flavor": deepcopy(db_nsr["flavor"]),
968 "action_id": nslcmop_id,
969 "cloud_init_content": {},
970 }
971 for image in target["image"]:
972 image["vim_info"] = {}
973 for flavor in target["flavor"]:
974 flavor["vim_info"] = {}
975 if db_nsr.get("affinity-or-anti-affinity-group"):
976 target["affinity-or-anti-affinity-group"] = deepcopy(
977 db_nsr["affinity-or-anti-affinity-group"]
978 )
979 for affinity_or_anti_affinity_group in target[
980 "affinity-or-anti-affinity-group"
981 ]:
982 affinity_or_anti_affinity_group["vim_info"] = {}
983
984 if db_nslcmop.get("lcmOperationType") != "instantiate":
985 # get parameters of instantiation:
986 db_nslcmop_instantiate = self.db.get_list(
987 "nslcmops",
988 {
989 "nsInstanceId": db_nslcmop["nsInstanceId"],
990 "lcmOperationType": "instantiate",
991 },
992 )[-1]
993 ns_params = db_nslcmop_instantiate.get("operationParams")
994 else:
995 ns_params = db_nslcmop.get("operationParams")
996 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
997 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
998
999 cp2target = {}
1000 for vld_index, vld in enumerate(db_nsr.get("vld")):
1001 target_vim = "vim:{}".format(ns_params["vimAccountId"])
1002 target_vld = {
1003 "id": vld["id"],
1004 "name": vld["name"],
1005 "mgmt-network": vld.get("mgmt-network", False),
1006 "type": vld.get("type"),
1007 "vim_info": {
1008 target_vim: {
1009 "vim_network_name": vld.get("vim-network-name"),
1010 "vim_account_id": ns_params["vimAccountId"],
1011 }
1012 },
1013 }
1014 # check if this network needs SDN assist
1015 if vld.get("pci-interfaces"):
1016 db_vim = get_vim_account(ns_params["vimAccountId"])
1017 sdnc_id = db_vim["config"].get("sdn-controller")
1018 if sdnc_id:
1019 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1020 target_sdn = "sdn:{}".format(sdnc_id)
1021 target_vld["vim_info"][target_sdn] = {
1022 "sdn": True,
1023 "target_vim": target_vim,
1024 "vlds": [sdn_vld],
1025 "type": vld.get("type"),
1026 }
1027
1028 nsd_vnf_profiles = get_vnf_profiles(nsd)
1029 for nsd_vnf_profile in nsd_vnf_profiles:
1030 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1031 if cp["virtual-link-profile-id"] == vld["id"]:
1032 cp2target[
1033 "member_vnf:{}.{}".format(
1034 cp["constituent-cpd-id"][0][
1035 "constituent-base-element-id"
1036 ],
1037 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1038 )
1039 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1040
1041 # check at nsd descriptor, if there is an ip-profile
1042 vld_params = {}
1043 nsd_vlp = find_in_list(
1044 get_virtual_link_profiles(nsd),
1045 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1046 == vld["id"],
1047 )
1048 if (
1049 nsd_vlp
1050 and nsd_vlp.get("virtual-link-protocol-data")
1051 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1052 ):
1053 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1054 "l3-protocol-data"
1055 ]
1056 ip_profile_dest_data = {}
1057 if "ip-version" in ip_profile_source_data:
1058 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1059 "ip-version"
1060 ]
1061 if "cidr" in ip_profile_source_data:
1062 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1063 "cidr"
1064 ]
1065 if "gateway-ip" in ip_profile_source_data:
1066 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1067 "gateway-ip"
1068 ]
1069 if "dhcp-enabled" in ip_profile_source_data:
1070 ip_profile_dest_data["dhcp-params"] = {
1071 "enabled": ip_profile_source_data["dhcp-enabled"]
1072 }
1073 vld_params["ip-profile"] = ip_profile_dest_data
1074
1075 # update vld_params with instantiation params
1076 vld_instantiation_params = find_in_list(
1077 get_iterable(ns_params, "vld"),
1078 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1079 )
1080 if vld_instantiation_params:
1081 vld_params.update(vld_instantiation_params)
1082 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1083 target["ns"]["vld"].append(target_vld)
1084 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1085 update_ns_vld_target(target, ns_params)
1086
1087 for vnfr in db_vnfrs.values():
1088 vnfd = find_in_list(
1089 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1090 )
1091 vnf_params = find_in_list(
1092 get_iterable(ns_params, "vnf"),
1093 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1094 )
1095 target_vnf = deepcopy(vnfr)
1096 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1097 for vld in target_vnf.get("vld", ()):
1098 # check if connected to a ns.vld, to fill target'
1099 vnf_cp = find_in_list(
1100 vnfd.get("int-virtual-link-desc", ()),
1101 lambda cpd: cpd.get("id") == vld["id"],
1102 )
1103 if vnf_cp:
1104 ns_cp = "member_vnf:{}.{}".format(
1105 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1106 )
1107 if cp2target.get(ns_cp):
1108 vld["target"] = cp2target[ns_cp]
1109
1110 vld["vim_info"] = {
1111 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1112 }
1113 # check if this network needs SDN assist
1114 target_sdn = None
1115 if vld.get("pci-interfaces"):
1116 db_vim = get_vim_account(vnfr["vim-account-id"])
1117 sdnc_id = db_vim["config"].get("sdn-controller")
1118 if sdnc_id:
1119 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1120 target_sdn = "sdn:{}".format(sdnc_id)
1121 vld["vim_info"][target_sdn] = {
1122 "sdn": True,
1123 "target_vim": target_vim,
1124 "vlds": [sdn_vld],
1125 "type": vld.get("type"),
1126 }
1127
1128 # check at vnfd descriptor, if there is an ip-profile
1129 vld_params = {}
1130 vnfd_vlp = find_in_list(
1131 get_virtual_link_profiles(vnfd),
1132 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1133 )
1134 if (
1135 vnfd_vlp
1136 and vnfd_vlp.get("virtual-link-protocol-data")
1137 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1138 ):
1139 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1140 "l3-protocol-data"
1141 ]
1142 ip_profile_dest_data = {}
1143 if "ip-version" in ip_profile_source_data:
1144 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1145 "ip-version"
1146 ]
1147 if "cidr" in ip_profile_source_data:
1148 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1149 "cidr"
1150 ]
1151 if "gateway-ip" in ip_profile_source_data:
1152 ip_profile_dest_data[
1153 "gateway-address"
1154 ] = ip_profile_source_data["gateway-ip"]
1155 if "dhcp-enabled" in ip_profile_source_data:
1156 ip_profile_dest_data["dhcp-params"] = {
1157 "enabled": ip_profile_source_data["dhcp-enabled"]
1158 }
1159
1160 vld_params["ip-profile"] = ip_profile_dest_data
1161 # update vld_params with instantiation params
1162 if vnf_params:
1163 vld_instantiation_params = find_in_list(
1164 get_iterable(vnf_params, "internal-vld"),
1165 lambda i_vld: i_vld["name"] == vld["id"],
1166 )
1167 if vld_instantiation_params:
1168 vld_params.update(vld_instantiation_params)
1169 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1170
1171 vdur_list = []
1172 for vdur in target_vnf.get("vdur", ()):
1173 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1174 continue # This vdu must not be created
1175 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1176
1177 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1178
1179 if ssh_keys_all:
1180 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1181 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1182 if (
1183 vdu_configuration
1184 and vdu_configuration.get("config-access")
1185 and vdu_configuration.get("config-access").get("ssh-access")
1186 ):
1187 vdur["ssh-keys"] = ssh_keys_all
1188 vdur["ssh-access-required"] = vdu_configuration[
1189 "config-access"
1190 ]["ssh-access"]["required"]
1191 elif (
1192 vnf_configuration
1193 and vnf_configuration.get("config-access")
1194 and vnf_configuration.get("config-access").get("ssh-access")
1195 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1196 ):
1197 vdur["ssh-keys"] = ssh_keys_all
1198 vdur["ssh-access-required"] = vnf_configuration[
1199 "config-access"
1200 ]["ssh-access"]["required"]
1201 elif ssh_keys_instantiation and find_in_list(
1202 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1203 ):
1204 vdur["ssh-keys"] = ssh_keys_instantiation
1205
1206 self.logger.debug("NS > vdur > {}".format(vdur))
1207
1208 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1209 # cloud-init
1210 if vdud.get("cloud-init-file"):
1211 vdur["cloud-init"] = "{}:file:{}".format(
1212 vnfd["_id"], vdud.get("cloud-init-file")
1213 )
1214 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1215 if vdur["cloud-init"] not in target["cloud_init_content"]:
1216 base_folder = vnfd["_admin"]["storage"]
1217 if base_folder["pkg-dir"]:
1218 cloud_init_file = "{}/{}/cloud_init/{}".format(
1219 base_folder["folder"],
1220 base_folder["pkg-dir"],
1221 vdud.get("cloud-init-file"),
1222 )
1223 else:
1224 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1225 base_folder["folder"],
1226 vdud.get("cloud-init-file"),
1227 )
1228 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1229 target["cloud_init_content"][
1230 vdur["cloud-init"]
1231 ] = ci_file.read()
1232 elif vdud.get("cloud-init"):
1233 vdur["cloud-init"] = "{}:vdu:{}".format(
1234 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1235 )
1236 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1237 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1238 "cloud-init"
1239 ]
1240 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1241 deploy_params_vdu = self._format_additional_params(
1242 vdur.get("additionalParams") or {}
1243 )
1244 deploy_params_vdu["OSM"] = get_osm_params(
1245 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1246 )
1247 vdur["additionalParams"] = deploy_params_vdu
1248
1249 # flavor
1250 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1251 if target_vim not in ns_flavor["vim_info"]:
1252 ns_flavor["vim_info"][target_vim] = {}
1253
1254 # deal with images
1255 # in case alternative images are provided we must check if they should be applied
1256 # for the vim_type, modify the vim_type taking into account
1257 ns_image_id = int(vdur["ns-image-id"])
1258 if vdur.get("alt-image-ids"):
1259 db_vim = get_vim_account(vnfr["vim-account-id"])
1260 vim_type = db_vim["vim_type"]
1261 for alt_image_id in vdur.get("alt-image-ids"):
1262 ns_alt_image = target["image"][int(alt_image_id)]
1263 if vim_type == ns_alt_image.get("vim-type"):
1264 # must use alternative image
1265 self.logger.debug(
1266 "use alternative image id: {}".format(alt_image_id)
1267 )
1268 ns_image_id = alt_image_id
1269 vdur["ns-image-id"] = ns_image_id
1270 break
1271 ns_image = target["image"][int(ns_image_id)]
1272 if target_vim not in ns_image["vim_info"]:
1273 ns_image["vim_info"][target_vim] = {}
1274
1275 # Affinity groups
1276 if vdur.get("affinity-or-anti-affinity-group-id"):
1277 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1278 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1279 if target_vim not in ns_ags["vim_info"]:
1280 ns_ags["vim_info"][target_vim] = {}
1281
1282 vdur["vim_info"] = {target_vim: {}}
1283 # instantiation parameters
1284 if vnf_params:
1285 vdu_instantiation_params = find_in_list(
1286 get_iterable(vnf_params, "vdu"),
1287 lambda i_vdu: i_vdu["id"] == vdud["id"],
1288 )
1289 if vdu_instantiation_params:
1290 # Parse the vdu_volumes from the instantiation params
1291 vdu_volumes = get_volumes_from_instantiation_params(
1292 vdu_instantiation_params, vdud
1293 )
1294 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1295 vdur_list.append(vdur)
1296 target_vnf["vdur"] = vdur_list
1297 target["vnf"].append(target_vnf)
1298
1299 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1300 desc = await self.RO.deploy(nsr_id, target)
1301 self.logger.debug("RO return > {}".format(desc))
1302 action_id = desc["action_id"]
1303 await self._wait_ng_ro(
1304 nsr_id,
1305 action_id,
1306 nslcmop_id,
1307 start_deploy,
1308 timeout_ns_deploy,
1309 stage,
1310 operation="instantiation",
1311 )
1312
1313 # Updating NSR
1314 db_nsr_update = {
1315 "_admin.deployed.RO.operational-status": "running",
1316 "detailed-status": " ".join(stage),
1317 }
1318 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1319 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1320 self._write_op_status(nslcmop_id, stage)
1321 self.logger.debug(
1322 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1323 )
1324 return
1325
1326 async def _wait_ng_ro(
1327 self,
1328 nsr_id,
1329 action_id,
1330 nslcmop_id=None,
1331 start_time=None,
1332 timeout=600,
1333 stage=None,
1334 operation=None,
1335 ):
1336 detailed_status_old = None
1337 db_nsr_update = {}
1338 start_time = start_time or time()
1339 while time() <= start_time + timeout:
1340 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1341 self.logger.debug("Wait NG RO > {}".format(desc_status))
1342 if desc_status["status"] == "FAILED":
1343 raise NgRoException(desc_status["details"])
1344 elif desc_status["status"] == "BUILD":
1345 if stage:
1346 stage[2] = "VIM: ({})".format(desc_status["details"])
1347 elif desc_status["status"] == "DONE":
1348 if stage:
1349 stage[2] = "Deployed at VIM"
1350 break
1351 else:
1352 assert False, "ROclient.check_ns_status returns unknown {}".format(
1353 desc_status["status"]
1354 )
1355 if stage and nslcmop_id and stage[2] != detailed_status_old:
1356 detailed_status_old = stage[2]
1357 db_nsr_update["detailed-status"] = " ".join(stage)
1358 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1359 self._write_op_status(nslcmop_id, stage)
1360 await asyncio.sleep(15, loop=self.loop)
1361 else: # timeout_ns_deploy
1362 raise NgRoException("Timeout waiting ns to deploy")
1363
1364 async def _terminate_ng_ro(
1365 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1366 ):
1367 db_nsr_update = {}
1368 failed_detail = []
1369 action_id = None
1370 start_deploy = time()
1371 try:
1372 target = {
1373 "ns": {"vld": []},
1374 "vnf": [],
1375 "image": [],
1376 "flavor": [],
1377 "action_id": nslcmop_id,
1378 }
1379 desc = await self.RO.deploy(nsr_id, target)
1380 action_id = desc["action_id"]
1381 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1382 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1383 self.logger.debug(
1384 logging_text
1385 + "ns terminate action at RO. action_id={}".format(action_id)
1386 )
1387
1388 # wait until done
1389 delete_timeout = 20 * 60 # 20 minutes
1390 await self._wait_ng_ro(
1391 nsr_id,
1392 action_id,
1393 nslcmop_id,
1394 start_deploy,
1395 delete_timeout,
1396 stage,
1397 operation="termination",
1398 )
1399
1400 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1401 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1402 # delete all nsr
1403 await self.RO.delete(nsr_id)
1404 except Exception as e:
1405 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1406 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1407 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1408 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1409 self.logger.debug(
1410 logging_text + "RO_action_id={} already deleted".format(action_id)
1411 )
1412 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1413 failed_detail.append("delete conflict: {}".format(e))
1414 self.logger.debug(
1415 logging_text
1416 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1417 )
1418 else:
1419 failed_detail.append("delete error: {}".format(e))
1420 self.logger.error(
1421 logging_text
1422 + "RO_action_id={} delete error: {}".format(action_id, e)
1423 )
1424
1425 if failed_detail:
1426 stage[2] = "Error deleting from VIM"
1427 else:
1428 stage[2] = "Deleted from VIM"
1429 db_nsr_update["detailed-status"] = " ".join(stage)
1430 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1431 self._write_op_status(nslcmop_id, stage)
1432
1433 if failed_detail:
1434 raise LcmException("; ".join(failed_detail))
1435 return
1436
1437 async def instantiate_RO(
1438 self,
1439 logging_text,
1440 nsr_id,
1441 nsd,
1442 db_nsr,
1443 db_nslcmop,
1444 db_vnfrs,
1445 db_vnfds,
1446 n2vc_key_list,
1447 stage,
1448 ):
1449 """
1450 Instantiate at RO
1451 :param logging_text: preffix text to use at logging
1452 :param nsr_id: nsr identity
1453 :param nsd: database content of ns descriptor
1454 :param db_nsr: database content of ns record
1455 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1456 :param db_vnfrs:
1457 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1458 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1459 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1460 :return: None or exception
1461 """
1462 try:
1463 start_deploy = time()
1464 ns_params = db_nslcmop.get("operationParams")
1465 if ns_params and ns_params.get("timeout_ns_deploy"):
1466 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1467 else:
1468 timeout_ns_deploy = self.timeout.ns_deploy
1469
1470 # Check for and optionally request placement optimization. Database will be updated if placement activated
1471 stage[2] = "Waiting for Placement."
1472 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1473 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1474 for vnfr in db_vnfrs.values():
1475 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1476 break
1477 else:
1478 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1479
1480 return await self._instantiate_ng_ro(
1481 logging_text,
1482 nsr_id,
1483 nsd,
1484 db_nsr,
1485 db_nslcmop,
1486 db_vnfrs,
1487 db_vnfds,
1488 n2vc_key_list,
1489 stage,
1490 start_deploy,
1491 timeout_ns_deploy,
1492 )
1493 except Exception as e:
1494 stage[2] = "ERROR deploying at VIM"
1495 self.set_vnfr_at_error(db_vnfrs, str(e))
1496 self.logger.error(
1497 "Error deploying at VIM {}".format(e),
1498 exc_info=not isinstance(
1499 e,
1500 (
1501 ROclient.ROClientException,
1502 LcmException,
1503 DbException,
1504 NgRoException,
1505 ),
1506 ),
1507 )
1508 raise
1509
1510 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1511 """
1512 Wait for kdu to be up, get ip address
1513 :param logging_text: prefix use for logging
1514 :param nsr_id:
1515 :param vnfr_id:
1516 :param kdu_name:
1517 :return: IP address, K8s services
1518 """
1519
1520 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1521 nb_tries = 0
1522
1523 while nb_tries < 360:
1524 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1525 kdur = next(
1526 (
1527 x
1528 for x in get_iterable(db_vnfr, "kdur")
1529 if x.get("kdu-name") == kdu_name
1530 ),
1531 None,
1532 )
1533 if not kdur:
1534 raise LcmException(
1535 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1536 )
1537 if kdur.get("status"):
1538 if kdur["status"] in ("READY", "ENABLED"):
1539 return kdur.get("ip-address"), kdur.get("services")
1540 else:
1541 raise LcmException(
1542 "target KDU={} is in error state".format(kdu_name)
1543 )
1544
1545 await asyncio.sleep(10, loop=self.loop)
1546 nb_tries += 1
1547 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1548
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: NS record id
        :param vnfr_id: VNF record id containing the target VDU
        :param vdu_id: VDU id inside the VNF; None means "the VDU holding the VNF mgmt IP"
        :param vdu_index: count-index of the VDU (only used when vdu_id is given)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # retries of the key-injection request (classic-RO path only)
        target_vdu_id = None  # set once the target VDU is found ACTIVE with an IP
        ro_retries = 0  # total polling iterations; bounds the whole wait

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                # re-read the vnfr each iteration: RO updates it asynchronously
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # locate the vdur by id and count-index
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # accept either classic-RO ("status") or NG-RO ("vim_status")
                # ACTIVE flags; PDUs are considered ready as-is
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # physical deployment unit: cannot inject a key, best effort
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ro_config.ng:
                        # NG-RO path: injection is requested as a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(
                            nsr_id, action_id, timeout=600, operation="instantiation"
                        )
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        # success if any VM reports vim_result 200; the for/else
                        # raises when no entry succeeded
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic-RO errors are retried up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: waiting for the IP was all that was needed
                break

        return ip_address
1727
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id, re-read each iteration for fresh status
        :param vca_deployed_list: "_admin.deployed.VCA" list from the ns record
        :param vca_index: index of the VCA whose dependencies are awaited
        :raises LcmException: when a dependent charm is BROKEN or on timeout
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): decremented by 1 per 10-second sleep, so 300 iterations
        # allow up to ~3000 s (~50 min), not 300 s — confirm the intended unit
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # an NS-level VCA (no member-vnf-index) depends on everything;
                # a VNF-level VCA only on entries of the same member-vnf-index
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        # this dependency is done; keep checking the rest
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # still in progress: break out and sleep before retrying
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1765
1766 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1767 vca_id = None
1768 if db_vnfr:
1769 vca_id = deep_get(db_vnfr, ("vca-id",))
1770 elif db_nsr:
1771 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1772 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1773 return vca_id
1774
1775 async def instantiate_N2VC(
1776 self,
1777 logging_text,
1778 vca_index,
1779 nsi_id,
1780 db_nsr,
1781 db_vnfr,
1782 vdu_id,
1783 kdu_name,
1784 vdu_index,
1785 config_descriptor,
1786 deploy_params,
1787 base_folder,
1788 nslcmop_id,
1789 stage,
1790 vca_type,
1791 vca_name,
1792 ee_config_descriptor,
1793 ):
1794 nsr_id = db_nsr["_id"]
1795 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1796 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1797 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1798 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1799 db_dict = {
1800 "collection": "nsrs",
1801 "filter": {"_id": nsr_id},
1802 "path": db_update_entry,
1803 }
1804 step = ""
1805 try:
1806
1807 element_type = "NS"
1808 element_under_configuration = nsr_id
1809
1810 vnfr_id = None
1811 if db_vnfr:
1812 vnfr_id = db_vnfr["_id"]
1813 osm_config["osm"]["vnf_id"] = vnfr_id
1814
1815 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1816
1817 if vca_type == "native_charm":
1818 index_number = 0
1819 else:
1820 index_number = vdu_index or 0
1821
1822 if vnfr_id:
1823 element_type = "VNF"
1824 element_under_configuration = vnfr_id
1825 namespace += ".{}-{}".format(vnfr_id, index_number)
1826 if vdu_id:
1827 namespace += ".{}-{}".format(vdu_id, index_number)
1828 element_type = "VDU"
1829 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1830 osm_config["osm"]["vdu_id"] = vdu_id
1831 elif kdu_name:
1832 namespace += ".{}".format(kdu_name)
1833 element_type = "KDU"
1834 element_under_configuration = kdu_name
1835 osm_config["osm"]["kdu_name"] = kdu_name
1836
1837 # Get artifact path
1838 if base_folder["pkg-dir"]:
1839 artifact_path = "{}/{}/{}/{}".format(
1840 base_folder["folder"],
1841 base_folder["pkg-dir"],
1842 "charms"
1843 if vca_type
1844 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1845 else "helm-charts",
1846 vca_name,
1847 )
1848 else:
1849 artifact_path = "{}/Scripts/{}/{}/".format(
1850 base_folder["folder"],
1851 "charms"
1852 if vca_type
1853 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1854 else "helm-charts",
1855 vca_name,
1856 )
1857
1858 self.logger.debug("Artifact path > {}".format(artifact_path))
1859
1860 # get initial_config_primitive_list that applies to this element
1861 initial_config_primitive_list = config_descriptor.get(
1862 "initial-config-primitive"
1863 )
1864
1865 self.logger.debug(
1866 "Initial config primitive list > {}".format(
1867 initial_config_primitive_list
1868 )
1869 )
1870
1871 # add config if not present for NS charm
1872 ee_descriptor_id = ee_config_descriptor.get("id")
1873 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1874 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1875 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1876 )
1877
1878 self.logger.debug(
1879 "Initial config primitive list #2 > {}".format(
1880 initial_config_primitive_list
1881 )
1882 )
1883 # n2vc_redesign STEP 3.1
1884 # find old ee_id if exists
1885 ee_id = vca_deployed.get("ee_id")
1886
1887 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1888 # create or register execution environment in VCA
1889 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1890
1891 self._write_configuration_status(
1892 nsr_id=nsr_id,
1893 vca_index=vca_index,
1894 status="CREATING",
1895 element_under_configuration=element_under_configuration,
1896 element_type=element_type,
1897 )
1898
1899 step = "create execution environment"
1900 self.logger.debug(logging_text + step)
1901
1902 ee_id = None
1903 credentials = None
1904 if vca_type == "k8s_proxy_charm":
1905 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1906 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1907 namespace=namespace,
1908 artifact_path=artifact_path,
1909 db_dict=db_dict,
1910 vca_id=vca_id,
1911 )
1912 elif vca_type == "helm" or vca_type == "helm-v3":
1913 ee_id, credentials = await self.vca_map[
1914 vca_type
1915 ].create_execution_environment(
1916 namespace=namespace,
1917 reuse_ee_id=ee_id,
1918 db_dict=db_dict,
1919 config=osm_config,
1920 artifact_path=artifact_path,
1921 chart_model=vca_name,
1922 vca_type=vca_type,
1923 )
1924 else:
1925 ee_id, credentials = await self.vca_map[
1926 vca_type
1927 ].create_execution_environment(
1928 namespace=namespace,
1929 reuse_ee_id=ee_id,
1930 db_dict=db_dict,
1931 vca_id=vca_id,
1932 )
1933
1934 elif vca_type == "native_charm":
1935 step = "Waiting to VM being up and getting IP address"
1936 self.logger.debug(logging_text + step)
1937 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1938 logging_text,
1939 nsr_id,
1940 vnfr_id,
1941 vdu_id,
1942 vdu_index,
1943 user=None,
1944 pub_key=None,
1945 )
1946 credentials = {"hostname": rw_mgmt_ip}
1947 # get username
1948 username = deep_get(
1949 config_descriptor, ("config-access", "ssh-access", "default-user")
1950 )
1951 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1952 # merged. Meanwhile let's get username from initial-config-primitive
1953 if not username and initial_config_primitive_list:
1954 for config_primitive in initial_config_primitive_list:
1955 for param in config_primitive.get("parameter", ()):
1956 if param["name"] == "ssh-username":
1957 username = param["value"]
1958 break
1959 if not username:
1960 raise LcmException(
1961 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1962 "'config-access.ssh-access.default-user'"
1963 )
1964 credentials["username"] = username
1965 # n2vc_redesign STEP 3.2
1966
1967 self._write_configuration_status(
1968 nsr_id=nsr_id,
1969 vca_index=vca_index,
1970 status="REGISTERING",
1971 element_under_configuration=element_under_configuration,
1972 element_type=element_type,
1973 )
1974
1975 step = "register execution environment {}".format(credentials)
1976 self.logger.debug(logging_text + step)
1977 ee_id = await self.vca_map[vca_type].register_execution_environment(
1978 credentials=credentials,
1979 namespace=namespace,
1980 db_dict=db_dict,
1981 vca_id=vca_id,
1982 )
1983
1984 # for compatibility with MON/POL modules, the need model and application name at database
1985 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1986 ee_id_parts = ee_id.split(".")
1987 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1988 if len(ee_id_parts) >= 2:
1989 model_name = ee_id_parts[0]
1990 application_name = ee_id_parts[1]
1991 db_nsr_update[db_update_entry + "model"] = model_name
1992 db_nsr_update[db_update_entry + "application"] = application_name
1993
1994 # n2vc_redesign STEP 3.3
1995 step = "Install configuration Software"
1996
1997 self._write_configuration_status(
1998 nsr_id=nsr_id,
1999 vca_index=vca_index,
2000 status="INSTALLING SW",
2001 element_under_configuration=element_under_configuration,
2002 element_type=element_type,
2003 other_update=db_nsr_update,
2004 )
2005
2006 # TODO check if already done
2007 self.logger.debug(logging_text + step)
2008 config = None
2009 if vca_type == "native_charm":
2010 config_primitive = next(
2011 (p for p in initial_config_primitive_list if p["name"] == "config"),
2012 None,
2013 )
2014 if config_primitive:
2015 config = self._map_primitive_params(
2016 config_primitive, {}, deploy_params
2017 )
2018 num_units = 1
2019 if vca_type == "lxc_proxy_charm":
2020 if element_type == "NS":
2021 num_units = db_nsr.get("config-units") or 1
2022 elif element_type == "VNF":
2023 num_units = db_vnfr.get("config-units") or 1
2024 elif element_type == "VDU":
2025 for v in db_vnfr["vdur"]:
2026 if vdu_id == v["vdu-id-ref"]:
2027 num_units = v.get("config-units") or 1
2028 break
2029 if vca_type != "k8s_proxy_charm":
2030 await self.vca_map[vca_type].install_configuration_sw(
2031 ee_id=ee_id,
2032 artifact_path=artifact_path,
2033 db_dict=db_dict,
2034 config=config,
2035 num_units=num_units,
2036 vca_id=vca_id,
2037 vca_type=vca_type,
2038 )
2039
2040 # write in db flag of configuration_sw already installed
2041 self.update_db_2(
2042 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2043 )
2044
2045 # add relations for this VCA (wait for other peers related with this VCA)
2046 await self._add_vca_relations(
2047 logging_text=logging_text,
2048 nsr_id=nsr_id,
2049 vca_type=vca_type,
2050 vca_index=vca_index,
2051 )
2052
2053 # if SSH access is required, then get execution environment SSH public
2054 # if native charm we have waited already to VM be UP
2055 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2056 pub_key = None
2057 user = None
2058 # self.logger.debug("get ssh key block")
2059 if deep_get(
2060 config_descriptor, ("config-access", "ssh-access", "required")
2061 ):
2062 # self.logger.debug("ssh key needed")
2063 # Needed to inject a ssh key
2064 user = deep_get(
2065 config_descriptor,
2066 ("config-access", "ssh-access", "default-user"),
2067 )
2068 step = "Install configuration Software, getting public ssh key"
2069 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2070 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2071 )
2072
2073 step = "Insert public key into VM user={} ssh_key={}".format(
2074 user, pub_key
2075 )
2076 else:
2077 # self.logger.debug("no need to get ssh key")
2078 step = "Waiting to VM being up and getting IP address"
2079 self.logger.debug(logging_text + step)
2080
2081 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2082 rw_mgmt_ip = None
2083
2084 # n2vc_redesign STEP 5.1
2085 # wait for RO (ip-address) Insert pub_key into VM
2086 if vnfr_id:
2087 if kdu_name:
2088 rw_mgmt_ip, services = await self.wait_kdu_up(
2089 logging_text, nsr_id, vnfr_id, kdu_name
2090 )
2091 vnfd = self.db.get_one(
2092 "vnfds_revisions",
2093 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2094 )
2095 kdu = get_kdu(vnfd, kdu_name)
2096 kdu_services = [
2097 service["name"] for service in get_kdu_services(kdu)
2098 ]
2099 exposed_services = []
2100 for service in services:
2101 if any(s in service["name"] for s in kdu_services):
2102 exposed_services.append(service)
2103 await self.vca_map[vca_type].exec_primitive(
2104 ee_id=ee_id,
2105 primitive_name="config",
2106 params_dict={
2107 "osm-config": json.dumps(
2108 OsmConfigBuilder(
2109 k8s={"services": exposed_services}
2110 ).build()
2111 )
2112 },
2113 vca_id=vca_id,
2114 )
2115
2116 # This verification is needed in order to avoid trying to add a public key
2117 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2118 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2119 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2120 # or it is a KNF)
2121 elif db_vnfr.get("vdur"):
2122 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2123 logging_text,
2124 nsr_id,
2125 vnfr_id,
2126 vdu_id,
2127 vdu_index,
2128 user=user,
2129 pub_key=pub_key,
2130 )
2131
2132 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2133
2134 # store rw_mgmt_ip in deploy params for later replacement
2135 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2136
2137 # n2vc_redesign STEP 6 Execute initial config primitive
2138 step = "execute initial config primitive"
2139
2140 # wait for dependent primitives execution (NS -> VNF -> VDU)
2141 if initial_config_primitive_list:
2142 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2143
2144 # stage, in function of element type: vdu, kdu, vnf or ns
2145 my_vca = vca_deployed_list[vca_index]
2146 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2147 # VDU or KDU
2148 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2149 elif my_vca.get("member-vnf-index"):
2150 # VNF
2151 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2152 else:
2153 # NS
2154 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2155
2156 self._write_configuration_status(
2157 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2158 )
2159
2160 self._write_op_status(op_id=nslcmop_id, stage=stage)
2161
2162 check_if_terminated_needed = True
2163 for initial_config_primitive in initial_config_primitive_list:
2164 # adding information on the vca_deployed if it is a NS execution environment
2165 if not vca_deployed["member-vnf-index"]:
2166 deploy_params["ns_config_info"] = json.dumps(
2167 self._get_ns_config_info(nsr_id)
2168 )
2169 # TODO check if already done
2170 primitive_params_ = self._map_primitive_params(
2171 initial_config_primitive, {}, deploy_params
2172 )
2173
2174 step = "execute primitive '{}' params '{}'".format(
2175 initial_config_primitive["name"], primitive_params_
2176 )
2177 self.logger.debug(logging_text + step)
2178 await self.vca_map[vca_type].exec_primitive(
2179 ee_id=ee_id,
2180 primitive_name=initial_config_primitive["name"],
2181 params_dict=primitive_params_,
2182 db_dict=db_dict,
2183 vca_id=vca_id,
2184 vca_type=vca_type,
2185 )
2186 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2187 if check_if_terminated_needed:
2188 if config_descriptor.get("terminate-config-primitive"):
2189 self.update_db_2(
2190 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2191 )
2192 check_if_terminated_needed = False
2193
2194 # TODO register in database that primitive is done
2195
2196 # STEP 7 Configure metrics
2197 if vca_type == "helm" or vca_type == "helm-v3":
2198 # TODO: review for those cases where the helm chart is a reference and
2199 # is not part of the NF package
2200 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2201 ee_id=ee_id,
2202 artifact_path=artifact_path,
2203 ee_config_descriptor=ee_config_descriptor,
2204 vnfr_id=vnfr_id,
2205 nsr_id=nsr_id,
2206 target_ip=rw_mgmt_ip,
2207 )
2208 if prometheus_jobs:
2209 self.update_db_2(
2210 "nsrs",
2211 nsr_id,
2212 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2213 )
2214
2215 for job in prometheus_jobs:
2216 self.db.set_one(
2217 "prometheus_jobs",
2218 {"job_name": job["job_name"]},
2219 job,
2220 upsert=True,
2221 fail_on_empty=False,
2222 )
2223
2224 step = "instantiated at VCA"
2225 self.logger.debug(logging_text + step)
2226
2227 self._write_configuration_status(
2228 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2229 )
2230
2231 except Exception as e: # TODO not use Exception but N2VC exception
2232 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2233 if not isinstance(
2234 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2235 ):
2236 self.logger.error(
2237 "Exception while {} : {}".format(step, e), exc_info=True
2238 )
2239 self._write_configuration_status(
2240 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2241 )
2242 raise LcmException("{} {}".format(step, e)) from e
2243
2244 def _write_ns_status(
2245 self,
2246 nsr_id: str,
2247 ns_state: str,
2248 current_operation: str,
2249 current_operation_id: str,
2250 error_description: str = None,
2251 error_detail: str = None,
2252 other_update: dict = None,
2253 ):
2254 """
2255 Update db_nsr fields.
2256 :param nsr_id:
2257 :param ns_state:
2258 :param current_operation:
2259 :param current_operation_id:
2260 :param error_description:
2261 :param error_detail:
2262 :param other_update: Other required changes at database if provided, will be cleared
2263 :return:
2264 """
2265 try:
2266 db_dict = other_update or {}
2267 db_dict[
2268 "_admin.nslcmop"
2269 ] = current_operation_id # for backward compatibility
2270 db_dict["_admin.current-operation"] = current_operation_id
2271 db_dict["_admin.operation-type"] = (
2272 current_operation if current_operation != "IDLE" else None
2273 )
2274 db_dict["currentOperation"] = current_operation
2275 db_dict["currentOperationID"] = current_operation_id
2276 db_dict["errorDescription"] = error_description
2277 db_dict["errorDetail"] = error_detail
2278
2279 if ns_state:
2280 db_dict["nsState"] = ns_state
2281 self.update_db_2("nsrs", nsr_id, db_dict)
2282 except DbException as e:
2283 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2284
2285 def _write_op_status(
2286 self,
2287 op_id: str,
2288 stage: list = None,
2289 error_message: str = None,
2290 queuePosition: int = 0,
2291 operation_state: str = None,
2292 other_update: dict = None,
2293 ):
2294 try:
2295 db_dict = other_update or {}
2296 db_dict["queuePosition"] = queuePosition
2297 if isinstance(stage, list):
2298 db_dict["stage"] = stage[0]
2299 db_dict["detailed-status"] = " ".join(stage)
2300 elif stage is not None:
2301 db_dict["stage"] = str(stage)
2302
2303 if error_message is not None:
2304 db_dict["errorMessage"] = error_message
2305 if operation_state is not None:
2306 db_dict["operationState"] = operation_state
2307 db_dict["statusEnteredTime"] = time()
2308 self.update_db_2("nslcmops", op_id, db_dict)
2309 except DbException as e:
2310 self.logger.warn(
2311 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2312 )
2313
2314 def _write_all_config_status(self, db_nsr: dict, status: str):
2315 try:
2316 nsr_id = db_nsr["_id"]
2317 # configurationStatus
2318 config_status = db_nsr.get("configurationStatus")
2319 if config_status:
2320 db_nsr_update = {
2321 "configurationStatus.{}.status".format(index): status
2322 for index, v in enumerate(config_status)
2323 if v
2324 }
2325 # update status
2326 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2327
2328 except DbException as e:
2329 self.logger.warn(
2330 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2331 )
2332
2333 def _write_configuration_status(
2334 self,
2335 nsr_id: str,
2336 vca_index: int,
2337 status: str = None,
2338 element_under_configuration: str = None,
2339 element_type: str = None,
2340 other_update: dict = None,
2341 ):
2342
2343 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2344 # .format(vca_index, status))
2345
2346 try:
2347 db_path = "configurationStatus.{}.".format(vca_index)
2348 db_dict = other_update or {}
2349 if status:
2350 db_dict[db_path + "status"] = status
2351 if element_under_configuration:
2352 db_dict[
2353 db_path + "elementUnderConfiguration"
2354 ] = element_under_configuration
2355 if element_type:
2356 db_dict[db_path + "elementType"] = element_type
2357 self.update_db_2("nsrs", nsr_id, db_dict)
2358 except DbException as e:
2359 self.logger.warn(
2360 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2361 status, nsr_id, vca_index, e
2362 )
2363 )
2364
2365 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2366 """
2367 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2368 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2369 Database is used because the result can be obtained from a different LCM worker in case of HA.
2370 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2371 :param db_nslcmop: database content of nslcmop
2372 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2373 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2374 computed 'vim-account-id'
2375 """
2376 modified = False
2377 nslcmop_id = db_nslcmop["_id"]
2378 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2379 if placement_engine == "PLA":
2380 self.logger.debug(
2381 logging_text + "Invoke and wait for placement optimization"
2382 )
2383 await self.msg.aiowrite(
2384 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2385 )
2386 db_poll_interval = 5
2387 wait = db_poll_interval * 10
2388 pla_result = None
2389 while not pla_result and wait >= 0:
2390 await asyncio.sleep(db_poll_interval)
2391 wait -= db_poll_interval
2392 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2393 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2394
2395 if not pla_result:
2396 raise LcmException(
2397 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2398 )
2399
2400 for pla_vnf in pla_result["vnf"]:
2401 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2402 if not pla_vnf.get("vimAccountId") or not vnfr:
2403 continue
2404 modified = True
2405 self.db.set_one(
2406 "vnfrs",
2407 {"_id": vnfr["_id"]},
2408 {"vim-account-id": pla_vnf["vimAccountId"]},
2409 )
2410 # Modifies db_vnfrs
2411 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2412 return modified
2413
2414 def update_nsrs_with_pla_result(self, params):
2415 try:
2416 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2417 self.update_db_2(
2418 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2419 )
2420 except Exception as e:
2421 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2422
    async def instantiate(self, nsr_id, nslcmop_id):
        """Instantiate a Network Service.

        Orchestrates the whole deployment: reads nsr/nslcmop/nsd/vnfr/vnfd records,
        deploys KDUs, launches the RO deployment task and the N2VC execution
        environments, then (in the finally block) waits for all spawned tasks and
        reconciles the final status into the database and kafka.

        Runs as an HA-locked LCM task: only the worker that obtains the lock on
        the nslcmop proceeds.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None; result is written to "nsrs"/"nslcmops" and notified on kafka
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # operation param overrides the configured default timeout
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.ns_deploy

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode in place
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds full vnfd dicts, so this string
                # membership test is always True and the vnfd is re-read for every
                # vnfr using it — consider tracking the read ids in a separate set
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.public_key:
                n2vc_key_list.append(self.vca_config.public_key)

            stage[1] = "Deploying NS at VIM."
            # launched as a task so the N2VC deployment below runs concurrently;
            # awaited in the finally block via tasks_dict_info
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            # create namespace and certificate if any helm based EE is present in the NS
            if check_helm_ee_in_ns(db_vnfds):
                # TODO: create EE namespace
                # create TLS certificates
                await self.vca_map["helm-v3"].create_tls_certificate(
                    secret_name="ee-tls-{}".format(nsr_id),
                    dns_prefix="*",
                    nsr_id=nsr_id,
                    usage="server auth",
                )

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if any
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # Deploy charms for each KDU that supports one.
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of the work (waiting for tasks, status update, kafka
            # notification) is done at the finally block below

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected error: keep the full traceback in the operation record
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            # NOTE(review): this handler name shadows the outer 'exc'; harmless
            # here because 'exc' is not read again after this point
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2917
2918 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2919 if vnfd_id not in cached_vnfds:
2920 cached_vnfds[vnfd_id] = self.db.get_one(
2921 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2922 )
2923 return cached_vnfds[vnfd_id]
2924
2925 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2926 if vnf_profile_id not in cached_vnfrs:
2927 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2928 "vnfrs",
2929 {
2930 "member-vnf-index-ref": vnf_profile_id,
2931 "nsr-id-ref": nsr_id,
2932 },
2933 )
2934 return cached_vnfrs[vnf_profile_id]
2935
2936 def _is_deployed_vca_in_relation(
2937 self, vca: DeployedVCA, relation: Relation
2938 ) -> bool:
2939 found = False
2940 for endpoint in (relation.provider, relation.requirer):
2941 if endpoint["kdu-resource-profile-id"]:
2942 continue
2943 found = (
2944 vca.vnf_profile_id == endpoint.vnf_profile_id
2945 and vca.vdu_profile_id == endpoint.vdu_profile_id
2946 and vca.execution_environment_ref == endpoint.execution_environment_ref
2947 )
2948 if found:
2949 break
2950 return found
2951
2952 def _update_ee_relation_data_with_implicit_data(
2953 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2954 ):
2955 ee_relation_data = safe_get_ee_relation(
2956 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2957 )
2958 ee_relation_level = EELevel.get_level(ee_relation_data)
2959 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2960 "execution-environment-ref"
2961 ]:
2962 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2963 vnfd_id = vnf_profile["vnfd-id"]
2964 project = nsd["_admin"]["projects_read"][0]
2965 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2966 entity_id = (
2967 vnfd_id
2968 if ee_relation_level == EELevel.VNF
2969 else ee_relation_data["vdu-profile-id"]
2970 )
2971 ee = get_juju_ee_ref(db_vnfd, entity_id)
2972 if not ee:
2973 raise Exception(
2974 f"not execution environments found for ee_relation {ee_relation_data}"
2975 )
2976 ee_relation_data["execution-environment-ref"] = ee["id"]
2977 return ee_relation_data
2978
2979 def _get_ns_relations(
2980 self,
2981 nsr_id: str,
2982 nsd: Dict[str, Any],
2983 vca: DeployedVCA,
2984 cached_vnfds: Dict[str, Any],
2985 ) -> List[Relation]:
2986 relations = []
2987 db_ns_relations = get_ns_configuration_relation_list(nsd)
2988 for r in db_ns_relations:
2989 provider_dict = None
2990 requirer_dict = None
2991 if all(key in r for key in ("provider", "requirer")):
2992 provider_dict = r["provider"]
2993 requirer_dict = r["requirer"]
2994 elif "entities" in r:
2995 provider_id = r["entities"][0]["id"]
2996 provider_dict = {
2997 "nsr-id": nsr_id,
2998 "endpoint": r["entities"][0]["endpoint"],
2999 }
3000 if provider_id != nsd["id"]:
3001 provider_dict["vnf-profile-id"] = provider_id
3002 requirer_id = r["entities"][1]["id"]
3003 requirer_dict = {
3004 "nsr-id": nsr_id,
3005 "endpoint": r["entities"][1]["endpoint"],
3006 }
3007 if requirer_id != nsd["id"]:
3008 requirer_dict["vnf-profile-id"] = requirer_id
3009 else:
3010 raise Exception(
3011 "provider/requirer or entities must be included in the relation."
3012 )
3013 relation_provider = self._update_ee_relation_data_with_implicit_data(
3014 nsr_id, nsd, provider_dict, cached_vnfds
3015 )
3016 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3017 nsr_id, nsd, requirer_dict, cached_vnfds
3018 )
3019 provider = EERelation(relation_provider)
3020 requirer = EERelation(relation_requirer)
3021 relation = Relation(r["name"], provider, requirer)
3022 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3023 if vca_in_relation:
3024 relations.append(relation)
3025 return relations
3026
3027 def _get_vnf_relations(
3028 self,
3029 nsr_id: str,
3030 nsd: Dict[str, Any],
3031 vca: DeployedVCA,
3032 cached_vnfds: Dict[str, Any],
3033 ) -> List[Relation]:
3034 relations = []
3035 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
3036 vnf_profile_id = vnf_profile["id"]
3037 vnfd_id = vnf_profile["vnfd-id"]
3038 project = nsd["_admin"]["projects_read"][0]
3039 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3040 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3041 for r in db_vnf_relations:
3042 provider_dict = None
3043 requirer_dict = None
3044 if all(key in r for key in ("provider", "requirer")):
3045 provider_dict = r["provider"]
3046 requirer_dict = r["requirer"]
3047 elif "entities" in r:
3048 provider_id = r["entities"][0]["id"]
3049 provider_dict = {
3050 "nsr-id": nsr_id,
3051 "vnf-profile-id": vnf_profile_id,
3052 "endpoint": r["entities"][0]["endpoint"],
3053 }
3054 if provider_id != vnfd_id:
3055 provider_dict["vdu-profile-id"] = provider_id
3056 requirer_id = r["entities"][1]["id"]
3057 requirer_dict = {
3058 "nsr-id": nsr_id,
3059 "vnf-profile-id": vnf_profile_id,
3060 "endpoint": r["entities"][1]["endpoint"],
3061 }
3062 if requirer_id != vnfd_id:
3063 requirer_dict["vdu-profile-id"] = requirer_id
3064 else:
3065 raise Exception(
3066 "provider/requirer or entities must be included in the relation."
3067 )
3068 relation_provider = self._update_ee_relation_data_with_implicit_data(
3069 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3070 )
3071 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3072 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3073 )
3074 provider = EERelation(relation_provider)
3075 requirer = EERelation(relation_requirer)
3076 relation = Relation(r["name"], provider, requirer)
3077 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3078 if vca_in_relation:
3079 relations.append(relation)
3080 return relations
3081
3082 def _get_kdu_resource_data(
3083 self,
3084 ee_relation: EERelation,
3085 db_nsr: Dict[str, Any],
3086 cached_vnfds: Dict[str, Any],
3087 ) -> DeployedK8sResource:
3088 nsd = get_nsd(db_nsr)
3089 vnf_profiles = get_vnf_profiles(nsd)
3090 vnfd_id = find_in_list(
3091 vnf_profiles,
3092 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3093 )["vnfd-id"]
3094 project = nsd["_admin"]["projects_read"][0]
3095 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3096 kdu_resource_profile = get_kdu_resource_profile(
3097 db_vnfd, ee_relation.kdu_resource_profile_id
3098 )
3099 kdu_name = kdu_resource_profile["kdu-name"]
3100 deployed_kdu, _ = get_deployed_kdu(
3101 db_nsr.get("_admin", ()).get("deployed", ()),
3102 kdu_name,
3103 ee_relation.vnf_profile_id,
3104 )
3105 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3106 return deployed_kdu
3107
3108 def _get_deployed_component(
3109 self,
3110 ee_relation: EERelation,
3111 db_nsr: Dict[str, Any],
3112 cached_vnfds: Dict[str, Any],
3113 ) -> DeployedComponent:
3114 nsr_id = db_nsr["_id"]
3115 deployed_component = None
3116 ee_level = EELevel.get_level(ee_relation)
3117 if ee_level == EELevel.NS:
3118 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3119 if vca:
3120 deployed_component = DeployedVCA(nsr_id, vca)
3121 elif ee_level == EELevel.VNF:
3122 vca = get_deployed_vca(
3123 db_nsr,
3124 {
3125 "vdu_id": None,
3126 "member-vnf-index": ee_relation.vnf_profile_id,
3127 "ee_descriptor_id": ee_relation.execution_environment_ref,
3128 },
3129 )
3130 if vca:
3131 deployed_component = DeployedVCA(nsr_id, vca)
3132 elif ee_level == EELevel.VDU:
3133 vca = get_deployed_vca(
3134 db_nsr,
3135 {
3136 "vdu_id": ee_relation.vdu_profile_id,
3137 "member-vnf-index": ee_relation.vnf_profile_id,
3138 "ee_descriptor_id": ee_relation.execution_environment_ref,
3139 },
3140 )
3141 if vca:
3142 deployed_component = DeployedVCA(nsr_id, vca)
3143 elif ee_level == EELevel.KDU:
3144 kdu_resource_data = self._get_kdu_resource_data(
3145 ee_relation, db_nsr, cached_vnfds
3146 )
3147 if kdu_resource_data:
3148 deployed_component = DeployedK8sResource(kdu_resource_data)
3149 return deployed_component
3150
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to establish one relation between its two deployed endpoints.

        :return: True when the relation was added (caller can drop it from its
            pending list); False when either endpoint is not yet deployed or
            not yet configured, so the caller should retry later.
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # Both sides must exist and have their config software installed
        # before the relation can be added.
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints carry no vnf_profile_id and need no vnfr.
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
            # remove entry from relations list
            return True
        return False
3208
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Find and add every relation that involves the VCA at *vca_index*.

        Retries pending relations every 5 seconds until all are added or
        *timeout* seconds elapse.

        :return: True on success (also when there is nothing to add);
            False on timeout or on any error.
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            # caches shared by the relation helpers so that each vnfd/vnfr is
            # read from the database at most once across all retries
            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3281
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its K8s cluster and update the nsr/vnfr records.

        :param nsr_id: NS record id
        :param nsr_db_path: path of this KDU entry inside the nsr
            (e.g. "_admin.deployed.K8s.<index>")
        :param vnfr_data: VNF record that owns the KDU
        :param kdu_index: index of the kdur inside vnfr_data["kdur"]
        :param kdud: KDU descriptor taken from the VNFD
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster type/uuid, model, namespace and name data
        :param k8params: KDU instantiation parameters
        :param timeout: seconds allowed for the install and for each initial primitive
        :param vca_id: VCA id to use, if any
        :return: the generated (or user-provided) kdu instance name
        :raises: re-raises any failure after recording ERROR status in nsr/vnfr
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # Honour a user-provided deployment name; otherwise generate one.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    # for/else: no deployed service matched this mgmt service name
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives only when there is no juju-based
            # execution environment declared for this KDU.
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # Primitives run ordered by their "seq" value.
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3479
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Create an _install_kdu task for every KDU found in the VNF records.

        For each kdur entry: resolves the cluster uuid (waiting for any related
        k8scluster task), synchronizes helm repos once per cluster, records the
        KDU entry under _admin.deployed.K8s in the nsr, and registers an
        asynchronous installation task in *task_instantiation_info*.

        :raises LcmException: on descriptor/cluster problems or wrapped errors
        """
        # Launch kdus if present in the descriptor

        # cache: cluster type -> {cluster id -> internal cluster uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal uuid of a k8s cluster,
            initializing helm-v3 support for old clusters when missing."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3751
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch an instantiate_N2VC task for each execution environment of
        *descriptor_config*, reusing or creating its _admin.deployed.VCA entry.

        Only juju (charm) and helm-chart execution environments are handled;
        any other kind is skipped with a debug message. Tasks are registered
        in the LCM task list and in *task_instantiation_info*.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # "charm" present -> proxy charm in an LXC; otherwise native.
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Reuse an existing VCA entry matching this target; the for/else
            # creates a new entry only when no match was found.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3913
3914 @staticmethod
3915 def _create_nslcmop(nsr_id, operation, params):
3916 """
3917 Creates a ns-lcm-opp content to be stored at database.
3918 :param nsr_id: internal id of the instance
3919 :param operation: instantiate, terminate, scale, action, ...
3920 :param params: user parameters for the operation
3921 :return: dictionary following SOL005 format
3922 """
3923 # Raise exception if invalid arguments
3924 if not (nsr_id and operation and params):
3925 raise LcmException(
3926 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3927 )
3928 now = time()
3929 _id = str(uuid4())
3930 nslcmop = {
3931 "id": _id,
3932 "_id": _id,
3933 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3934 "operationState": "PROCESSING",
3935 "statusEnteredTime": now,
3936 "nsInstanceId": nsr_id,
3937 "lcmOperationType": operation,
3938 "startTime": now,
3939 "isAutomaticInvocation": False,
3940 "operationParams": params,
3941 "isCancelPending": False,
3942 "links": {
3943 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3944 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3945 },
3946 }
3947 return nslcmop
3948
3949 def _format_additional_params(self, params):
3950 params = params or {}
3951 for key, value in params.items():
3952 if str(value).startswith("!!yaml "):
3953 params[key] = yaml.safe_load(value[7:])
3954 return params
3955
3956 def _get_terminate_primitive_params(self, seq, vnf_index):
3957 primitive = seq.get("name")
3958 primitive_params = {}
3959 params = {
3960 "member_vnf_index": vnf_index,
3961 "primitive": primitive,
3962 "primitive_params": primitive_params,
3963 }
3964 desc_params = {}
3965 return self._map_primitive_params(seq, params, desc_params)
3966
3967 # sub-operations
3968
3969 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3970 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3971 if op.get("operationState") == "COMPLETED":
3972 # b. Skip sub-operation
3973 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3974 return self.SUBOPERATION_STATUS_SKIP
3975 else:
3976 # c. retry executing sub-operation
3977 # The sub-operation exists, and operationState != 'COMPLETED'
3978 # Update operationState = 'PROCESSING' to indicate a retry.
3979 operationState = "PROCESSING"
3980 detailed_status = "In progress"
3981 self._update_suboperation_status(
3982 db_nslcmop, op_index, operationState, detailed_status
3983 )
3984 # Return the sub-operation index
3985 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3986 # with arguments extracted from the sub-operation
3987 return op_index
3988
3989 # Find a sub-operation where all keys in a matching dictionary must match
3990 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3991 def _find_suboperation(self, db_nslcmop, match):
3992 if db_nslcmop and match:
3993 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3994 for i, op in enumerate(op_list):
3995 if all(op.get(k) == match[k] for k in match):
3996 return i
3997 return self.SUBOPERATION_STATUS_NOT_FOUND
3998
3999 # Update status for a sub-operation given its index
4000 def _update_suboperation_status(
4001 self, db_nslcmop, op_index, operationState, detailed_status
4002 ):
4003 # Update DB for HA tasks
4004 q_filter = {"_id": db_nslcmop["_id"]}
4005 update_dict = {
4006 "_admin.operations.{}.operationState".format(op_index): operationState,
4007 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
4008 }
4009 self.db.set_one(
4010 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
4011 )
4012
4013 # Add sub-operation, return the index of the added sub-operation
4014 # Optionally, set operationState, detailed-status, and operationType
4015 # Status and type are currently set for 'scale' sub-operations:
4016 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4017 # 'detailed-status' : status message
4018 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4019 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4020 def _add_suboperation(
4021 self,
4022 db_nslcmop,
4023 vnf_index,
4024 vdu_id,
4025 vdu_count_index,
4026 vdu_name,
4027 primitive,
4028 mapped_primitive_params,
4029 operationState=None,
4030 detailed_status=None,
4031 operationType=None,
4032 RO_nsr_id=None,
4033 RO_scaling_info=None,
4034 ):
4035 if not db_nslcmop:
4036 return self.SUBOPERATION_STATUS_NOT_FOUND
4037 # Get the "_admin.operations" list, if it exists
4038 db_nslcmop_admin = db_nslcmop.get("_admin", {})
4039 op_list = db_nslcmop_admin.get("operations")
4040 # Create or append to the "_admin.operations" list
4041 new_op = {
4042 "member_vnf_index": vnf_index,
4043 "vdu_id": vdu_id,
4044 "vdu_count_index": vdu_count_index,
4045 "primitive": primitive,
4046 "primitive_params": mapped_primitive_params,
4047 }
4048 if operationState:
4049 new_op["operationState"] = operationState
4050 if detailed_status:
4051 new_op["detailed-status"] = detailed_status
4052 if operationType:
4053 new_op["lcmOperationType"] = operationType
4054 if RO_nsr_id:
4055 new_op["RO_nsr_id"] = RO_nsr_id
4056 if RO_scaling_info:
4057 new_op["RO_scaling_info"] = RO_scaling_info
4058 if not op_list:
4059 # No existing operations, create key 'operations' with current operation as first list element
4060 db_nslcmop_admin.update({"operations": [new_op]})
4061 op_list = db_nslcmop_admin.get("operations")
4062 else:
4063 # Existing operations, append operation to list
4064 op_list.append(new_op)
4065
4066 db_nslcmop_update = {"_admin.operations": op_list}
4067 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4068 op_index = len(op_list) - 1
4069 return op_index
4070
4071 # Helper methods for scale() sub-operations
4072
4073 # pre-scale/post-scale:
4074 # Check for 3 different cases:
4075 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4076 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4077 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4078 def _check_or_add_scale_suboperation(
4079 self,
4080 db_nslcmop,
4081 vnf_index,
4082 vnf_config_primitive,
4083 primitive_params,
4084 operationType,
4085 RO_nsr_id=None,
4086 RO_scaling_info=None,
4087 ):
4088 # Find this sub-operation
4089 if RO_nsr_id and RO_scaling_info:
4090 operationType = "SCALE-RO"
4091 match = {
4092 "member_vnf_index": vnf_index,
4093 "RO_nsr_id": RO_nsr_id,
4094 "RO_scaling_info": RO_scaling_info,
4095 }
4096 else:
4097 match = {
4098 "member_vnf_index": vnf_index,
4099 "primitive": vnf_config_primitive,
4100 "primitive_params": primitive_params,
4101 "lcmOperationType": operationType,
4102 }
4103 op_index = self._find_suboperation(db_nslcmop, match)
4104 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4105 # a. New sub-operation
4106 # The sub-operation does not exist, add it.
4107 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4108 # The following parameters are set to None for all kind of scaling:
4109 vdu_id = None
4110 vdu_count_index = None
4111 vdu_name = None
4112 if RO_nsr_id and RO_scaling_info:
4113 vnf_config_primitive = None
4114 primitive_params = None
4115 else:
4116 RO_nsr_id = None
4117 RO_scaling_info = None
4118 # Initial status for sub-operation
4119 operationState = "PROCESSING"
4120 detailed_status = "In progress"
4121 # Add sub-operation for pre/post-scaling (zero or more operations)
4122 self._add_suboperation(
4123 db_nslcmop,
4124 vnf_index,
4125 vdu_id,
4126 vdu_count_index,
4127 vdu_name,
4128 vnf_config_primitive,
4129 primitive_params,
4130 operationState,
4131 detailed_status,
4132 operationType,
4133 RO_nsr_id,
4134 RO_scaling_info,
4135 )
4136 return self.SUBOPERATION_STATUS_NEW
4137 else:
4138 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4139 # or op_index (operationState != 'COMPLETED')
4140 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4141
4142 # Function to return execution_environment id
4143
4144 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4145 # TODO vdu_index_count
4146 for vca in vca_deployed_list:
4147 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4148 return vca["ee_id"]
4149
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for every log line of this task
        :param db_nslcmop: nslcmop database record (used for sub-operation bookkeeping)
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
        :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA (Juju controller) identifier to operate on
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value for backward compatibility with records that lack "type"
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation (so an HA retry can resume from here)
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            # remove the execution environment itself (e.g. helm release / juju app)
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4255
4256 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4257 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4258 namespace = "." + db_nsr["_id"]
4259 try:
4260 await self.n2vc.delete_namespace(
4261 namespace=namespace,
4262 total_timeout=self.timeout.charm_delete,
4263 vca_id=vca_id,
4264 )
4265 except N2VCNotFound: # already deleted. Skip
4266 pass
4267 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4268
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO (classic, non-NG flavor)
        :param logging_text: prefix for every log line of this task
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: NS record id in the "nsrs" collection
        :param nslcmop_id: operation record id in the "nslcmops" collection
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None. Raises LcmException if any deletion step failed.
        """
        db_nsr_update = {}
        failed_detail = []  # accumulates error messages; raised at the end
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a pending delete action from a previous (interrupted) attempt
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                # asynchronous deletion: RO returns an action_id to poll
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    # poll the delete action status at RO
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        # deletion still in progress at VIM
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # delete action finished
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # write progress only when the status text changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                # already removed at RO: treat as success
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns itself was deleted without errors)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO (again, only on a clean run so far)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4468
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate a Network Service instance (HA-locked LCM task).

        Stage 1 prepares the task, stage 2 executes per-VCA terminate
        primitives, stage 3 deletes all execution environments, KDUs and the
        RO/VIM deployment. The finally block always waits for pending tasks,
        writes the final nsrs/nslcmops status and notifies through kafka.

        :param nsr_id: NS record id in the "nsrs" collection
        :param nslcmop_id: operation record id in the "nslcmops" collection
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human description, consumed by _wait_for_tasks
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                # user-provided timeout overrides the configured default
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # deepcopy: the deployed info is inspected while tasks mutate db_nsr
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed; the finally block still closes the operation
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; fetch each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor at the right level (ns/vdu/kdu/vnf)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (NG-RO or classic RO depending on configuration)
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            # NOTE(review): "as exc" shadows the outer 'exc' variable and Python
            # deletes it when the handler ends; harmless here since 'exc' is not
            # read afterwards, but worth renaming in a future change
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        # keep VNFR state aligned with the NS state
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify NBI (and others) through kafka that the NS terminated
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4804
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, reporting progress and collecting errors.

        :param logging_text: prefix for every log line
        :param created_tasks_info: dict {task: human-readable description}
        :param timeout: global timeout in seconds for the whole set of tasks
        :param stage: 3-element status list; index 1 is updated with "done/total"
        :param nslcmop_id: operation id used to persist progress via _write_op_status
        :param nsr_id: when provided, errorDescription/errorDetail are also
            written to the nsrs record
        :return: list of error detail strings (empty if all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # every still-pending task is annotated as timed out
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known exception types get a one-line log; anything else
                    # is logged with its full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4881
4882 @staticmethod
4883 def _map_primitive_params(primitive_desc, params, instantiation_params):
4884 """
4885 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4886 The default-value is used. If it is between < > it look for a value at instantiation_params
4887 :param primitive_desc: portion of VNFD/NSD that describes primitive
4888 :param params: Params provided by user
4889 :param instantiation_params: Instantiation params provided by user
4890 :return: a dictionary with the calculated params
4891 """
4892 calculated_params = {}
4893 for parameter in primitive_desc.get("parameter", ()):
4894 param_name = parameter["name"]
4895 if param_name in params:
4896 calculated_params[param_name] = params[param_name]
4897 elif "default-value" in parameter or "value" in parameter:
4898 if "value" in parameter:
4899 calculated_params[param_name] = parameter["value"]
4900 else:
4901 calculated_params[param_name] = parameter["default-value"]
4902 if (
4903 isinstance(calculated_params[param_name], str)
4904 and calculated_params[param_name].startswith("<")
4905 and calculated_params[param_name].endswith(">")
4906 ):
4907 if calculated_params[param_name][1:-1] in instantiation_params:
4908 calculated_params[param_name] = instantiation_params[
4909 calculated_params[param_name][1:-1]
4910 ]
4911 else:
4912 raise LcmException(
4913 "Parameter {} needed to execute primitive {} not provided".format(
4914 calculated_params[param_name], primitive_desc["name"]
4915 )
4916 )
4917 else:
4918 raise LcmException(
4919 "Parameter {} needed to execute primitive {} not provided".format(
4920 param_name, primitive_desc["name"]
4921 )
4922 )
4923
4924 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4925 calculated_params[param_name] = yaml.safe_dump(
4926 calculated_params[param_name], default_flow_style=True, width=256
4927 )
4928 elif isinstance(calculated_params[param_name], str) and calculated_params[
4929 param_name
4930 ].startswith("!!yaml "):
4931 calculated_params[param_name] = calculated_params[param_name][7:]
4932 if parameter.get("data-type") == "INTEGER":
4933 try:
4934 calculated_params[param_name] = int(calculated_params[param_name])
4935 except ValueError: # error converting string to int
4936 raise LcmException(
4937 "Parameter {} of primitive {} must be integer".format(
4938 param_name, primitive_desc["name"]
4939 )
4940 )
4941 elif parameter.get("data-type") == "BOOLEAN":
4942 calculated_params[param_name] = not (
4943 (str(calculated_params[param_name])).lower() == "false"
4944 )
4945
4946 # add always ns_config_info if primitive name is config
4947 if primitive_desc["name"] == "config":
4948 if "ns_config_info" in instantiation_params:
4949 calculated_params["ns_config_info"] = instantiation_params[
4950 "ns_config_info"
4951 ]
4952 return calculated_params
4953
4954 def _look_for_deployed_vca(
4955 self,
4956 deployed_vca,
4957 member_vnf_index,
4958 vdu_id,
4959 vdu_count_index,
4960 kdu_name=None,
4961 ee_descriptor_id=None,
4962 ):
4963 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4964 for vca in deployed_vca:
4965 if not vca:
4966 continue
4967 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4968 continue
4969 if (
4970 vdu_count_index is not None
4971 and vdu_count_index != vca["vdu_count_index"]
4972 ):
4973 continue
4974 if kdu_name and kdu_name != vca["kdu_name"]:
4975 continue
4976 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4977 continue
4978 break
4979 else:
4980 # vca_deployed not found
4981 raise LcmException(
4982 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4983 " is not deployed".format(
4984 member_vnf_index,
4985 vdu_id,
4986 vdu_count_index,
4987 kdu_name,
4988 ee_descriptor_id,
4989 )
4990 )
4991 # get ee_id
4992 ee_id = vca.get("ee_id")
4993 vca_type = vca.get(
4994 "type", "lxc_proxy_charm"
4995 ) # default value for backward compatibility - proxy charm
4996 if not ee_id:
4997 raise LcmException(
4998 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4999 "execution environment".format(
5000 member_vnf_index, vdu_id, kdu_name, vdu_count_index
5001 )
5002 )
5003 return ee_id, vca_type
5004
5005 async def _ns_execute_primitive(
5006 self,
5007 ee_id,
5008 primitive,
5009 primitive_params,
5010 retries=0,
5011 retries_interval=30,
5012 timeout=None,
5013 vca_type=None,
5014 db_dict=None,
5015 vca_id: str = None,
5016 ) -> (str, str):
5017 try:
5018 if primitive == "config":
5019 primitive_params = {"params": primitive_params}
5020
5021 vca_type = vca_type or "lxc_proxy_charm"
5022
5023 while retries >= 0:
5024 try:
5025 output = await asyncio.wait_for(
5026 self.vca_map[vca_type].exec_primitive(
5027 ee_id=ee_id,
5028 primitive_name=primitive,
5029 params_dict=primitive_params,
5030 progress_timeout=self.timeout.progress_primitive,
5031 total_timeout=self.timeout.primitive,
5032 db_dict=db_dict,
5033 vca_id=vca_id,
5034 vca_type=vca_type,
5035 ),
5036 timeout=timeout or self.timeout.primitive,
5037 )
5038 # execution was OK
5039 break
5040 except asyncio.CancelledError:
5041 raise
5042 except Exception as e:
5043 retries -= 1
5044 if retries >= 0:
5045 self.logger.debug(
5046 "Error executing action {} on {} -> {}".format(
5047 primitive, ee_id, e
5048 )
5049 )
5050 # wait and retry
5051 await asyncio.sleep(retries_interval, loop=self.loop)
5052 else:
5053 if isinstance(e, asyncio.TimeoutError):
5054 e = N2VCException(
5055 message="Timed out waiting for action to complete"
5056 )
5057 return "FAILED", getattr(e, "message", repr(e))
5058
5059 return "COMPLETED", output
5060
5061 except (LcmException, asyncio.CancelledError):
5062 raise
5063 except Exception as e:
5064 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5065
5066 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5067 """
5068 Updating the vca_status with latest juju information in nsrs record
5069 :param: nsr_id: Id of the nsr
5070 :param: nslcmop_id: Id of the nslcmop
5071 :return: None
5072 """
5073
5074 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5075 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5076 vca_id = self.get_vca_id({}, db_nsr)
5077 if db_nsr["_admin"]["deployed"]["K8s"]:
5078 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5079 cluster_uuid, kdu_instance, cluster_type = (
5080 k8s["k8scluster-uuid"],
5081 k8s["kdu-instance"],
5082 k8s["k8scluster-type"],
5083 )
5084 await self._on_update_k8s_db(
5085 cluster_uuid=cluster_uuid,
5086 kdu_instance=kdu_instance,
5087 filter={"_id": nsr_id},
5088 vca_id=vca_id,
5089 cluster_type=cluster_type,
5090 )
5091 else:
5092 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5093 table, filter = "nsrs", {"_id": nsr_id}
5094 path = "_admin.deployed.VCA.{}.".format(vca_index)
5095 await self._on_update_n2vc_db(table, filter, path, {})
5096
5097 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5098 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5099
    async def action(self, nsr_id, nslcmop_id):
        """Run a (config) primitive against an NS, VNF, VDU or KDU.

        Reads the operation parameters from the nslcmop record, locates the
        primitive in the NSD/VNFD configuration, resolves the target execution
        environment (VCA) or K8s cluster, executes the primitive, and finally
        persists the result in the nslcmops/nsrs records and notifies via kafka.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmop (operation) record to process
        :return: (operation_state, detailed_status) — also written to the DB
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM instance owns this operation
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            if db_nslcmop["operationParams"].get("primitive_params"):
                # primitive_params is stored JSON-encoded in the operation record
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            # extract the action target and parameters from the operation
            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout.primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode in place
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only bound when vnf_index is set; this call
            # would raise NameError for a pure NS-level action — confirm callers
            # always provide member_vnf_index, or guard this path
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU-native operations (upgrade/rollback/status) need no descriptor
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                # the descriptor may map the primitive to a different EE-level name
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # resolve additional params at the right level (vdu/kdu/vnf/ns)
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loop variables shadow the outer "primitive"
                # operation parameter; primitive_name was derived earlier so the
                # remainder of the method is unaffected, but confirm nothing later
                # relies on the original "primitive" value
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            # NOTE(review): kdu_action is only bound inside the block above; if
            # kdu_name is set but get_configuration() returns nothing and the
            # primitive is not upgrade/rollback/status, this condition raises
            # NameError — confirm that combination cannot occur
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # user-supplied kdu_model (if any) overrides the deployed one
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        # strip a ":version" suffix from the deployed model
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]
                    if desc_params.get("kdu_atomic_upgrade"):
                        atomic_upgrade = desc_params.get(
                            "kdu_atomic_upgrade"
                        ).lower() in ("yes", "true", "1")
                        del desc_params["kdu_atomic_upgrade"]
                    else:
                        atomic_upgrade = True

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=atomic_upgrade,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        # outer timeout slightly larger than the inner one so the
                        # k8s connector times out first with a better message
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # generic KDU primitive execution
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # VCA (charm) primitive: resolve the deployed execution environment
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + "Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist the final result whether the action succeeded or not
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                # best-effort kafka notification; failures are only logged
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5470
5471 async def terminate_vdus(
5472 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5473 ):
5474 """This method terminates VDUs
5475
5476 Args:
5477 db_vnfr: VNF instance record
5478 member_vnf_index: VNF index to identify the VDUs to be removed
5479 db_nsr: NS instance record
5480 update_db_nslcmops: Nslcmop update record
5481 """
5482 vca_scaling_info = []
5483 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5484 scaling_info["scaling_direction"] = "IN"
5485 scaling_info["vdu-delete"] = {}
5486 scaling_info["kdu-delete"] = {}
5487 db_vdur = db_vnfr.get("vdur")
5488 vdur_list = copy(db_vdur)
5489 count_index = 0
5490 for index, vdu in enumerate(vdur_list):
5491 vca_scaling_info.append(
5492 {
5493 "osm_vdu_id": vdu["vdu-id-ref"],
5494 "member-vnf-index": member_vnf_index,
5495 "type": "delete",
5496 "vdu_index": count_index,
5497 }
5498 )
5499 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5500 scaling_info["vdu"].append(
5501 {
5502 "name": vdu.get("name") or vdu.get("vdu-name"),
5503 "vdu_id": vdu["vdu-id-ref"],
5504 "interface": [],
5505 }
5506 )
5507 for interface in vdu["interfaces"]:
5508 scaling_info["vdu"][index]["interface"].append(
5509 {
5510 "name": interface["name"],
5511 "ip_address": interface["ip-address"],
5512 "mac_address": interface.get("mac-address"),
5513 }
5514 )
5515 self.logger.info("NS update scaling info{}".format(scaling_info))
5516 stage[2] = "Terminating VDUs"
5517 if scaling_info.get("vdu-delete"):
5518 # scale_process = "RO"
5519 if self.ro_config.ng:
5520 await self._scale_ng_ro(
5521 logging_text,
5522 db_nsr,
5523 update_db_nslcmops,
5524 db_vnfr,
5525 scaling_info,
5526 stage,
5527 )
5528
5529 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5530 """This method is to Remove VNF instances from NS.
5531
5532 Args:
5533 nsr_id: NS instance id
5534 nslcmop_id: nslcmop id of update
5535 vnf_instance_id: id of the VNF instance to be removed
5536
5537 Returns:
5538 result: (str, str) COMPLETED/FAILED, details
5539 """
5540 try:
5541 db_nsr_update = {}
5542 logging_text = "Task ns={} update ".format(nsr_id)
5543 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5544 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5545 if check_vnfr_count > 1:
5546 stage = ["", "", ""]
5547 step = "Getting nslcmop from database"
5548 self.logger.debug(
5549 step + " after having waited for previous tasks to be completed"
5550 )
5551 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5552 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5553 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5554 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5555 """ db_vnfr = self.db.get_one(
5556 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5557
5558 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5559 await self.terminate_vdus(
5560 db_vnfr,
5561 member_vnf_index,
5562 db_nsr,
5563 update_db_nslcmops,
5564 stage,
5565 logging_text,
5566 )
5567
5568 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5569 constituent_vnfr.remove(db_vnfr.get("_id"))
5570 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5571 "constituent-vnfr-ref"
5572 )
5573 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5574 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5575 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5576 return "COMPLETED", "Done"
5577 else:
5578 step = "Terminate VNF Failed with"
5579 raise LcmException(
5580 "{} Cannot terminate the last VNF in this NS.".format(
5581 vnf_instance_id
5582 )
5583 )
5584 except (LcmException, asyncio.CancelledError):
5585 raise
5586 except Exception as e:
5587 self.logger.debug("Error removing VNF {}".format(e))
5588 return "FAILED", "Error removing VNF {}".format(e)
5589
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the current VDUs of the VNF, updates the vnfr with the new
        descriptor revision / connection points / vdur, and then instantiates
        the VDUs again through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the new descriptor's ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur was pre-computed by the caller and passed in the operation
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # re-read the vnfr so the instantiation below sees the updated record
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                # prepare cloud-init (if defined for the VDU) with OSM params
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                # NOTE(review): vca_scaling_info is accumulated but never consumed
                # in this method — confirm whether it is dead code
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                # NOTE(review): count_index stays 0 for every VDU — confirm this
                # is the intended create-count semantics for NG-RO
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5715
5716 async def _ns_charm_upgrade(
5717 self,
5718 ee_id,
5719 charm_id,
5720 charm_type,
5721 path,
5722 timeout: float = None,
5723 ) -> (str, str):
5724 """This method upgrade charms in VNF instances
5725
5726 Args:
5727 ee_id: Execution environment id
5728 path: Local path to the charm
5729 charm_id: charm-id
5730 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5731 timeout: (Float) Timeout for the ns update operation
5732
5733 Returns:
5734 result: (str, str) COMPLETED/FAILED, details
5735 """
5736 try:
5737 charm_type = charm_type or "lxc_proxy_charm"
5738 output = await self.vca_map[charm_type].upgrade_charm(
5739 ee_id=ee_id,
5740 path=path,
5741 charm_id=charm_id,
5742 charm_type=charm_type,
5743 timeout=timeout or self.timeout.ns_update,
5744 )
5745
5746 if output:
5747 return "COMPLETED", output
5748
5749 except (LcmException, asyncio.CancelledError):
5750 raise
5751
5752 except Exception as e:
5753
5754 self.logger.debug("Error upgrading charm {}".format(path))
5755
5756 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5757
5758 async def update(self, nsr_id, nslcmop_id):
5759 """Update NS according to different update types
5760
5761 This method performs upgrade of VNF instances then updates the revision
5762 number in VNF record
5763
5764 Args:
5765 nsr_id: Network service will be updated
5766 nslcmop_id: ns lcm operation id
5767
5768 Returns:
5769 It may raise DbException, LcmException, N2VCException, K8sException
5770
5771 """
5772 # Try to lock HA task here
5773 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5774 if not task_is_locked_by_me:
5775 return
5776
5777 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5778 self.logger.debug(logging_text + "Enter")
5779
5780 # Set the required variables to be filled up later
5781 db_nsr = None
5782 db_nslcmop_update = {}
5783 vnfr_update = {}
5784 nslcmop_operation_state = None
5785 db_nsr_update = {}
5786 error_description_nslcmop = ""
5787 exc = None
5788 change_type = "updated"
5789 detailed_status = ""
5790
5791 try:
5792 # wait for any previous tasks in process
5793 step = "Waiting for previous operations to terminate"
5794 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5795 self._write_ns_status(
5796 nsr_id=nsr_id,
5797 ns_state=None,
5798 current_operation="UPDATING",
5799 current_operation_id=nslcmop_id,
5800 )
5801
5802 step = "Getting nslcmop from database"
5803 db_nslcmop = self.db.get_one(
5804 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5805 )
5806 update_type = db_nslcmop["operationParams"]["updateType"]
5807
5808 step = "Getting nsr from database"
5809 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5810 old_operational_status = db_nsr["operational-status"]
5811 db_nsr_update["operational-status"] = "updating"
5812 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5813 nsr_deployed = db_nsr["_admin"].get("deployed")
5814
5815 if update_type == "CHANGE_VNFPKG":
5816
5817 # Get the input parameters given through update request
5818 vnf_instance_id = db_nslcmop["operationParams"][
5819 "changeVnfPackageData"
5820 ].get("vnfInstanceId")
5821
5822 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5823 "vnfdId"
5824 )
5825 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5826
5827 step = "Getting vnfr from database"
5828 db_vnfr = self.db.get_one(
5829 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5830 )
5831
5832 step = "Getting vnfds from database"
5833 # Latest VNFD
5834 latest_vnfd = self.db.get_one(
5835 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5836 )
5837 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5838
5839 # Current VNFD
5840 current_vnf_revision = db_vnfr.get("revision", 1)
5841 current_vnfd = self.db.get_one(
5842 "vnfds_revisions",
5843 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5844 fail_on_empty=False,
5845 )
5846 # Charm artifact paths will be filled up later
5847 (
5848 current_charm_artifact_path,
5849 target_charm_artifact_path,
5850 charm_artifact_paths,
5851 helm_artifacts,
5852 ) = ([], [], [], [])
5853
5854 step = "Checking if revision has changed in VNFD"
5855 if current_vnf_revision != latest_vnfd_revision:
5856
5857 change_type = "policy_updated"
5858
5859 # There is new revision of VNFD, update operation is required
5860 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5861 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5862
5863 step = "Removing the VNFD packages if they exist in the local path"
5864 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5865 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5866
5867 step = "Get the VNFD packages from FSMongo"
5868 self.fs.sync(from_path=latest_vnfd_path)
5869 self.fs.sync(from_path=current_vnfd_path)
5870
5871 step = (
5872 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5873 )
5874 current_base_folder = current_vnfd["_admin"]["storage"]
5875 latest_base_folder = latest_vnfd["_admin"]["storage"]
5876
5877 for vca_index, vca_deployed in enumerate(
5878 get_iterable(nsr_deployed, "VCA")
5879 ):
5880 vnf_index = db_vnfr.get("member-vnf-index-ref")
5881
5882 # Getting charm-id and charm-type
5883 if vca_deployed.get("member-vnf-index") == vnf_index:
5884 vca_id = self.get_vca_id(db_vnfr, db_nsr)
5885 vca_type = vca_deployed.get("type")
5886 vdu_count_index = vca_deployed.get("vdu_count_index")
5887
5888 # Getting ee-id
5889 ee_id = vca_deployed.get("ee_id")
5890
5891 step = "Getting descriptor config"
5892 if current_vnfd.get("kdu"):
5893
5894 search_key = "kdu_name"
5895 else:
5896 search_key = "vnfd_id"
5897
5898 entity_id = vca_deployed.get(search_key)
5899
5900 descriptor_config = get_configuration(
5901 current_vnfd, entity_id
5902 )
5903
5904 if "execution-environment-list" in descriptor_config:
5905 ee_list = descriptor_config.get(
5906 "execution-environment-list", []
5907 )
5908 else:
5909 ee_list = []
5910
5911 # There could be several charm used in the same VNF
5912 for ee_item in ee_list:
5913 if ee_item.get("juju"):
5914
5915 step = "Getting charm name"
5916 charm_name = ee_item["juju"].get("charm")
5917
5918 step = "Setting Charm artifact paths"
5919 current_charm_artifact_path.append(
5920 get_charm_artifact_path(
5921 current_base_folder,
5922 charm_name,
5923 vca_type,
5924 current_vnf_revision,
5925 )
5926 )
5927 target_charm_artifact_path.append(
5928 get_charm_artifact_path(
5929 latest_base_folder,
5930 charm_name,
5931 vca_type,
5932 latest_vnfd_revision,
5933 )
5934 )
5935 elif ee_item.get("helm-chart"):
5936 # add chart to list and all parameters
5937 step = "Getting helm chart name"
5938 chart_name = ee_item.get("helm-chart")
5939 if (
5940 ee_item.get("helm-version")
5941 and ee_item.get("helm-version") == "v2"
5942 ):
5943 vca_type = "helm"
5944 else:
5945 vca_type = "helm-v3"
5946 step = "Setting Helm chart artifact paths"
5947
5948 helm_artifacts.append(
5949 {
5950 "current_artifact_path": get_charm_artifact_path(
5951 current_base_folder,
5952 chart_name,
5953 vca_type,
5954 current_vnf_revision,
5955 ),
5956 "target_artifact_path": get_charm_artifact_path(
5957 latest_base_folder,
5958 chart_name,
5959 vca_type,
5960 latest_vnfd_revision,
5961 ),
5962 "ee_id": ee_id,
5963 "vca_index": vca_index,
5964 "vdu_index": vdu_count_index,
5965 }
5966 )
5967
5968 charm_artifact_paths = zip(
5969 current_charm_artifact_path, target_charm_artifact_path
5970 )
5971
5972 step = "Checking if software version has changed in VNFD"
5973 if find_software_version(current_vnfd) != find_software_version(
5974 latest_vnfd
5975 ):
5976
5977 step = "Checking if existing VNF has charm"
5978 for current_charm_path, target_charm_path in list(
5979 charm_artifact_paths
5980 ):
5981 if current_charm_path:
5982 raise LcmException(
5983 "Software version change is not supported as VNF instance {} has charm.".format(
5984 vnf_instance_id
5985 )
5986 )
5987
5988 # There is no change in the charm package, then redeploy the VNF
5989 # based on new descriptor
5990 step = "Redeploying VNF"
5991 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5992 (result, detailed_status) = await self._ns_redeploy_vnf(
5993 nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
5994 )
5995 if result == "FAILED":
5996 nslcmop_operation_state = result
5997 error_description_nslcmop = detailed_status
5998 db_nslcmop_update["detailed-status"] = detailed_status
5999 self.logger.debug(
6000 logging_text
6001 + " step {} Done with result {} {}".format(
6002 step, nslcmop_operation_state, detailed_status
6003 )
6004 )
6005
6006 else:
6007 step = "Checking if any charm package has changed or not"
6008 for current_charm_path, target_charm_path in list(
6009 charm_artifact_paths
6010 ):
6011 if (
6012 current_charm_path
6013 and target_charm_path
6014 and self.check_charm_hash_changed(
6015 current_charm_path, target_charm_path
6016 )
6017 ):
6018
6019 step = "Checking whether VNF uses juju bundle"
6020 if check_juju_bundle_existence(current_vnfd):
6021
6022 raise LcmException(
6023 "Charm upgrade is not supported for the instance which"
6024 " uses juju-bundle: {}".format(
6025 check_juju_bundle_existence(current_vnfd)
6026 )
6027 )
6028
6029 step = "Upgrading Charm"
6030 (
6031 result,
6032 detailed_status,
6033 ) = await self._ns_charm_upgrade(
6034 ee_id=ee_id,
6035 charm_id=vca_id,
6036 charm_type=vca_type,
6037 path=self.fs.path + target_charm_path,
6038 timeout=timeout_seconds,
6039 )
6040
6041 if result == "FAILED":
6042 nslcmop_operation_state = result
6043 error_description_nslcmop = detailed_status
6044
6045 db_nslcmop_update["detailed-status"] = detailed_status
6046 self.logger.debug(
6047 logging_text
6048 + " step {} Done with result {} {}".format(
6049 step, nslcmop_operation_state, detailed_status
6050 )
6051 )
6052
6053 step = "Updating policies"
6054 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6055 result = "COMPLETED"
6056 detailed_status = "Done"
6057 db_nslcmop_update["detailed-status"] = "Done"
6058
6059 # helm base EE
6060 for item in helm_artifacts:
6061 if not (
6062 item["current_artifact_path"]
6063 and item["target_artifact_path"]
6064 and self.check_charm_hash_changed(
6065 item["current_artifact_path"],
6066 item["target_artifact_path"],
6067 )
6068 ):
6069 continue
6070 db_update_entry = "_admin.deployed.VCA.{}.".format(
6071 item["vca_index"]
6072 )
6073 vnfr_id = db_vnfr["_id"]
6074 osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
6075 db_dict = {
6076 "collection": "nsrs",
6077 "filter": {"_id": nsr_id},
6078 "path": db_update_entry,
6079 }
6080 vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
6081 await self.vca_map[vca_type].upgrade_execution_environment(
6082 namespace=namespace,
6083 helm_id=helm_id,
6084 db_dict=db_dict,
6085 config=osm_config,
6086 artifact_path=item["target_artifact_path"],
6087 vca_type=vca_type,
6088 )
6089 vnf_id = db_vnfr.get("vnfd-ref")
6090 config_descriptor = get_configuration(latest_vnfd, vnf_id)
6091 self.logger.debug("get ssh key block")
6092 rw_mgmt_ip = None
6093 if deep_get(
6094 config_descriptor,
6095 ("config-access", "ssh-access", "required"),
6096 ):
6097 # Needed to inject a ssh key
6098 user = deep_get(
6099 config_descriptor,
6100 ("config-access", "ssh-access", "default-user"),
6101 )
6102 step = (
6103 "Install configuration Software, getting public ssh key"
6104 )
6105 pub_key = await self.vca_map[
6106 vca_type
6107 ].get_ee_ssh_public__key(
6108 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
6109 )
6110
6111 step = (
6112 "Insert public key into VM user={} ssh_key={}".format(
6113 user, pub_key
6114 )
6115 )
6116 self.logger.debug(logging_text + step)
6117
6118 # wait for RO (ip-address) Insert pub_key into VM
6119 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
6120 logging_text,
6121 nsr_id,
6122 vnfr_id,
6123 None,
6124 item["vdu_index"],
6125 user=user,
6126 pub_key=pub_key,
6127 )
6128
6129 initial_config_primitive_list = config_descriptor.get(
6130 "initial-config-primitive"
6131 )
6132 config_primitive = next(
6133 (
6134 p
6135 for p in initial_config_primitive_list
6136 if p["name"] == "config"
6137 ),
6138 None,
6139 )
6140 if not config_primitive:
6141 continue
6142
6143 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6144 if rw_mgmt_ip:
6145 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
6146 if db_vnfr.get("additionalParamsForVnf"):
6147 deploy_params.update(
6148 parse_yaml_strings(
6149 db_vnfr["additionalParamsForVnf"].copy()
6150 )
6151 )
6152 primitive_params_ = self._map_primitive_params(
6153 config_primitive, {}, deploy_params
6154 )
6155
6156 step = "execute primitive '{}' params '{}'".format(
6157 config_primitive["name"], primitive_params_
6158 )
6159 self.logger.debug(logging_text + step)
6160 await self.vca_map[vca_type].exec_primitive(
6161 ee_id=ee_id,
6162 primitive_name=config_primitive["name"],
6163 params_dict=primitive_params_,
6164 db_dict=db_dict,
6165 vca_id=vca_id,
6166 vca_type=vca_type,
6167 )
6168
6169 step = "Updating policies"
6170 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6171 detailed_status = "Done"
6172 db_nslcmop_update["detailed-status"] = "Done"
6173
6174 # If nslcmop_operation_state is None, so any operation is not failed.
6175 if not nslcmop_operation_state:
6176 nslcmop_operation_state = "COMPLETED"
6177
6178 # If update CHANGE_VNFPKG nslcmop_operation is successful
6179 # vnf revision need to be updated
6180 vnfr_update["revision"] = latest_vnfd_revision
6181 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
6182
6183 self.logger.debug(
6184 logging_text
6185 + " task Done with result {} {}".format(
6186 nslcmop_operation_state, detailed_status
6187 )
6188 )
6189 elif update_type == "REMOVE_VNF":
6190 # This part is included in https://osm.etsi.org/gerrit/11876
6191 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
6192 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
6193 member_vnf_index = db_vnfr["member-vnf-index-ref"]
6194 step = "Removing VNF"
6195 (result, detailed_status) = await self.remove_vnf(
6196 nsr_id, nslcmop_id, vnf_instance_id
6197 )
6198 if result == "FAILED":
6199 nslcmop_operation_state = result
6200 error_description_nslcmop = detailed_status
6201 db_nslcmop_update["detailed-status"] = detailed_status
6202 change_type = "vnf_terminated"
6203 if not nslcmop_operation_state:
6204 nslcmop_operation_state = "COMPLETED"
6205 self.logger.debug(
6206 logging_text
6207 + " task Done with result {} {}".format(
6208 nslcmop_operation_state, detailed_status
6209 )
6210 )
6211
6212 elif update_type == "OPERATE_VNF":
6213 vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
6214 "vnfInstanceId"
6215 ]
6216 operation_type = db_nslcmop["operationParams"]["operateVnfData"][
6217 "changeStateTo"
6218 ]
6219 additional_param = db_nslcmop["operationParams"]["operateVnfData"][
6220 "additionalParam"
6221 ]
6222 (result, detailed_status) = await self.rebuild_start_stop(
6223 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
6224 )
6225 if result == "FAILED":
6226 nslcmop_operation_state = result
6227 error_description_nslcmop = detailed_status
6228 db_nslcmop_update["detailed-status"] = detailed_status
6229 if not nslcmop_operation_state:
6230 nslcmop_operation_state = "COMPLETED"
6231 self.logger.debug(
6232 logging_text
6233 + " task Done with result {} {}".format(
6234 nslcmop_operation_state, detailed_status
6235 )
6236 )
6237
6238 # If nslcmop_operation_state is None, so any operation is not failed.
6239 # All operations are executed in overall.
6240 if not nslcmop_operation_state:
6241 nslcmop_operation_state = "COMPLETED"
6242 db_nsr_update["operational-status"] = old_operational_status
6243
6244 except (DbException, LcmException, N2VCException, K8sException) as e:
6245 self.logger.error(logging_text + "Exit Exception {}".format(e))
6246 exc = e
6247 except asyncio.CancelledError:
6248 self.logger.error(
6249 logging_text + "Cancelled Exception while '{}'".format(step)
6250 )
6251 exc = "Operation was cancelled"
6252 except asyncio.TimeoutError:
6253 self.logger.error(logging_text + "Timeout while '{}'".format(step))
6254 exc = "Timeout"
6255 except Exception as e:
6256 exc = traceback.format_exc()
6257 self.logger.critical(
6258 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6259 exc_info=True,
6260 )
6261 finally:
6262 if exc:
6263 db_nslcmop_update[
6264 "detailed-status"
6265 ] = (
6266 detailed_status
6267 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6268 nslcmop_operation_state = "FAILED"
6269 db_nsr_update["operational-status"] = old_operational_status
6270 if db_nsr:
6271 self._write_ns_status(
6272 nsr_id=nsr_id,
6273 ns_state=db_nsr["nsState"],
6274 current_operation="IDLE",
6275 current_operation_id=None,
6276 other_update=db_nsr_update,
6277 )
6278
6279 self._write_op_status(
6280 op_id=nslcmop_id,
6281 stage="",
6282 error_message=error_description_nslcmop,
6283 operation_state=nslcmop_operation_state,
6284 other_update=db_nslcmop_update,
6285 )
6286
6287 if nslcmop_operation_state:
6288 try:
6289 msg = {
6290 "nsr_id": nsr_id,
6291 "nslcmop_id": nslcmop_id,
6292 "operationState": nslcmop_operation_state,
6293 }
6294 if change_type in ("vnf_terminated", "policy_updated"):
6295 msg.update({"vnf_member_index": member_vnf_index})
6296 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6297 except Exception as e:
6298 self.logger.error(
6299 logging_text + "kafka_write notification Exception {}".format(e)
6300 )
6301 self.logger.debug(logging_text + "Exit")
6302 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6303 return nslcmop_operation_state, detailed_status
6304
6305 async def scale(self, nsr_id, nslcmop_id):
6306 # Try to lock HA task here
6307 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6308 if not task_is_locked_by_me:
6309 return
6310
6311 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6312 stage = ["", "", ""]
6313 tasks_dict_info = {}
6314 # ^ stage, step, VIM progress
6315 self.logger.debug(logging_text + "Enter")
6316 # get all needed from database
6317 db_nsr = None
6318 db_nslcmop_update = {}
6319 db_nsr_update = {}
6320 exc = None
6321 # in case of error, indicates what part of scale was failed to put nsr at error status
6322 scale_process = None
6323 old_operational_status = ""
6324 old_config_status = ""
6325 nsi_id = None
6326 try:
6327 # wait for any previous tasks in process
6328 step = "Waiting for previous operations to terminate"
6329 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6330 self._write_ns_status(
6331 nsr_id=nsr_id,
6332 ns_state=None,
6333 current_operation="SCALING",
6334 current_operation_id=nslcmop_id,
6335 )
6336
6337 step = "Getting nslcmop from database"
6338 self.logger.debug(
6339 step + " after having waited for previous tasks to be completed"
6340 )
6341 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6342
6343 step = "Getting nsr from database"
6344 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6345 old_operational_status = db_nsr["operational-status"]
6346 old_config_status = db_nsr["config-status"]
6347
6348 step = "Parsing scaling parameters"
6349 db_nsr_update["operational-status"] = "scaling"
6350 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6351 nsr_deployed = db_nsr["_admin"].get("deployed")
6352
6353 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6354 "scaleByStepData"
6355 ]["member-vnf-index"]
6356 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6357 "scaleByStepData"
6358 ]["scaling-group-descriptor"]
6359 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6360 # for backward compatibility
6361 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6362 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6363 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6364 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6365
6366 step = "Getting vnfr from database"
6367 db_vnfr = self.db.get_one(
6368 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6369 )
6370
6371 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6372
6373 step = "Getting vnfd from database"
6374 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6375
6376 base_folder = db_vnfd["_admin"]["storage"]
6377
6378 step = "Getting scaling-group-descriptor"
6379 scaling_descriptor = find_in_list(
6380 get_scaling_aspect(db_vnfd),
6381 lambda scale_desc: scale_desc["name"] == scaling_group,
6382 )
6383 if not scaling_descriptor:
6384 raise LcmException(
6385 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6386 "at vnfd:scaling-group-descriptor".format(scaling_group)
6387 )
6388
6389 step = "Sending scale order to VIM"
6390 # TODO check if ns is in a proper status
6391 nb_scale_op = 0
6392 if not db_nsr["_admin"].get("scaling-group"):
6393 self.update_db_2(
6394 "nsrs",
6395 nsr_id,
6396 {
6397 "_admin.scaling-group": [
6398 {"name": scaling_group, "nb-scale-op": 0}
6399 ]
6400 },
6401 )
6402 admin_scale_index = 0
6403 else:
6404 for admin_scale_index, admin_scale_info in enumerate(
6405 db_nsr["_admin"]["scaling-group"]
6406 ):
6407 if admin_scale_info["name"] == scaling_group:
6408 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6409 break
6410 else: # not found, set index one plus last element and add new entry with the name
6411 admin_scale_index += 1
6412 db_nsr_update[
6413 "_admin.scaling-group.{}.name".format(admin_scale_index)
6414 ] = scaling_group
6415
6416 vca_scaling_info = []
6417 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6418 if scaling_type == "SCALE_OUT":
6419 if "aspect-delta-details" not in scaling_descriptor:
6420 raise LcmException(
6421 "Aspect delta details not fount in scaling descriptor {}".format(
6422 scaling_descriptor["name"]
6423 )
6424 )
6425 # count if max-instance-count is reached
6426 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6427
6428 scaling_info["scaling_direction"] = "OUT"
6429 scaling_info["vdu-create"] = {}
6430 scaling_info["kdu-create"] = {}
6431 for delta in deltas:
6432 for vdu_delta in delta.get("vdu-delta", {}):
6433 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6434 # vdu_index also provides the number of instance of the targeted vdu
6435 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6436 cloud_init_text = self._get_vdu_cloud_init_content(
6437 vdud, db_vnfd
6438 )
6439 if cloud_init_text:
6440 additional_params = (
6441 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6442 or {}
6443 )
6444 cloud_init_list = []
6445
6446 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6447 max_instance_count = 10
6448 if vdu_profile and "max-number-of-instances" in vdu_profile:
6449 max_instance_count = vdu_profile.get(
6450 "max-number-of-instances", 10
6451 )
6452
6453 default_instance_num = get_number_of_instances(
6454 db_vnfd, vdud["id"]
6455 )
6456 instances_number = vdu_delta.get("number-of-instances", 1)
6457 nb_scale_op += instances_number
6458
6459 new_instance_count = nb_scale_op + default_instance_num
6460 # Control if new count is over max and vdu count is less than max.
6461 # Then assign new instance count
6462 if new_instance_count > max_instance_count > vdu_count:
6463 instances_number = new_instance_count - max_instance_count
6464 else:
6465 instances_number = instances_number
6466
6467 if new_instance_count > max_instance_count:
6468 raise LcmException(
6469 "reached the limit of {} (max-instance-count) "
6470 "scaling-out operations for the "
6471 "scaling-group-descriptor '{}'".format(
6472 nb_scale_op, scaling_group
6473 )
6474 )
6475 for x in range(vdu_delta.get("number-of-instances", 1)):
6476 if cloud_init_text:
6477 # TODO Information of its own ip is not available because db_vnfr is not updated.
6478 additional_params["OSM"] = get_osm_params(
6479 db_vnfr, vdu_delta["id"], vdu_index + x
6480 )
6481 cloud_init_list.append(
6482 self._parse_cloud_init(
6483 cloud_init_text,
6484 additional_params,
6485 db_vnfd["id"],
6486 vdud["id"],
6487 )
6488 )
6489 vca_scaling_info.append(
6490 {
6491 "osm_vdu_id": vdu_delta["id"],
6492 "member-vnf-index": vnf_index,
6493 "type": "create",
6494 "vdu_index": vdu_index + x,
6495 }
6496 )
6497 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6498 for kdu_delta in delta.get("kdu-resource-delta", {}):
6499 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6500 kdu_name = kdu_profile["kdu-name"]
6501 resource_name = kdu_profile.get("resource-name", "")
6502
6503 # Might have different kdus in the same delta
6504 # Should have list for each kdu
6505 if not scaling_info["kdu-create"].get(kdu_name, None):
6506 scaling_info["kdu-create"][kdu_name] = []
6507
6508 kdur = get_kdur(db_vnfr, kdu_name)
6509 if kdur.get("helm-chart"):
6510 k8s_cluster_type = "helm-chart-v3"
6511 self.logger.debug("kdur: {}".format(kdur))
6512 if (
6513 kdur.get("helm-version")
6514 and kdur.get("helm-version") == "v2"
6515 ):
6516 k8s_cluster_type = "helm-chart"
6517 elif kdur.get("juju-bundle"):
6518 k8s_cluster_type = "juju-bundle"
6519 else:
6520 raise LcmException(
6521 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6522 "juju-bundle. Maybe an old NBI version is running".format(
6523 db_vnfr["member-vnf-index-ref"], kdu_name
6524 )
6525 )
6526
6527 max_instance_count = 10
6528 if kdu_profile and "max-number-of-instances" in kdu_profile:
6529 max_instance_count = kdu_profile.get(
6530 "max-number-of-instances", 10
6531 )
6532
6533 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6534 deployed_kdu, _ = get_deployed_kdu(
6535 nsr_deployed, kdu_name, vnf_index
6536 )
6537 if deployed_kdu is None:
6538 raise LcmException(
6539 "KDU '{}' for vnf '{}' not deployed".format(
6540 kdu_name, vnf_index
6541 )
6542 )
6543 kdu_instance = deployed_kdu.get("kdu-instance")
6544 instance_num = await self.k8scluster_map[
6545 k8s_cluster_type
6546 ].get_scale_count(
6547 resource_name,
6548 kdu_instance,
6549 vca_id=vca_id,
6550 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6551 kdu_model=deployed_kdu.get("kdu-model"),
6552 )
6553 kdu_replica_count = instance_num + kdu_delta.get(
6554 "number-of-instances", 1
6555 )
6556
6557 # Control if new count is over max and instance_num is less than max.
6558 # Then assign max instance number to kdu replica count
6559 if kdu_replica_count > max_instance_count > instance_num:
6560 kdu_replica_count = max_instance_count
6561 if kdu_replica_count > max_instance_count:
6562 raise LcmException(
6563 "reached the limit of {} (max-instance-count) "
6564 "scaling-out operations for the "
6565 "scaling-group-descriptor '{}'".format(
6566 instance_num, scaling_group
6567 )
6568 )
6569
6570 for x in range(kdu_delta.get("number-of-instances", 1)):
6571 vca_scaling_info.append(
6572 {
6573 "osm_kdu_id": kdu_name,
6574 "member-vnf-index": vnf_index,
6575 "type": "create",
6576 "kdu_index": instance_num + x - 1,
6577 }
6578 )
6579 scaling_info["kdu-create"][kdu_name].append(
6580 {
6581 "member-vnf-index": vnf_index,
6582 "type": "create",
6583 "k8s-cluster-type": k8s_cluster_type,
6584 "resource-name": resource_name,
6585 "scale": kdu_replica_count,
6586 }
6587 )
6588 elif scaling_type == "SCALE_IN":
6589 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6590
6591 scaling_info["scaling_direction"] = "IN"
6592 scaling_info["vdu-delete"] = {}
6593 scaling_info["kdu-delete"] = {}
6594
6595 for delta in deltas:
6596 for vdu_delta in delta.get("vdu-delta", {}):
6597 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6598 min_instance_count = 0
6599 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6600 if vdu_profile and "min-number-of-instances" in vdu_profile:
6601 min_instance_count = vdu_profile["min-number-of-instances"]
6602
6603 default_instance_num = get_number_of_instances(
6604 db_vnfd, vdu_delta["id"]
6605 )
6606 instance_num = vdu_delta.get("number-of-instances", 1)
6607 nb_scale_op -= instance_num
6608
6609 new_instance_count = nb_scale_op + default_instance_num
6610
6611 if new_instance_count < min_instance_count < vdu_count:
6612 instances_number = min_instance_count - new_instance_count
6613 else:
6614 instances_number = instance_num
6615
6616 if new_instance_count < min_instance_count:
6617 raise LcmException(
6618 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6619 "scaling-group-descriptor '{}'".format(
6620 nb_scale_op, scaling_group
6621 )
6622 )
6623 for x in range(vdu_delta.get("number-of-instances", 1)):
6624 vca_scaling_info.append(
6625 {
6626 "osm_vdu_id": vdu_delta["id"],
6627 "member-vnf-index": vnf_index,
6628 "type": "delete",
6629 "vdu_index": vdu_index - 1 - x,
6630 }
6631 )
6632 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6633 for kdu_delta in delta.get("kdu-resource-delta", {}):
6634 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6635 kdu_name = kdu_profile["kdu-name"]
6636 resource_name = kdu_profile.get("resource-name", "")
6637
6638 if not scaling_info["kdu-delete"].get(kdu_name, None):
6639 scaling_info["kdu-delete"][kdu_name] = []
6640
6641 kdur = get_kdur(db_vnfr, kdu_name)
6642 if kdur.get("helm-chart"):
6643 k8s_cluster_type = "helm-chart-v3"
6644 self.logger.debug("kdur: {}".format(kdur))
6645 if (
6646 kdur.get("helm-version")
6647 and kdur.get("helm-version") == "v2"
6648 ):
6649 k8s_cluster_type = "helm-chart"
6650 elif kdur.get("juju-bundle"):
6651 k8s_cluster_type = "juju-bundle"
6652 else:
6653 raise LcmException(
6654 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6655 "juju-bundle. Maybe an old NBI version is running".format(
6656 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6657 )
6658 )
6659
6660 min_instance_count = 0
6661 if kdu_profile and "min-number-of-instances" in kdu_profile:
6662 min_instance_count = kdu_profile["min-number-of-instances"]
6663
6664 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6665 deployed_kdu, _ = get_deployed_kdu(
6666 nsr_deployed, kdu_name, vnf_index
6667 )
6668 if deployed_kdu is None:
6669 raise LcmException(
6670 "KDU '{}' for vnf '{}' not deployed".format(
6671 kdu_name, vnf_index
6672 )
6673 )
6674 kdu_instance = deployed_kdu.get("kdu-instance")
6675 instance_num = await self.k8scluster_map[
6676 k8s_cluster_type
6677 ].get_scale_count(
6678 resource_name,
6679 kdu_instance,
6680 vca_id=vca_id,
6681 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6682 kdu_model=deployed_kdu.get("kdu-model"),
6683 )
6684 kdu_replica_count = instance_num - kdu_delta.get(
6685 "number-of-instances", 1
6686 )
6687
6688 if kdu_replica_count < min_instance_count < instance_num:
6689 kdu_replica_count = min_instance_count
6690 if kdu_replica_count < min_instance_count:
6691 raise LcmException(
6692 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6693 "scaling-group-descriptor '{}'".format(
6694 instance_num, scaling_group
6695 )
6696 )
6697
6698 for x in range(kdu_delta.get("number-of-instances", 1)):
6699 vca_scaling_info.append(
6700 {
6701 "osm_kdu_id": kdu_name,
6702 "member-vnf-index": vnf_index,
6703 "type": "delete",
6704 "kdu_index": instance_num - x - 1,
6705 }
6706 )
6707 scaling_info["kdu-delete"][kdu_name].append(
6708 {
6709 "member-vnf-index": vnf_index,
6710 "type": "delete",
6711 "k8s-cluster-type": k8s_cluster_type,
6712 "resource-name": resource_name,
6713 "scale": kdu_replica_count,
6714 }
6715 )
6716
6717 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6718 vdu_delete = copy(scaling_info.get("vdu-delete"))
6719 if scaling_info["scaling_direction"] == "IN":
6720 for vdur in reversed(db_vnfr["vdur"]):
6721 if vdu_delete.get(vdur["vdu-id-ref"]):
6722 vdu_delete[vdur["vdu-id-ref"]] -= 1
6723 scaling_info["vdu"].append(
6724 {
6725 "name": vdur.get("name") or vdur.get("vdu-name"),
6726 "vdu_id": vdur["vdu-id-ref"],
6727 "interface": [],
6728 }
6729 )
6730 for interface in vdur["interfaces"]:
6731 scaling_info["vdu"][-1]["interface"].append(
6732 {
6733 "name": interface["name"],
6734 "ip_address": interface["ip-address"],
6735 "mac_address": interface.get("mac-address"),
6736 }
6737 )
6738 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6739
6740 # PRE-SCALE BEGIN
6741 step = "Executing pre-scale vnf-config-primitive"
6742 if scaling_descriptor.get("scaling-config-action"):
6743 for scaling_config_action in scaling_descriptor[
6744 "scaling-config-action"
6745 ]:
6746 if (
6747 scaling_config_action.get("trigger") == "pre-scale-in"
6748 and scaling_type == "SCALE_IN"
6749 ) or (
6750 scaling_config_action.get("trigger") == "pre-scale-out"
6751 and scaling_type == "SCALE_OUT"
6752 ):
6753 vnf_config_primitive = scaling_config_action[
6754 "vnf-config-primitive-name-ref"
6755 ]
6756 step = db_nslcmop_update[
6757 "detailed-status"
6758 ] = "executing pre-scale scaling-config-action '{}'".format(
6759 vnf_config_primitive
6760 )
6761
6762 # look for primitive
6763 for config_primitive in (
6764 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6765 ).get("config-primitive", ()):
6766 if config_primitive["name"] == vnf_config_primitive:
6767 break
6768 else:
6769 raise LcmException(
6770 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6771 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6772 "primitive".format(scaling_group, vnf_config_primitive)
6773 )
6774
6775 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6776 if db_vnfr.get("additionalParamsForVnf"):
6777 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6778
6779 scale_process = "VCA"
6780 db_nsr_update["config-status"] = "configuring pre-scaling"
6781 primitive_params = self._map_primitive_params(
6782 config_primitive, {}, vnfr_params
6783 )
6784
6785 # Pre-scale retry check: Check if this sub-operation has been executed before
6786 op_index = self._check_or_add_scale_suboperation(
6787 db_nslcmop,
6788 vnf_index,
6789 vnf_config_primitive,
6790 primitive_params,
6791 "PRE-SCALE",
6792 )
6793 if op_index == self.SUBOPERATION_STATUS_SKIP:
6794 # Skip sub-operation
6795 result = "COMPLETED"
6796 result_detail = "Done"
6797 self.logger.debug(
6798 logging_text
6799 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6800 vnf_config_primitive, result, result_detail
6801 )
6802 )
6803 else:
6804 if op_index == self.SUBOPERATION_STATUS_NEW:
6805 # New sub-operation: Get index of this sub-operation
6806 op_index = (
6807 len(db_nslcmop.get("_admin", {}).get("operations"))
6808 - 1
6809 )
6810 self.logger.debug(
6811 logging_text
6812 + "vnf_config_primitive={} New sub-operation".format(
6813 vnf_config_primitive
6814 )
6815 )
6816 else:
6817 # retry: Get registered params for this existing sub-operation
6818 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6819 op_index
6820 ]
6821 vnf_index = op.get("member_vnf_index")
6822 vnf_config_primitive = op.get("primitive")
6823 primitive_params = op.get("primitive_params")
6824 self.logger.debug(
6825 logging_text
6826 + "vnf_config_primitive={} Sub-operation retry".format(
6827 vnf_config_primitive
6828 )
6829 )
6830 # Execute the primitive, either with new (first-time) or registered (reintent) args
6831 ee_descriptor_id = config_primitive.get(
6832 "execution-environment-ref"
6833 )
6834 primitive_name = config_primitive.get(
6835 "execution-environment-primitive", vnf_config_primitive
6836 )
6837 ee_id, vca_type = self._look_for_deployed_vca(
6838 nsr_deployed["VCA"],
6839 member_vnf_index=vnf_index,
6840 vdu_id=None,
6841 vdu_count_index=None,
6842 ee_descriptor_id=ee_descriptor_id,
6843 )
6844 result, result_detail = await self._ns_execute_primitive(
6845 ee_id,
6846 primitive_name,
6847 primitive_params,
6848 vca_type=vca_type,
6849 vca_id=vca_id,
6850 )
6851 self.logger.debug(
6852 logging_text
6853 + "vnf_config_primitive={} Done with result {} {}".format(
6854 vnf_config_primitive, result, result_detail
6855 )
6856 )
6857 # Update operationState = COMPLETED | FAILED
6858 self._update_suboperation_status(
6859 db_nslcmop, op_index, result, result_detail
6860 )
6861
6862 if result == "FAILED":
6863 raise LcmException(result_detail)
6864 db_nsr_update["config-status"] = old_config_status
6865 scale_process = None
6866 # PRE-SCALE END
6867
6868 db_nsr_update[
6869 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6870 ] = nb_scale_op
6871 db_nsr_update[
6872 "_admin.scaling-group.{}.time".format(admin_scale_index)
6873 ] = time()
6874
6875 # SCALE-IN VCA - BEGIN
6876 if vca_scaling_info:
6877 step = db_nslcmop_update[
6878 "detailed-status"
6879 ] = "Deleting the execution environments"
6880 scale_process = "VCA"
6881 for vca_info in vca_scaling_info:
6882 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6883 member_vnf_index = str(vca_info["member-vnf-index"])
6884 self.logger.debug(
6885 logging_text + "vdu info: {}".format(vca_info)
6886 )
6887 if vca_info.get("osm_vdu_id"):
6888 vdu_id = vca_info["osm_vdu_id"]
6889 vdu_index = int(vca_info["vdu_index"])
6890 stage[
6891 1
6892 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6893 member_vnf_index, vdu_id, vdu_index
6894 )
6895 stage[2] = step = "Scaling in VCA"
6896 self._write_op_status(op_id=nslcmop_id, stage=stage)
6897 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6898 config_update = db_nsr["configurationStatus"]
6899 for vca_index, vca in enumerate(vca_update):
6900 if (
6901 (vca or vca.get("ee_id"))
6902 and vca["member-vnf-index"] == member_vnf_index
6903 and vca["vdu_count_index"] == vdu_index
6904 ):
6905 if vca.get("vdu_id"):
6906 config_descriptor = get_configuration(
6907 db_vnfd, vca.get("vdu_id")
6908 )
6909 elif vca.get("kdu_name"):
6910 config_descriptor = get_configuration(
6911 db_vnfd, vca.get("kdu_name")
6912 )
6913 else:
6914 config_descriptor = get_configuration(
6915 db_vnfd, db_vnfd["id"]
6916 )
6917 operation_params = (
6918 db_nslcmop.get("operationParams") or {}
6919 )
6920 exec_terminate_primitives = not operation_params.get(
6921 "skip_terminate_primitives"
6922 ) and vca.get("needed_terminate")
6923 task = asyncio.ensure_future(
6924 asyncio.wait_for(
6925 self.destroy_N2VC(
6926 logging_text,
6927 db_nslcmop,
6928 vca,
6929 config_descriptor,
6930 vca_index,
6931 destroy_ee=True,
6932 exec_primitives=exec_terminate_primitives,
6933 scaling_in=True,
6934 vca_id=vca_id,
6935 ),
6936 timeout=self.timeout.charm_delete,
6937 )
6938 )
6939 tasks_dict_info[task] = "Terminating VCA {}".format(
6940 vca.get("ee_id")
6941 )
6942 del vca_update[vca_index]
6943 del config_update[vca_index]
6944 # wait for pending tasks of terminate primitives
6945 if tasks_dict_info:
6946 self.logger.debug(
6947 logging_text
6948 + "Waiting for tasks {}".format(
6949 list(tasks_dict_info.keys())
6950 )
6951 )
6952 error_list = await self._wait_for_tasks(
6953 logging_text,
6954 tasks_dict_info,
6955 min(
6956 self.timeout.charm_delete, self.timeout.ns_terminate
6957 ),
6958 stage,
6959 nslcmop_id,
6960 )
6961 tasks_dict_info.clear()
6962 if error_list:
6963 raise LcmException("; ".join(error_list))
6964
6965 db_vca_and_config_update = {
6966 "_admin.deployed.VCA": vca_update,
6967 "configurationStatus": config_update,
6968 }
6969 self.update_db_2(
6970 "nsrs", db_nsr["_id"], db_vca_and_config_update
6971 )
6972 scale_process = None
6973 # SCALE-IN VCA - END
6974
6975 # SCALE RO - BEGIN
6976 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6977 scale_process = "RO"
6978 if self.ro_config.ng:
6979 await self._scale_ng_ro(
6980 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6981 )
6982 scaling_info.pop("vdu-create", None)
6983 scaling_info.pop("vdu-delete", None)
6984
6985 scale_process = None
6986 # SCALE RO - END
6987
6988 # SCALE KDU - BEGIN
6989 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6990 scale_process = "KDU"
6991 await self._scale_kdu(
6992 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6993 )
6994 scaling_info.pop("kdu-create", None)
6995 scaling_info.pop("kdu-delete", None)
6996
6997 scale_process = None
6998 # SCALE KDU - END
6999
7000 if db_nsr_update:
7001 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7002
7003 # SCALE-UP VCA - BEGIN
7004 if vca_scaling_info:
7005 step = db_nslcmop_update[
7006 "detailed-status"
7007 ] = "Creating new execution environments"
7008 scale_process = "VCA"
7009 for vca_info in vca_scaling_info:
7010 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
7011 member_vnf_index = str(vca_info["member-vnf-index"])
7012 self.logger.debug(
7013 logging_text + "vdu info: {}".format(vca_info)
7014 )
7015 vnfd_id = db_vnfr["vnfd-ref"]
7016 if vca_info.get("osm_vdu_id"):
7017 vdu_index = int(vca_info["vdu_index"])
7018 deploy_params = {"OSM": get_osm_params(db_vnfr)}
7019 if db_vnfr.get("additionalParamsForVnf"):
7020 deploy_params.update(
7021 parse_yaml_strings(
7022 db_vnfr["additionalParamsForVnf"].copy()
7023 )
7024 )
7025 descriptor_config = get_configuration(
7026 db_vnfd, db_vnfd["id"]
7027 )
7028 if descriptor_config:
7029 vdu_id = None
7030 vdu_name = None
7031 kdu_name = None
7032 self._deploy_n2vc(
7033 logging_text=logging_text
7034 + "member_vnf_index={} ".format(member_vnf_index),
7035 db_nsr=db_nsr,
7036 db_vnfr=db_vnfr,
7037 nslcmop_id=nslcmop_id,
7038 nsr_id=nsr_id,
7039 nsi_id=nsi_id,
7040 vnfd_id=vnfd_id,
7041 vdu_id=vdu_id,
7042 kdu_name=kdu_name,
7043 member_vnf_index=member_vnf_index,
7044 vdu_index=vdu_index,
7045 vdu_name=vdu_name,
7046 deploy_params=deploy_params,
7047 descriptor_config=descriptor_config,
7048 base_folder=base_folder,
7049 task_instantiation_info=tasks_dict_info,
7050 stage=stage,
7051 )
7052 vdu_id = vca_info["osm_vdu_id"]
7053 vdur = find_in_list(
7054 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
7055 )
7056 descriptor_config = get_configuration(db_vnfd, vdu_id)
7057 if vdur.get("additionalParams"):
7058 deploy_params_vdu = parse_yaml_strings(
7059 vdur["additionalParams"]
7060 )
7061 else:
7062 deploy_params_vdu = deploy_params
7063 deploy_params_vdu["OSM"] = get_osm_params(
7064 db_vnfr, vdu_id, vdu_count_index=vdu_index
7065 )
7066 if descriptor_config:
7067 vdu_name = None
7068 kdu_name = None
7069 stage[
7070 1
7071 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7072 member_vnf_index, vdu_id, vdu_index
7073 )
7074 stage[2] = step = "Scaling out VCA"
7075 self._write_op_status(op_id=nslcmop_id, stage=stage)
7076 self._deploy_n2vc(
7077 logging_text=logging_text
7078 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7079 member_vnf_index, vdu_id, vdu_index
7080 ),
7081 db_nsr=db_nsr,
7082 db_vnfr=db_vnfr,
7083 nslcmop_id=nslcmop_id,
7084 nsr_id=nsr_id,
7085 nsi_id=nsi_id,
7086 vnfd_id=vnfd_id,
7087 vdu_id=vdu_id,
7088 kdu_name=kdu_name,
7089 member_vnf_index=member_vnf_index,
7090 vdu_index=vdu_index,
7091 vdu_name=vdu_name,
7092 deploy_params=deploy_params_vdu,
7093 descriptor_config=descriptor_config,
7094 base_folder=base_folder,
7095 task_instantiation_info=tasks_dict_info,
7096 stage=stage,
7097 )
7098 # SCALE-UP VCA - END
7099 scale_process = None
7100
7101 # POST-SCALE BEGIN
7102 # execute primitive service POST-SCALING
7103 step = "Executing post-scale vnf-config-primitive"
7104 if scaling_descriptor.get("scaling-config-action"):
7105 for scaling_config_action in scaling_descriptor[
7106 "scaling-config-action"
7107 ]:
7108 if (
7109 scaling_config_action.get("trigger") == "post-scale-in"
7110 and scaling_type == "SCALE_IN"
7111 ) or (
7112 scaling_config_action.get("trigger") == "post-scale-out"
7113 and scaling_type == "SCALE_OUT"
7114 ):
7115 vnf_config_primitive = scaling_config_action[
7116 "vnf-config-primitive-name-ref"
7117 ]
7118 step = db_nslcmop_update[
7119 "detailed-status"
7120 ] = "executing post-scale scaling-config-action '{}'".format(
7121 vnf_config_primitive
7122 )
7123
7124 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
7125 if db_vnfr.get("additionalParamsForVnf"):
7126 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
7127
7128 # look for primitive
7129 for config_primitive in (
7130 get_configuration(db_vnfd, db_vnfd["id"]) or {}
7131 ).get("config-primitive", ()):
7132 if config_primitive["name"] == vnf_config_primitive:
7133 break
7134 else:
7135 raise LcmException(
7136 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7137 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7138 "config-primitive".format(
7139 scaling_group, vnf_config_primitive
7140 )
7141 )
7142 scale_process = "VCA"
7143 db_nsr_update["config-status"] = "configuring post-scaling"
7144 primitive_params = self._map_primitive_params(
7145 config_primitive, {}, vnfr_params
7146 )
7147
7148 # Post-scale retry check: Check if this sub-operation has been executed before
7149 op_index = self._check_or_add_scale_suboperation(
7150 db_nslcmop,
7151 vnf_index,
7152 vnf_config_primitive,
7153 primitive_params,
7154 "POST-SCALE",
7155 )
7156 if op_index == self.SUBOPERATION_STATUS_SKIP:
7157 # Skip sub-operation
7158 result = "COMPLETED"
7159 result_detail = "Done"
7160 self.logger.debug(
7161 logging_text
7162 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7163 vnf_config_primitive, result, result_detail
7164 )
7165 )
7166 else:
7167 if op_index == self.SUBOPERATION_STATUS_NEW:
7168 # New sub-operation: Get index of this sub-operation
7169 op_index = (
7170 len(db_nslcmop.get("_admin", {}).get("operations"))
7171 - 1
7172 )
7173 self.logger.debug(
7174 logging_text
7175 + "vnf_config_primitive={} New sub-operation".format(
7176 vnf_config_primitive
7177 )
7178 )
7179 else:
7180 # retry: Get registered params for this existing sub-operation
7181 op = db_nslcmop.get("_admin", {}).get("operations", [])[
7182 op_index
7183 ]
7184 vnf_index = op.get("member_vnf_index")
7185 vnf_config_primitive = op.get("primitive")
7186 primitive_params = op.get("primitive_params")
7187 self.logger.debug(
7188 logging_text
7189 + "vnf_config_primitive={} Sub-operation retry".format(
7190 vnf_config_primitive
7191 )
7192 )
7193 # Execute the primitive, either with new (first-time) or registered (reintent) args
7194 ee_descriptor_id = config_primitive.get(
7195 "execution-environment-ref"
7196 )
7197 primitive_name = config_primitive.get(
7198 "execution-environment-primitive", vnf_config_primitive
7199 )
7200 ee_id, vca_type = self._look_for_deployed_vca(
7201 nsr_deployed["VCA"],
7202 member_vnf_index=vnf_index,
7203 vdu_id=None,
7204 vdu_count_index=None,
7205 ee_descriptor_id=ee_descriptor_id,
7206 )
7207 result, result_detail = await self._ns_execute_primitive(
7208 ee_id,
7209 primitive_name,
7210 primitive_params,
7211 vca_type=vca_type,
7212 vca_id=vca_id,
7213 )
7214 self.logger.debug(
7215 logging_text
7216 + "vnf_config_primitive={} Done with result {} {}".format(
7217 vnf_config_primitive, result, result_detail
7218 )
7219 )
7220 # Update operationState = COMPLETED | FAILED
7221 self._update_suboperation_status(
7222 db_nslcmop, op_index, result, result_detail
7223 )
7224
7225 if result == "FAILED":
7226 raise LcmException(result_detail)
7227 db_nsr_update["config-status"] = old_config_status
7228 scale_process = None
7229 # POST-SCALE END
7230
7231 db_nsr_update[
7232 "detailed-status"
7233 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7234 db_nsr_update["operational-status"] = (
7235 "running"
7236 if old_operational_status == "failed"
7237 else old_operational_status
7238 )
7239 db_nsr_update["config-status"] = old_config_status
7240 return
7241 except (
7242 ROclient.ROClientException,
7243 DbException,
7244 LcmException,
7245 NgRoException,
7246 ) as e:
7247 self.logger.error(logging_text + "Exit Exception {}".format(e))
7248 exc = e
7249 except asyncio.CancelledError:
7250 self.logger.error(
7251 logging_text + "Cancelled Exception while '{}'".format(step)
7252 )
7253 exc = "Operation was cancelled"
7254 except Exception as e:
7255 exc = traceback.format_exc()
7256 self.logger.critical(
7257 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7258 exc_info=True,
7259 )
7260 finally:
7261 self._write_ns_status(
7262 nsr_id=nsr_id,
7263 ns_state=None,
7264 current_operation="IDLE",
7265 current_operation_id=None,
7266 )
7267 if tasks_dict_info:
7268 stage[1] = "Waiting for instantiate pending tasks."
7269 self.logger.debug(logging_text + stage[1])
7270 exc = await self._wait_for_tasks(
7271 logging_text,
7272 tasks_dict_info,
7273 self.timeout.ns_deploy,
7274 stage,
7275 nslcmop_id,
7276 nsr_id=nsr_id,
7277 )
7278 if exc:
7279 db_nslcmop_update[
7280 "detailed-status"
7281 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7282 nslcmop_operation_state = "FAILED"
7283 if db_nsr:
7284 db_nsr_update["operational-status"] = old_operational_status
7285 db_nsr_update["config-status"] = old_config_status
7286 db_nsr_update["detailed-status"] = ""
7287 if scale_process:
7288 if "VCA" in scale_process:
7289 db_nsr_update["config-status"] = "failed"
7290 if "RO" in scale_process:
7291 db_nsr_update["operational-status"] = "failed"
7292 db_nsr_update[
7293 "detailed-status"
7294 ] = "FAILED scaling nslcmop={} {}: {}".format(
7295 nslcmop_id, step, exc
7296 )
7297 else:
7298 error_description_nslcmop = None
7299 nslcmop_operation_state = "COMPLETED"
7300 db_nslcmop_update["detailed-status"] = "Done"
7301
7302 self._write_op_status(
7303 op_id=nslcmop_id,
7304 stage="",
7305 error_message=error_description_nslcmop,
7306 operation_state=nslcmop_operation_state,
7307 other_update=db_nslcmop_update,
7308 )
7309 if db_nsr:
7310 self._write_ns_status(
7311 nsr_id=nsr_id,
7312 ns_state=None,
7313 current_operation="IDLE",
7314 current_operation_id=None,
7315 other_update=db_nsr_update,
7316 )
7317
7318 if nslcmop_operation_state:
7319 try:
7320 msg = {
7321 "nsr_id": nsr_id,
7322 "nslcmop_id": nslcmop_id,
7323 "operationState": nslcmop_operation_state,
7324 }
7325 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7326 except Exception as e:
7327 self.logger.error(
7328 logging_text + "kafka_write notification Exception {}".format(e)
7329 )
7330 self.logger.debug(logging_text + "Exit")
7331 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7332
7333 async def _scale_kdu(
7334 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7335 ):
7336 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7337 for kdu_name in _scaling_info:
7338 for kdu_scaling_info in _scaling_info[kdu_name]:
7339 deployed_kdu, index = get_deployed_kdu(
7340 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7341 )
7342 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7343 kdu_instance = deployed_kdu["kdu-instance"]
7344 kdu_model = deployed_kdu.get("kdu-model")
7345 scale = int(kdu_scaling_info["scale"])
7346 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7347
7348 db_dict = {
7349 "collection": "nsrs",
7350 "filter": {"_id": nsr_id},
7351 "path": "_admin.deployed.K8s.{}".format(index),
7352 }
7353
7354 step = "scaling application {}".format(
7355 kdu_scaling_info["resource-name"]
7356 )
7357 self.logger.debug(logging_text + step)
7358
7359 if kdu_scaling_info["type"] == "delete":
7360 kdu_config = get_configuration(db_vnfd, kdu_name)
7361 if (
7362 kdu_config
7363 and kdu_config.get("terminate-config-primitive")
7364 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7365 ):
7366 terminate_config_primitive_list = kdu_config.get(
7367 "terminate-config-primitive"
7368 )
7369 terminate_config_primitive_list.sort(
7370 key=lambda val: int(val["seq"])
7371 )
7372
7373 for (
7374 terminate_config_primitive
7375 ) in terminate_config_primitive_list:
7376 primitive_params_ = self._map_primitive_params(
7377 terminate_config_primitive, {}, {}
7378 )
7379 step = "execute terminate config primitive"
7380 self.logger.debug(logging_text + step)
7381 await asyncio.wait_for(
7382 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7383 cluster_uuid=cluster_uuid,
7384 kdu_instance=kdu_instance,
7385 primitive_name=terminate_config_primitive["name"],
7386 params=primitive_params_,
7387 db_dict=db_dict,
7388 total_timeout=self.timeout.primitive,
7389 vca_id=vca_id,
7390 ),
7391 timeout=self.timeout.primitive
7392 * self.timeout.primitive_outer_factor,
7393 )
7394
7395 await asyncio.wait_for(
7396 self.k8scluster_map[k8s_cluster_type].scale(
7397 kdu_instance=kdu_instance,
7398 scale=scale,
7399 resource_name=kdu_scaling_info["resource-name"],
7400 total_timeout=self.timeout.scale_on_error,
7401 vca_id=vca_id,
7402 cluster_uuid=cluster_uuid,
7403 kdu_model=kdu_model,
7404 atomic=True,
7405 db_dict=db_dict,
7406 ),
7407 timeout=self.timeout.scale_on_error
7408 * self.timeout.scale_on_error_outer_factor,
7409 )
7410
7411 if kdu_scaling_info["type"] == "create":
7412 kdu_config = get_configuration(db_vnfd, kdu_name)
7413 if (
7414 kdu_config
7415 and kdu_config.get("initial-config-primitive")
7416 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7417 ):
7418 initial_config_primitive_list = kdu_config.get(
7419 "initial-config-primitive"
7420 )
7421 initial_config_primitive_list.sort(
7422 key=lambda val: int(val["seq"])
7423 )
7424
7425 for initial_config_primitive in initial_config_primitive_list:
7426 primitive_params_ = self._map_primitive_params(
7427 initial_config_primitive, {}, {}
7428 )
7429 step = "execute initial config primitive"
7430 self.logger.debug(logging_text + step)
7431 await asyncio.wait_for(
7432 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7433 cluster_uuid=cluster_uuid,
7434 kdu_instance=kdu_instance,
7435 primitive_name=initial_config_primitive["name"],
7436 params=primitive_params_,
7437 db_dict=db_dict,
7438 vca_id=vca_id,
7439 ),
7440 timeout=600,
7441 )
7442
7443 async def _scale_ng_ro(
7444 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7445 ):
7446 nsr_id = db_nslcmop["nsInstanceId"]
7447 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7448 db_vnfrs = {}
7449
7450 # read from db: vnfd's for every vnf
7451 db_vnfds = []
7452
7453 # for each vnf in ns, read vnfd
7454 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7455 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7456 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7457 # if we haven't this vnfd, read it from db
7458 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7459 # read from db
7460 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7461 db_vnfds.append(vnfd)
7462 n2vc_key = self.n2vc.get_public_key()
7463 n2vc_key_list = [n2vc_key]
7464 self.scale_vnfr(
7465 db_vnfr,
7466 vdu_scaling_info.get("vdu-create"),
7467 vdu_scaling_info.get("vdu-delete"),
7468 mark_delete=True,
7469 )
7470 # db_vnfr has been updated, update db_vnfrs to use it
7471 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7472 await self._instantiate_ng_ro(
7473 logging_text,
7474 nsr_id,
7475 db_nsd,
7476 db_nsr,
7477 db_nslcmop,
7478 db_vnfrs,
7479 db_vnfds,
7480 n2vc_key_list,
7481 stage=stage,
7482 start_deploy=time(),
7483 timeout_ns_deploy=self.timeout.ns_deploy,
7484 )
7485 if vdu_scaling_info.get("vdu-delete"):
7486 self.scale_vnfr(
7487 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7488 )
7489
7490 async def extract_prometheus_scrape_jobs(
7491 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7492 ):
7493 # look if exist a file called 'prometheus*.j2' and
7494 artifact_content = self.fs.dir_ls(artifact_path)
7495 job_file = next(
7496 (
7497 f
7498 for f in artifact_content
7499 if f.startswith("prometheus") and f.endswith(".j2")
7500 ),
7501 None,
7502 )
7503 if not job_file:
7504 return
7505 with self.fs.file_open((artifact_path, job_file), "r") as f:
7506 job_data = f.read()
7507
7508 # TODO get_service
7509 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7510 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7511 host_port = "80"
7512 vnfr_id = vnfr_id.replace("-", "")
7513 variables = {
7514 "JOB_NAME": vnfr_id,
7515 "TARGET_IP": target_ip,
7516 "EXPORTER_POD_IP": host_name,
7517 "EXPORTER_POD_PORT": host_port,
7518 }
7519 job_list = parse_job(job_data, variables)
7520 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7521 for job in job_list:
7522 if (
7523 not isinstance(job.get("job_name"), str)
7524 or vnfr_id not in job["job_name"]
7525 ):
7526 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7527 job["nsr_id"] = nsr_id
7528 job["vnfr_id"] = vnfr_id
7529 return job_list
7530
    async def rebuild_start_stop(
        self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
    ):
        """Start, stop or rebuild one VDU of a VNF through NG-RO.

        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmop driving this operation
        :param vnf_id: "vnfrs" record _id of the target VNF
        :param additional_param: dict with "vdu_id" and "count-index" selecting
            the target VDU instance
        :param operation_type: one of the RO operate actions (e.g. "start",
            "stop", "rebuild"); also used as the nsr operational-status value
        :return: tuple (operation_state, detail) — ("COMPLETED", "Done") on
            success, ("FAILED", <reason>) otherwise
        """
        logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
        self.logger.info(logging_text + "Enter")
        stage = ["Preparing the environment", ""]
        # database nsrs record
        db_nsr_update = {}
        vdu_vim_name = None
        vim_vm_id = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()
        try:
            # locate the target VDU instance (vdu_id + count-index) in the vnfr
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
            vim_account_id = db_vnfr.get("vim-account-id")
            vim_info_key = "vim:" + vim_account_id
            vdu_id = additional_param["vdu_id"]
            vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
            vdur = find_in_list(
                vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
            )
            if vdur:
                vdu_vim_name = vdur["name"]
                vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
                # take the first (and expected only) vim_info key as target VIM
                target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
            else:
                raise LcmException("Target vdu is not found")
            self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
            # wait for any previous tasks in process
            stage[1] = "Waiting for previous operations to terminate"
            self.logger.info(stage[1])
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Reading from database."
            self.logger.info(stage[1])
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation=operation_type.upper(),
                current_operation_id=nslcmop_id,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr_update["operational-status"] = operation_type
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            # Payload for RO
            desc = {
                operation_type: {
                    "vim_vm_id": vim_vm_id,
                    "vnf_id": vnf_id,
                    "vdu_index": additional_param["count-index"],
                    "vdu_id": vdur["id"],
                    "target_vim": target_vim,
                    "vim_account_id": vim_account_id,
                }
            }
            stage[1] = "Sending rebuild request to RO... {}".format(desc)
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
            self.logger.info("ro nsr id: {}".format(nsr_id))
            # send the action to NG-RO and wait for its async result
            result_dict = await self.RO.operate(nsr_id, desc, operation_type)
            self.logger.info("response from RO: {}".format(result_dict))
            action_id = result_dict["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.operate,
                None,
                "start_stop_rebuild",
            )
            return "COMPLETED", "Done"
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(stage))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        # reached only on error: all success paths return inside the try block
        return "FAILED", "Error in operate VNF {}".format(exc)
7617
7618 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7619 """
7620 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7621
7622 :param: vim_account_id: VIM Account ID
7623
7624 :return: (cloud_name, cloud_credential)
7625 """
7626 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7627 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7628
7629 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7630 """
7631 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7632
7633 :param: vim_account_id: VIM Account ID
7634
7635 :return: (cloud_name, cloud_credential)
7636 """
7637 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7638 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7639
7640 async def migrate(self, nsr_id, nslcmop_id):
7641 """
7642 Migrate VNFs and VDUs instances in a NS
7643
7644 :param: nsr_id: NS Instance ID
7645 :param: nslcmop_id: nslcmop ID of migrate
7646
7647 """
7648 # Try to lock HA task here
7649 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7650 if not task_is_locked_by_me:
7651 return
7652 logging_text = "Task ns={} migrate ".format(nsr_id)
7653 self.logger.debug(logging_text + "Enter")
7654 # get all needed from database
7655 db_nslcmop = None
7656 db_nslcmop_update = {}
7657 nslcmop_operation_state = None
7658 db_nsr_update = {}
7659 target = {}
7660 exc = None
7661 # in case of error, indicates what part of scale was failed to put nsr at error status
7662 start_deploy = time()
7663
7664 try:
7665 # wait for any previous tasks in process
7666 step = "Waiting for previous operations to terminate"
7667 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7668
7669 self._write_ns_status(
7670 nsr_id=nsr_id,
7671 ns_state=None,
7672 current_operation="MIGRATING",
7673 current_operation_id=nslcmop_id,
7674 )
7675 step = "Getting nslcmop from database"
7676 self.logger.debug(
7677 step + " after having waited for previous tasks to be completed"
7678 )
7679 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7680 migrate_params = db_nslcmop.get("operationParams")
7681
7682 target = {}
7683 target.update(migrate_params)
7684 desc = await self.RO.migrate(nsr_id, target)
7685 self.logger.debug("RO return > {}".format(desc))
7686 action_id = desc["action_id"]
7687 await self._wait_ng_ro(
7688 nsr_id,
7689 action_id,
7690 nslcmop_id,
7691 start_deploy,
7692 self.timeout.migrate,
7693 operation="migrate",
7694 )
7695 except (ROclient.ROClientException, DbException, LcmException) as e:
7696 self.logger.error("Exit Exception {}".format(e))
7697 exc = e
7698 except asyncio.CancelledError:
7699 self.logger.error("Cancelled Exception while '{}'".format(step))
7700 exc = "Operation was cancelled"
7701 except Exception as e:
7702 exc = traceback.format_exc()
7703 self.logger.critical(
7704 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7705 )
7706 finally:
7707 self._write_ns_status(
7708 nsr_id=nsr_id,
7709 ns_state=None,
7710 current_operation="IDLE",
7711 current_operation_id=None,
7712 )
7713 if exc:
7714 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7715 nslcmop_operation_state = "FAILED"
7716 else:
7717 nslcmop_operation_state = "COMPLETED"
7718 db_nslcmop_update["detailed-status"] = "Done"
7719 db_nsr_update["detailed-status"] = "Done"
7720
7721 self._write_op_status(
7722 op_id=nslcmop_id,
7723 stage="",
7724 error_message="",
7725 operation_state=nslcmop_operation_state,
7726 other_update=db_nslcmop_update,
7727 )
7728 if nslcmop_operation_state:
7729 try:
7730 msg = {
7731 "nsr_id": nsr_id,
7732 "nslcmop_id": nslcmop_id,
7733 "operationState": nslcmop_operation_state,
7734 }
7735 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7736 except Exception as e:
7737 self.logger.error(
7738 logging_text + "kafka_write notification Exception {}".format(e)
7739 )
7740 self.logger.debug(logging_text + "Exit")
7741 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7742
7743 async def heal(self, nsr_id, nslcmop_id):
7744 """
7745 Heal NS
7746
7747 :param nsr_id: ns instance to heal
7748 :param nslcmop_id: operation to run
7749 :return:
7750 """
7751
7752 # Try to lock HA task here
7753 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7754 if not task_is_locked_by_me:
7755 return
7756
7757 logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
7758 stage = ["", "", ""]
7759 tasks_dict_info = {}
7760 # ^ stage, step, VIM progress
7761 self.logger.debug(logging_text + "Enter")
7762 # get all needed from database
7763 db_nsr = None
7764 db_nslcmop_update = {}
7765 db_nsr_update = {}
7766 db_vnfrs = {} # vnf's info indexed by _id
7767 exc = None
7768 old_operational_status = ""
7769 old_config_status = ""
7770 nsi_id = None
7771 try:
7772 # wait for any previous tasks in process
7773 step = "Waiting for previous operations to terminate"
7774 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7775 self._write_ns_status(
7776 nsr_id=nsr_id,
7777 ns_state=None,
7778 current_operation="HEALING",
7779 current_operation_id=nslcmop_id,
7780 )
7781
7782 step = "Getting nslcmop from database"
7783 self.logger.debug(
7784 step + " after having waited for previous tasks to be completed"
7785 )
7786 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7787
7788 step = "Getting nsr from database"
7789 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
7790 old_operational_status = db_nsr["operational-status"]
7791 old_config_status = db_nsr["config-status"]
7792
7793 db_nsr_update = {
7794 "_admin.deployed.RO.operational-status": "healing",
7795 }
7796 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7797
7798 step = "Sending heal order to VIM"
7799 await self.heal_RO(
7800 logging_text=logging_text,
7801 nsr_id=nsr_id,
7802 db_nslcmop=db_nslcmop,
7803 stage=stage,
7804 )
7805 # VCA tasks
7806 # read from db: nsd
7807 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
7808 self.logger.debug(logging_text + stage[1])
7809 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7810 self.fs.sync(db_nsr["nsd-id"])
7811 db_nsr["nsd"] = nsd
7812 # read from db: vnfr's of this ns
7813 step = "Getting vnfrs from db"
7814 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
7815 for vnfr in db_vnfrs_list:
7816 db_vnfrs[vnfr["_id"]] = vnfr
7817 self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))
7818
7819 # Check for each target VNF
7820 target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
7821 for target_vnf in target_list:
7822 # Find this VNF in the list from DB
7823 vnfr_id = target_vnf.get("vnfInstanceId", None)
7824 if vnfr_id:
7825 db_vnfr = db_vnfrs[vnfr_id]
7826 vnfd_id = db_vnfr.get("vnfd-id")
7827 vnfd_ref = db_vnfr.get("vnfd-ref")
7828 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7829 base_folder = vnfd["_admin"]["storage"]
7830 vdu_id = None
7831 vdu_index = 0
7832 vdu_name = None
7833 kdu_name = None
7834 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
7835 member_vnf_index = db_vnfr.get("member-vnf-index-ref")
7836
7837 # Check each target VDU and deploy N2VC
7838 target_vdu_list = target_vnf.get("additionalParams", {}).get(
7839 "vdu", []
7840 )
7841 if not target_vdu_list:
7842 # Codigo nuevo para crear diccionario
7843 target_vdu_list = []
7844 for existing_vdu in db_vnfr.get("vdur"):
7845 vdu_name = existing_vdu.get("vdu-name", None)
7846 vdu_index = existing_vdu.get("count-index", 0)
7847 vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
7848 "run-day1", False
7849 )
7850 vdu_to_be_healed = {
7851 "vdu-id": vdu_name,
7852 "count-index": vdu_index,
7853 "run-day1": vdu_run_day1,
7854 }
7855 target_vdu_list.append(vdu_to_be_healed)
7856 for target_vdu in target_vdu_list:
7857 deploy_params_vdu = target_vdu
7858 # Set run-day1 vnf level value if not vdu level value exists
7859 if not deploy_params_vdu.get("run-day1") and target_vnf[
7860 "additionalParams"
7861 ].get("run-day1"):
7862 deploy_params_vdu["run-day1"] = target_vnf[
7863 "additionalParams"
7864 ].get("run-day1")
7865 vdu_name = target_vdu.get("vdu-id", None)
7866 # TODO: Get vdu_id from vdud.
7867 vdu_id = vdu_name
7868 # For multi instance VDU count-index is mandatory
7869 # For single session VDU count-indes is 0
7870 vdu_index = target_vdu.get("count-index", 0)
7871
7872 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7873 stage[1] = "Deploying Execution Environments."
7874 self.logger.debug(logging_text + stage[1])
7875
7876 # VNF Level charm. Normal case when proxy charms.
7877 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7878 descriptor_config = get_configuration(vnfd, vnfd_ref)
7879 if descriptor_config:
7880 # Continue if healed machine is management machine
7881 vnf_ip_address = db_vnfr.get("ip-address")
7882 target_instance = None
7883 for instance in db_vnfr.get("vdur", None):
7884 if (
7885 instance["vdu-name"] == vdu_name
7886 and instance["count-index"] == vdu_index
7887 ):
7888 target_instance = instance
7889 break
7890 if vnf_ip_address == target_instance.get("ip-address"):
7891 self._heal_n2vc(
7892 logging_text=logging_text
7893 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7894 member_vnf_index, vdu_name, vdu_index
7895 ),
7896 db_nsr=db_nsr,
7897 db_vnfr=db_vnfr,
7898 nslcmop_id=nslcmop_id,
7899 nsr_id=nsr_id,
7900 nsi_id=nsi_id,
7901 vnfd_id=vnfd_ref,
7902 vdu_id=None,
7903 kdu_name=None,
7904 member_vnf_index=member_vnf_index,
7905 vdu_index=0,
7906 vdu_name=None,
7907 deploy_params=deploy_params_vdu,
7908 descriptor_config=descriptor_config,
7909 base_folder=base_folder,
7910 task_instantiation_info=tasks_dict_info,
7911 stage=stage,
7912 )
7913
7914 # VDU Level charm. Normal case with native charms.
7915 descriptor_config = get_configuration(vnfd, vdu_name)
7916 if descriptor_config:
7917 self._heal_n2vc(
7918 logging_text=logging_text
7919 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7920 member_vnf_index, vdu_name, vdu_index
7921 ),
7922 db_nsr=db_nsr,
7923 db_vnfr=db_vnfr,
7924 nslcmop_id=nslcmop_id,
7925 nsr_id=nsr_id,
7926 nsi_id=nsi_id,
7927 vnfd_id=vnfd_ref,
7928 vdu_id=vdu_id,
7929 kdu_name=kdu_name,
7930 member_vnf_index=member_vnf_index,
7931 vdu_index=vdu_index,
7932 vdu_name=vdu_name,
7933 deploy_params=deploy_params_vdu,
7934 descriptor_config=descriptor_config,
7935 base_folder=base_folder,
7936 task_instantiation_info=tasks_dict_info,
7937 stage=stage,
7938 )
7939
7940 except (
7941 ROclient.ROClientException,
7942 DbException,
7943 LcmException,
7944 NgRoException,
7945 ) as e:
7946 self.logger.error(logging_text + "Exit Exception {}".format(e))
7947 exc = e
7948 except asyncio.CancelledError:
7949 self.logger.error(
7950 logging_text + "Cancelled Exception while '{}'".format(step)
7951 )
7952 exc = "Operation was cancelled"
7953 except Exception as e:
7954 exc = traceback.format_exc()
7955 self.logger.critical(
7956 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
7957 exc_info=True,
7958 )
7959 finally:
7960 if tasks_dict_info:
7961 stage[1] = "Waiting for healing pending tasks."
7962 self.logger.debug(logging_text + stage[1])
7963 exc = await self._wait_for_tasks(
7964 logging_text,
7965 tasks_dict_info,
7966 self.timeout.ns_deploy,
7967 stage,
7968 nslcmop_id,
7969 nsr_id=nsr_id,
7970 )
7971 if exc:
7972 db_nslcmop_update[
7973 "detailed-status"
7974 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7975 nslcmop_operation_state = "FAILED"
7976 if db_nsr:
7977 db_nsr_update["operational-status"] = old_operational_status
7978 db_nsr_update["config-status"] = old_config_status
7979 db_nsr_update[
7980 "detailed-status"
7981 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
7982 for task, task_name in tasks_dict_info.items():
7983 if not task.done() or task.cancelled() or task.exception():
7984 if task_name.startswith(self.task_name_deploy_vca):
7985 # A N2VC task is pending
7986 db_nsr_update["config-status"] = "failed"
7987 else:
7988 # RO task is pending
7989 db_nsr_update["operational-status"] = "failed"
7990 else:
7991 error_description_nslcmop = None
7992 nslcmop_operation_state = "COMPLETED"
7993 db_nslcmop_update["detailed-status"] = "Done"
7994 db_nsr_update["detailed-status"] = "Done"
7995 db_nsr_update["operational-status"] = "running"
7996 db_nsr_update["config-status"] = "configured"
7997
7998 self._write_op_status(
7999 op_id=nslcmop_id,
8000 stage="",
8001 error_message=error_description_nslcmop,
8002 operation_state=nslcmop_operation_state,
8003 other_update=db_nslcmop_update,
8004 )
8005 if db_nsr:
8006 self._write_ns_status(
8007 nsr_id=nsr_id,
8008 ns_state=None,
8009 current_operation="IDLE",
8010 current_operation_id=None,
8011 other_update=db_nsr_update,
8012 )
8013
8014 if nslcmop_operation_state:
8015 try:
8016 msg = {
8017 "nsr_id": nsr_id,
8018 "nslcmop_id": nslcmop_id,
8019 "operationState": nslcmop_operation_state,
8020 }
8021 await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
8022 except Exception as e:
8023 self.logger.error(
8024 logging_text + "kafka_write notification Exception {}".format(e)
8025 )
8026 self.logger.debug(logging_text + "Exit")
8027 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
8028
8029 async def heal_RO(
8030 self,
8031 logging_text,
8032 nsr_id,
8033 db_nslcmop,
8034 stage,
8035 ):
8036 """
8037 Heal at RO
8038 :param logging_text: preffix text to use at logging
8039 :param nsr_id: nsr identity
8040 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8041 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8042 :return: None or exception
8043 """
8044
8045 def get_vim_account(vim_account_id):
8046 nonlocal db_vims
8047 if vim_account_id in db_vims:
8048 return db_vims[vim_account_id]
8049 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
8050 db_vims[vim_account_id] = db_vim
8051 return db_vim
8052
8053 try:
8054 start_heal = time()
8055 ns_params = db_nslcmop.get("operationParams")
8056 if ns_params and ns_params.get("timeout_ns_heal"):
8057 timeout_ns_heal = ns_params["timeout_ns_heal"]
8058 else:
8059 timeout_ns_heal = self.timeout.ns_heal
8060
8061 db_vims = {}
8062
8063 nslcmop_id = db_nslcmop["_id"]
8064 target = {
8065 "action_id": nslcmop_id,
8066 }
8067 self.logger.warning(
8068 "db_nslcmop={} and timeout_ns_heal={}".format(
8069 db_nslcmop, timeout_ns_heal
8070 )
8071 )
8072 target.update(db_nslcmop.get("operationParams", {}))
8073
8074 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
8075 desc = await self.RO.recreate(nsr_id, target)
8076 self.logger.debug("RO return > {}".format(desc))
8077 action_id = desc["action_id"]
8078 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8079 await self._wait_ng_ro(
8080 nsr_id,
8081 action_id,
8082 nslcmop_id,
8083 start_heal,
8084 timeout_ns_heal,
8085 stage,
8086 operation="healing",
8087 )
8088
8089 # Updating NSR
8090 db_nsr_update = {
8091 "_admin.deployed.RO.operational-status": "running",
8092 "detailed-status": " ".join(stage),
8093 }
8094 self.update_db_2("nsrs", nsr_id, db_nsr_update)
8095 self._write_op_status(nslcmop_id, stage)
8096 self.logger.debug(
8097 logging_text + "ns healed at RO. RO_id={}".format(action_id)
8098 )
8099
8100 except Exception as e:
8101 stage[2] = "ERROR healing at VIM"
8102 # self.set_vnfr_at_error(db_vnfrs, str(e))
8103 self.logger.error(
8104 "Error healing at VIM {}".format(e),
8105 exc_info=not isinstance(
8106 e,
8107 (
8108 ROclient.ROClientException,
8109 LcmException,
8110 DbException,
8111 NgRoException,
8112 ),
8113 ),
8114 )
8115 raise
8116
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one heal_N2VC asyncio task per execution environment.

        For every execution-environment item of descriptor_config, locate the
        matching entry in <nsrs>._admin.deployed.VCA (creating and persisting
        a new one when missing), start heal_N2VC as an asyncio task, register
        it in lcm_tasks, and record it in task_instantiation_info so the
        caller can wait for it.

        :param descriptor_config: configuration section of the descriptor
            (NS/VNF/VDU level) holding the execution-environment list
        :param task_instantiation_info: dict task -> descriptive name, filled
            here for later _wait_for_tasks
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                # Juju-based execution environment: derive the vca type from
                # the charm/cloud/proxy fields of the descriptor item.
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                # Helm-based execution environment; "v2" selects legacy helm.
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an already-deployed VCA entry matching this element.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # After the loop vca_index is the last index (or -1 when the
                # list is empty); +1 yields the position of the new entry.
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # Keep the in-memory copy in sync with what was just persisted.
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8278
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal one VCA execution environment (runs as its own asyncio task).

        For native charms it waits for the healed VM, re-registers the
        execution environment and re-installs the configuration software.
        For proxy/helm types it fetches the ssh key when required, waits for
        RO to finish healing, and reinjects the key into the VM. When the
        "run-day1" operation parameter is set, the initial (day-1) config
        primitives are re-executed. Progress is reflected in the
        configurationStatus.<vca_index> entry of the nsr record.

        :raises LcmException: wrapping any failure, after marking the
            configuration status as BROKEN.
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        # NOTE(review): osm_config is populated below (vnf_id/vdu_id/kdu_name)
        # but never read within this method — confirm whether it should be
        # passed to the VCA connector as in the instantiate flow.
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            # Native charms are deployed once per element (index 0); proxy
            # charms use the vdu count index.
            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace from NS to VNF/VDU/KDU scope.
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                # SOL004-style package without pkg-dir: artifacts under Scripts/.
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # The "config" initial primitive, when present, supplies the
                # charm config values for the (re)install.
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # Unexpected exception types get a full traceback in the log.
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8688
8689 async def _wait_heal_ro(
8690 self,
8691 nsr_id,
8692 timeout=600,
8693 ):
8694 start_time = time()
8695 while time() <= start_time + timeout:
8696 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8697 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8698 "operational-status"
8699 ]
8700 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8701 if operational_status_ro != "healing":
8702 break
8703 await asyncio.sleep(15, loop=self.loop)
8704 else: # timeout_ns_deploy
8705 raise NgRoException("Timeout waiting ns to deploy")
8706
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertically scale (resize) the VDUs in a NS by delegating to RO.

        :param nsr_id: NS Instance ID
        :param nslcmop_id: ID of the vertical-scale NS LCM operation
        :return: None; the outcome is written to the database and notified
            through kafka topic "ns", key "verticalscaled".
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never persisted to
        # the "nsrs" collection — confirm whether an update_db_2 call (or
        # passing it to _write_ns_status as other_update) is missing.
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id,
            )
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): assumes operationParams is present; a None value
            # would make target.update raise and fall into the generic except.
            operationParams = db_nslcmop.get("operationParams")
            target = {}
            target.update(operationParams)
            # Delegate the scaling to RO and wait for its action to finish.
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            await self._wait_ng_ro(
                nsr_id,
                action_id,
                nslcmop_id,
                start_deploy,
                self.timeout.verticalscale,
                operation="verticalscale",
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
        finally:
            # Always release the NS from the VerticalScale operation state.
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            # NOTE(review): error_message is always empty, even on failure —
            # other LCM operations pass the FAILED description here; confirm
            # whether this is intended.
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # Notify subscribers (e.g. NBI) of the operation result.
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")