Feature 10975: Get vim-flavor-id from instantiation params
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.lcm_config import LcmCfg
38 from osm_lcm.data_utils.nsr import (
39 get_deployed_kdu,
40 get_deployed_vca,
41 get_deployed_vca_list,
42 get_nsd,
43 )
44 from osm_lcm.data_utils.vca import (
45 DeployedComponent,
46 DeployedK8sResource,
47 DeployedVCA,
48 EELevel,
49 Relation,
50 EERelation,
51 safe_get_ee_relation,
52 )
53 from osm_lcm.ng_ro import NgRoClient, NgRoException
54 from osm_lcm.lcm_utils import (
55 LcmException,
56 LcmExceptionNoMgmtIP,
57 LcmBase,
58 deep_get,
59 get_iterable,
60 populate_dict,
61 check_juju_bundle_existence,
62 get_charm_artifact_path,
63 get_ee_id_parts,
64 vld_to_ro_ip_profile,
65 )
66 from osm_lcm.data_utils.nsd import (
67 get_ns_configuration_relation_list,
68 get_vnf_profile,
69 get_vnf_profiles,
70 )
71 from osm_lcm.data_utils.vnfd import (
72 get_kdu,
73 get_kdu_services,
74 get_relation_list,
75 get_vdu_list,
76 get_vdu_profile,
77 get_ee_sorted_initial_config_primitive_list,
78 get_ee_sorted_terminate_config_primitive_list,
79 get_kdu_list,
80 get_virtual_link_profiles,
81 get_vdu,
82 get_configuration,
83 get_vdu_index,
84 get_scaling_aspect,
85 get_number_of_instances,
86 get_juju_ee_ref,
87 get_kdu_resource_profile,
88 find_software_version,
89 check_helm_ee_in_ns,
90 )
91 from osm_lcm.data_utils.list_utils import find_in_list
92 from osm_lcm.data_utils.vnfr import (
93 get_osm_params,
94 get_vdur_index,
95 get_kdur,
96 get_volumes_from_instantiation_params,
97 )
98 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
99 from osm_lcm.data_utils.database.vim_account import VimAccountDB
100 from n2vc.definitions import RelationEndpoint
101 from n2vc.k8s_helm_conn import K8sHelmConnector
102 from n2vc.k8s_helm3_conn import K8sHelm3Connector
103 from n2vc.k8s_juju_conn import K8sJujuConnector
104
105 from osm_common.dbbase import DbException
106 from osm_common.fsbase import FsException
107
108 from osm_lcm.data_utils.database.database import Database
109 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
110 from osm_lcm.data_utils.wim import (
111 get_sdn_ports,
112 get_target_wim_attrs,
113 select_feasible_wim_account,
114 )
115
116 from n2vc.n2vc_juju_conn import N2VCJujuConnector
117 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
118
119 from osm_lcm.lcm_helm_conn import LCMHelmConn
120 from osm_lcm.osm_config import OsmConfigBuilder
121 from osm_lcm.prometheus import parse_job
122
123 from copy import copy, deepcopy
124 from time import time
125 from uuid import uuid4
126
127 from random import randint
128
129 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
130
131
class NsLcm(LcmBase):
    # Sentinel results for sub-operation lookups; negative values so they
    # presumably cannot collide with a real sub-operation list index
    # (NOTE(review): confirm against the lookup helpers elsewhere in this file)
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    # Human-readable label used when registering the VCA deployment task
    task_name_deploy_vca = "Deploying VCA"
137
    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler, forwarded to LcmBase
        :param lcm_tasks: registry used by LCM to track per-NS asyncio tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all the connectors below
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        # database / filesystem singletons shared across the LCM process
        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config.timeout
        self.ro_config = config.RO
        self.vca_config = config.VCA

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # helm-based execution environment connector
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        # helm v2 connector (no DB-update callback: status is polled instead)
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helmpath,
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        # helm v3 connector
        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.kubectlpath,
            helm_command=self.vca_config.helm3path,
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        # juju-bundle connector; pushes status changes through _on_update_k8s_db
        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.kubectlpath,
            juju_command=self.vca_config.jujupath,
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # KDU deployment-unit type -> k8s connector that handles it
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # execution-environment type -> VCA connector that handles it
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())

        # LCM operation type -> RO coroutine used to poll its status
        # (healing uses the dedicated recreate_status endpoint)
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
225
226 @staticmethod
227 def increment_ip_mac(ip_mac, vm_index=1):
228 if not isinstance(ip_mac, str):
229 return ip_mac
230 try:
231 # try with ipv4 look for last dot
232 i = ip_mac.rfind(".")
233 if i > 0:
234 i += 1
235 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
236 # try with ipv6 or mac look for last colon. Operate in hex
237 i = ip_mac.rfind(":")
238 if i > 0:
239 i += 1
240 # format in hex, len can be 2 for mac or 4 for ipv6
241 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
242 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
243 )
244 except Exception:
245 pass
246 return None
247
248 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
250
251 try:
252 # TODO filter RO descriptor fields...
253
254 # write to database
255 db_dict = dict()
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict["deploymentStatus"] = ro_descriptor
258 self.update_db_2("nsrs", nsrs_id, db_dict)
259
260 except Exception as e:
261 self.logger.warn(
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
263 )
264
265 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
266 # remove last dot from path (if exists)
267 if path.endswith("."):
268 path = path[:-1]
269
270 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
271 # .format(table, filter, path, updated_data))
272 try:
273 nsr_id = filter.get("_id")
274
275 # read ns record from database
276 nsr = self.db.get_one(table="nsrs", q_filter=filter)
277 current_ns_status = nsr.get("nsState")
278
279 # get vca status for NS
280 status_dict = await self.n2vc.get_status(
281 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
282 )
283
284 # vcaStatus
285 db_dict = dict()
286 db_dict["vcaStatus"] = status_dict
287
288 # update configurationStatus for this VCA
289 try:
290 vca_index = int(path[path.rfind(".") + 1 :])
291
292 vca_list = deep_get(
293 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
294 )
295 vca_status = vca_list[vca_index].get("status")
296
297 configuration_status_list = nsr.get("configurationStatus")
298 config_status = configuration_status_list[vca_index].get("status")
299
300 if config_status == "BROKEN" and vca_status != "failed":
301 db_dict["configurationStatus"][vca_index] = "READY"
302 elif config_status != "BROKEN" and vca_status == "failed":
303 db_dict["configurationStatus"][vca_index] = "BROKEN"
304 except Exception as e:
305 # not update configurationStatus
306 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
307
308 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
309 # if nsState = 'DEGRADED' check if all is OK
310 is_degraded = False
311 if current_ns_status in ("READY", "DEGRADED"):
312 error_description = ""
313 # check machines
314 if status_dict.get("machines"):
315 for machine_id in status_dict.get("machines"):
316 machine = status_dict.get("machines").get(machine_id)
317 # check machine agent-status
318 if machine.get("agent-status"):
319 s = machine.get("agent-status").get("status")
320 if s != "started":
321 is_degraded = True
322 error_description += (
323 "machine {} agent-status={} ; ".format(
324 machine_id, s
325 )
326 )
327 # check machine instance status
328 if machine.get("instance-status"):
329 s = machine.get("instance-status").get("status")
330 if s != "running":
331 is_degraded = True
332 error_description += (
333 "machine {} instance-status={} ; ".format(
334 machine_id, s
335 )
336 )
337 # check applications
338 if status_dict.get("applications"):
339 for app_id in status_dict.get("applications"):
340 app = status_dict.get("applications").get(app_id)
341 # check application status
342 if app.get("status"):
343 s = app.get("status").get("status")
344 if s != "active":
345 is_degraded = True
346 error_description += (
347 "application {} status={} ; ".format(app_id, s)
348 )
349
350 if error_description:
351 db_dict["errorDescription"] = error_description
352 if current_ns_status == "READY" and is_degraded:
353 db_dict["nsState"] = "DEGRADED"
354 if current_ns_status == "DEGRADED" and not is_degraded:
355 db_dict["nsState"] = "READY"
356
357 # write to database
358 self.update_db_2("nsrs", nsr_id, db_dict)
359
360 except (asyncio.CancelledError, asyncio.TimeoutError):
361 raise
362 except Exception as e:
363 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
364
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id
        :param vca_id: VCA account id forwarded to the k8s connector
        :cluster_type: The cluster type (juju, k8s)
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #                   .format(cluster_uuid, kdu_instance, filter))

        # NOTE(review): filter defaults to None but callers appear to always
        # pass a dict with "_id" - confirm; filter.get would raise otherwise
        nsr_id = filter.get("_id")
        try:
            # ask the connector that manages this cluster type for the
            # complete (non-yaml) KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # let task cancellation and timeouts propagate to the caller
            raise
        except Exception as e:
            # best-effort status update: log and continue
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
404
405 @staticmethod
406 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
407 try:
408 env = Environment(
409 undefined=StrictUndefined,
410 autoescape=select_autoescape(default_for_string=True, default=True),
411 )
412 template = env.from_string(cloud_init_text)
413 return template.render(additional_params or {})
414 except UndefinedError as e:
415 raise LcmException(
416 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
417 "file, must be provided in the instantiation parameters inside the "
418 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
419 )
420 except (TemplateError, TemplateNotFound) as e:
421 raise LcmException(
422 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
423 vnfd_id, vdu_id, e
424 )
425 )
426
427 def _get_vdu_cloud_init_content(self, vdu, vnfd):
428 cloud_init_content = cloud_init_file = None
429 try:
430 if vdu.get("cloud-init-file"):
431 base_folder = vnfd["_admin"]["storage"]
432 if base_folder["pkg-dir"]:
433 cloud_init_file = "{}/{}/cloud_init/{}".format(
434 base_folder["folder"],
435 base_folder["pkg-dir"],
436 vdu["cloud-init-file"],
437 )
438 else:
439 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
440 base_folder["folder"],
441 vdu["cloud-init-file"],
442 )
443 with self.fs.file_open(cloud_init_file, "r") as ci_file:
444 cloud_init_content = ci_file.read()
445 elif vdu.get("cloud-init"):
446 cloud_init_content = vdu["cloud-init"]
447
448 return cloud_init_content
449 except FsException as e:
450 raise LcmException(
451 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
452 vnfd["id"], vdu["id"], cloud_init_file, e
453 )
454 )
455
456 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
457 vdur = next(
458 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
459 )
460 additional_params = vdur.get("additionalParams")
461 return parse_yaml_strings(additional_params)
462
463 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
464 """
465 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
466 :param vnfd: input vnfd
467 :param new_id: overrides vnf id if provided
468 :param additionalParams: Instantiation params for VNFs provided
469 :param nsrId: Id of the NSR
470 :return: copy of vnfd
471 """
472 vnfd_RO = deepcopy(vnfd)
473 # remove unused by RO configuration, monitoring, scaling and internal keys
474 vnfd_RO.pop("_id", None)
475 vnfd_RO.pop("_admin", None)
476 vnfd_RO.pop("monitoring-param", None)
477 vnfd_RO.pop("scaling-group-descriptor", None)
478 vnfd_RO.pop("kdu", None)
479 vnfd_RO.pop("k8s-cluster", None)
480 if new_id:
481 vnfd_RO["id"] = new_id
482
483 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
484 for vdu in get_iterable(vnfd_RO, "vdu"):
485 vdu.pop("cloud-init-file", None)
486 vdu.pop("cloud-init", None)
487 return vnfd_RO
488
489 @staticmethod
490 def ip_profile_2_RO(ip_profile):
491 RO_ip_profile = deepcopy(ip_profile)
492 if "dns-server" in RO_ip_profile:
493 if isinstance(RO_ip_profile["dns-server"], list):
494 RO_ip_profile["dns-address"] = []
495 for ds in RO_ip_profile.pop("dns-server"):
496 RO_ip_profile["dns-address"].append(ds["address"])
497 else:
498 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
499 if RO_ip_profile.get("ip-version") == "ipv4":
500 RO_ip_profile["ip-version"] = "IPv4"
501 if RO_ip_profile.get("ip-version") == "ipv6":
502 RO_ip_profile["ip-version"] = "IPv6"
503 if "dhcp-params" in RO_ip_profile:
504 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
505 return RO_ip_profile
506
507 def _get_ro_vim_id_for_vim_account(self, vim_account):
508 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
509 if db_vim["_admin"]["operationalState"] != "ENABLED":
510 raise LcmException(
511 "VIM={} is not available. operationalState={}".format(
512 vim_account, db_vim["_admin"]["operationalState"]
513 )
514 )
515 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
516 return RO_vim_id
517
518 def get_ro_wim_id_for_wim_account(self, wim_account):
519 if isinstance(wim_account, str):
520 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
521 if db_wim["_admin"]["operationalState"] != "ENABLED":
522 raise LcmException(
523 "WIM={} is not available. operationalState={}".format(
524 wim_account, db_wim["_admin"]["operationalState"]
525 )
526 )
527 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
528 return RO_wim_id
529 else:
530 return wim_account
531
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Scale the VDU records of a VNFR in the database (in and/or out).

        :param db_vnfr: VNFR content; its "vdur" list is refreshed from the
            database before returning
        :param vdu_create: dict vdu-id-ref -> number of instances to add
        :param vdu_delete: dict vdu-id-ref -> number of instances to remove
        :param mark_delete: when True, matching vdur entries are only marked
            with status DELETING instead of being pulled from the database
        :return: None
        """
        db_vdu_push_list = []  # new vdur documents to push on scale-out
        template_vdur = []  # saved copy of the last vdur when scaling to 0
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the newest existing vdur of this vdu-id
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # each replica starts from a deep copy with fresh
                    # identity/status and a bumped count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica;
                        # dynamic ones are cleared for re-assignment
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only first vdu can be managment of vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    # mark the newest vdu_count matching entries as DELETING
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
642
643 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
644 """
645 Updates database nsr with the RO info for the created vld
646 :param ns_update_nsr: dictionary to be filled with the updated info
647 :param db_nsr: content of db_nsr. This is also modified
648 :param nsr_desc_RO: nsr descriptor from RO
649 :return: Nothing, LcmException is raised on errors
650 """
651
652 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
653 for net_RO in get_iterable(nsr_desc_RO, "nets"):
654 if vld["id"] != net_RO.get("ns_net_osm_id"):
655 continue
656 vld["vim-id"] = net_RO.get("vim_net_id")
657 vld["name"] = net_RO.get("vim_name")
658 vld["status"] = net_RO.get("status")
659 vld["status-detailed"] = net_RO.get("error_msg")
660 ns_update_nsr["vld.{}".format(vld_index)] = vld
661 break
662 else:
663 raise LcmException(
664 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
665 )
666
667 def set_vnfr_at_error(self, db_vnfrs, error_text):
668 try:
669 for db_vnfr in db_vnfrs.values():
670 vnfr_update = {"status": "ERROR"}
671 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
672 if "status" not in vdur:
673 vdur["status"] = "ERROR"
674 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
675 if error_text:
676 vdur["status-detailed"] = str(error_text)
677 vnfr_update[
678 "vdur.{}.status-detailed".format(vdu_index)
679 ] = "ERROR"
680 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
681 except DbException as e:
682 self.logger.error("Cannot update vnf. {}".format(e))
683
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf entry matching this member-vnf-index;
            # the for/else raises when no entry matches
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';';
                    # keep the first one as the management IP
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by the VIM; skip them
                        continue
                    # match the RO vm by vdu id and by replica count-index
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but another replica: keep counting
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # copy per-interface addresses from the RO view
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                # update the vnf-internal vlds from the RO nets
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
780
781 def _get_ns_config_info(self, nsr_id):
782 """
783 Generates a mapping between vnf,vdu elements and the N2VC id
784 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
785 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
786 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
787 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
788 """
789 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
790 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
791 mapping = {}
792 ns_config_info = {"osm-config-mapping": mapping}
793 for vca in vca_deployed_list:
794 if not vca["member-vnf-index"]:
795 continue
796 if not vca["vdu_id"]:
797 mapping[vca["member-vnf-index"]] = vca["application"]
798 else:
799 mapping[
800 "{}.{}.{}".format(
801 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
802 )
803 ] = vca["application"]
804 return ns_config_info
805
806 async def _instantiate_ng_ro(
807 self,
808 logging_text,
809 nsr_id,
810 nsd,
811 db_nsr,
812 db_nslcmop,
813 db_vnfrs,
814 db_vnfds,
815 n2vc_key_list,
816 stage,
817 start_deploy,
818 timeout_ns_deploy,
819 ):
820 db_vims = {}
821
822 def get_vim_account(vim_account_id):
823 nonlocal db_vims
824 if vim_account_id in db_vims:
825 return db_vims[vim_account_id]
826 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
827 db_vims[vim_account_id] = db_vim
828 return db_vim
829
830 # modify target_vld info with instantiation parameters
831 def parse_vld_instantiation_params(
832 target_vim, target_vld, vld_params, target_sdn
833 ):
834 if vld_params.get("ip-profile"):
835 target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile(
836 vld_params["ip-profile"]
837 )
838 if vld_params.get("provider-network"):
839 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
840 "provider-network"
841 ]
842 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
843 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
844 "provider-network"
845 ]["sdn-ports"]
846
847 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
848 # if wim_account_id is specified in vld_params, validate if it is feasible.
849 wim_account_id, db_wim = select_feasible_wim_account(
850 db_nsr, db_vnfrs, target_vld, vld_params, self.logger
851 )
852
853 if wim_account_id:
854 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
855 self.logger.info("WIM selected: {:s}".format(str(wim_account_id)))
856 # update vld_params with correct WIM account Id
857 vld_params["wimAccountId"] = wim_account_id
858
859 target_wim = "wim:{}".format(wim_account_id)
860 target_wim_attrs = get_target_wim_attrs(nsr_id, target_vld, vld_params)
861 sdn_ports = get_sdn_ports(vld_params, db_wim)
862 if len(sdn_ports) > 0:
863 target_vld["vim_info"][target_wim] = target_wim_attrs
864 target_vld["vim_info"][target_wim]["sdn-ports"] = sdn_ports
865
866 self.logger.debug(
867 "Target VLD with WIM data: {:s}".format(str(target_vld))
868 )
869
870 for param in ("vim-network-name", "vim-network-id"):
871 if vld_params.get(param):
872 if isinstance(vld_params[param], dict):
873 for vim, vim_net in vld_params[param].items():
874 other_target_vim = "vim:" + vim
875 populate_dict(
876 target_vld["vim_info"],
877 (other_target_vim, param.replace("-", "_")),
878 vim_net,
879 )
880 else: # isinstance str
881 target_vld["vim_info"][target_vim][
882 param.replace("-", "_")
883 ] = vld_params[param]
884 if vld_params.get("common_id"):
885 target_vld["common_id"] = vld_params.get("common_id")
886
887 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
888 def update_ns_vld_target(target, ns_params):
889 for vnf_params in ns_params.get("vnf", ()):
890 if vnf_params.get("vimAccountId"):
891 target_vnf = next(
892 (
893 vnfr
894 for vnfr in db_vnfrs.values()
895 if vnf_params["member-vnf-index"]
896 == vnfr["member-vnf-index-ref"]
897 ),
898 None,
899 )
900 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
901 if not vdur:
902 return
903 for a_index, a_vld in enumerate(target["ns"]["vld"]):
904 target_vld = find_in_list(
905 get_iterable(vdur, "interfaces"),
906 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
907 )
908
909 vld_params = find_in_list(
910 get_iterable(ns_params, "vld"),
911 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
912 )
913 if target_vld:
914 if vnf_params.get("vimAccountId") not in a_vld.get(
915 "vim_info", {}
916 ):
917 target_vim_network_list = [
918 v for _, v in a_vld.get("vim_info").items()
919 ]
920 target_vim_network_name = next(
921 (
922 item.get("vim_network_name", "")
923 for item in target_vim_network_list
924 ),
925 "",
926 )
927
928 target["ns"]["vld"][a_index].get("vim_info").update(
929 {
930 "vim:{}".format(vnf_params["vimAccountId"]): {
931 "vim_network_name": target_vim_network_name,
932 }
933 }
934 )
935
936 if vld_params:
937 for param in ("vim-network-name", "vim-network-id"):
938 if vld_params.get(param) and isinstance(
939 vld_params[param], dict
940 ):
941 for vim, vim_net in vld_params[
942 param
943 ].items():
944 other_target_vim = "vim:" + vim
945 populate_dict(
946 target["ns"]["vld"][a_index].get(
947 "vim_info"
948 ),
949 (
950 other_target_vim,
951 param.replace("-", "_"),
952 ),
953 vim_net,
954 )
955
956 nslcmop_id = db_nslcmop["_id"]
957 target = {
958 "name": db_nsr["name"],
959 "ns": {"vld": []},
960 "vnf": [],
961 "image": deepcopy(db_nsr["image"]),
962 "flavor": deepcopy(db_nsr["flavor"]),
963 "action_id": nslcmop_id,
964 "cloud_init_content": {},
965 }
966 for image in target["image"]:
967 image["vim_info"] = {}
968 for flavor in target["flavor"]:
969 flavor["vim_info"] = {}
970 if db_nsr.get("affinity-or-anti-affinity-group"):
971 target["affinity-or-anti-affinity-group"] = deepcopy(
972 db_nsr["affinity-or-anti-affinity-group"]
973 )
974 for affinity_or_anti_affinity_group in target[
975 "affinity-or-anti-affinity-group"
976 ]:
977 affinity_or_anti_affinity_group["vim_info"] = {}
978
979 if db_nslcmop.get("lcmOperationType") != "instantiate":
980 # get parameters of instantiation:
981 db_nslcmop_instantiate = self.db.get_list(
982 "nslcmops",
983 {
984 "nsInstanceId": db_nslcmop["nsInstanceId"],
985 "lcmOperationType": "instantiate",
986 },
987 )[-1]
988 ns_params = db_nslcmop_instantiate.get("operationParams")
989 else:
990 ns_params = db_nslcmop.get("operationParams")
991 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
992 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
993
994 cp2target = {}
995 for vld_index, vld in enumerate(db_nsr.get("vld")):
996 target_vim = "vim:{}".format(ns_params["vimAccountId"])
997 target_vld = {
998 "id": vld["id"],
999 "name": vld["name"],
1000 "mgmt-network": vld.get("mgmt-network", False),
1001 "type": vld.get("type"),
1002 "vim_info": {
1003 target_vim: {
1004 "vim_network_name": vld.get("vim-network-name"),
1005 "vim_account_id": ns_params["vimAccountId"],
1006 }
1007 },
1008 }
1009 # check if this network needs SDN assist
1010 if vld.get("pci-interfaces"):
1011 db_vim = get_vim_account(ns_params["vimAccountId"])
1012 if vim_config := db_vim.get("config"):
1013 if sdnc_id := vim_config.get("sdn-controller"):
1014 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1015 target_sdn = "sdn:{}".format(sdnc_id)
1016 target_vld["vim_info"][target_sdn] = {
1017 "sdn": True,
1018 "target_vim": target_vim,
1019 "vlds": [sdn_vld],
1020 "type": vld.get("type"),
1021 }
1022
1023 nsd_vnf_profiles = get_vnf_profiles(nsd)
1024 for nsd_vnf_profile in nsd_vnf_profiles:
1025 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1026 if cp["virtual-link-profile-id"] == vld["id"]:
1027 cp2target[
1028 "member_vnf:{}.{}".format(
1029 cp["constituent-cpd-id"][0][
1030 "constituent-base-element-id"
1031 ],
1032 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1033 )
1034 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1035
1036 # check at nsd descriptor, if there is an ip-profile
1037 vld_params = {}
1038 nsd_vlp = find_in_list(
1039 get_virtual_link_profiles(nsd),
1040 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1041 == vld["id"],
1042 )
1043 if (
1044 nsd_vlp
1045 and nsd_vlp.get("virtual-link-protocol-data")
1046 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1047 ):
1048 vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][
1049 "l3-protocol-data"
1050 ]
1051
1052 # update vld_params with instantiation params
1053 vld_instantiation_params = find_in_list(
1054 get_iterable(ns_params, "vld"),
1055 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1056 )
1057 if vld_instantiation_params:
1058 vld_params.update(vld_instantiation_params)
1059 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1060 target["ns"]["vld"].append(target_vld)
1061 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1062 update_ns_vld_target(target, ns_params)
1063
1064 for vnfr in db_vnfrs.values():
1065 vnfd = find_in_list(
1066 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1067 )
1068 vnf_params = find_in_list(
1069 get_iterable(ns_params, "vnf"),
1070 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1071 )
1072 target_vnf = deepcopy(vnfr)
1073 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1074 for vld in target_vnf.get("vld", ()):
1075 # check if connected to a ns.vld, to fill target'
1076 vnf_cp = find_in_list(
1077 vnfd.get("int-virtual-link-desc", ()),
1078 lambda cpd: cpd.get("id") == vld["id"],
1079 )
1080 if vnf_cp:
1081 ns_cp = "member_vnf:{}.{}".format(
1082 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1083 )
1084 if cp2target.get(ns_cp):
1085 vld["target"] = cp2target[ns_cp]
1086
1087 vld["vim_info"] = {
1088 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1089 }
1090 # check if this network needs SDN assist
1091 target_sdn = None
1092 if vld.get("pci-interfaces"):
1093 db_vim = get_vim_account(vnfr["vim-account-id"])
1094 sdnc_id = db_vim["config"].get("sdn-controller")
1095 if sdnc_id:
1096 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1097 target_sdn = "sdn:{}".format(sdnc_id)
1098 vld["vim_info"][target_sdn] = {
1099 "sdn": True,
1100 "target_vim": target_vim,
1101 "vlds": [sdn_vld],
1102 "type": vld.get("type"),
1103 }
1104
1105 # check at vnfd descriptor, if there is an ip-profile
1106 vld_params = {}
1107 vnfd_vlp = find_in_list(
1108 get_virtual_link_profiles(vnfd),
1109 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1110 )
1111 if (
1112 vnfd_vlp
1113 and vnfd_vlp.get("virtual-link-protocol-data")
1114 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1115 ):
1116 vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][
1117 "l3-protocol-data"
1118 ]
1119 # update vld_params with instantiation params
1120 if vnf_params:
1121 vld_instantiation_params = find_in_list(
1122 get_iterable(vnf_params, "internal-vld"),
1123 lambda i_vld: i_vld["name"] == vld["id"],
1124 )
1125 if vld_instantiation_params:
1126 vld_params.update(vld_instantiation_params)
1127 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1128
1129 vdur_list = []
1130 for vdur in target_vnf.get("vdur", ()):
1131 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1132 continue # This vdu must not be created
1133 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1134
1135 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1136
1137 if ssh_keys_all:
1138 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1139 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1140 if (
1141 vdu_configuration
1142 and vdu_configuration.get("config-access")
1143 and vdu_configuration.get("config-access").get("ssh-access")
1144 ):
1145 vdur["ssh-keys"] = ssh_keys_all
1146 vdur["ssh-access-required"] = vdu_configuration[
1147 "config-access"
1148 ]["ssh-access"]["required"]
1149 elif (
1150 vnf_configuration
1151 and vnf_configuration.get("config-access")
1152 and vnf_configuration.get("config-access").get("ssh-access")
1153 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1154 ):
1155 vdur["ssh-keys"] = ssh_keys_all
1156 vdur["ssh-access-required"] = vnf_configuration[
1157 "config-access"
1158 ]["ssh-access"]["required"]
1159 elif ssh_keys_instantiation and find_in_list(
1160 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1161 ):
1162 vdur["ssh-keys"] = ssh_keys_instantiation
1163
1164 self.logger.debug("NS > vdur > {}".format(vdur))
1165
1166 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1167 # cloud-init
1168 if vdud.get("cloud-init-file"):
1169 vdur["cloud-init"] = "{}:file:{}".format(
1170 vnfd["_id"], vdud.get("cloud-init-file")
1171 )
1172 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1173 if vdur["cloud-init"] not in target["cloud_init_content"]:
1174 base_folder = vnfd["_admin"]["storage"]
1175 if base_folder["pkg-dir"]:
1176 cloud_init_file = "{}/{}/cloud_init/{}".format(
1177 base_folder["folder"],
1178 base_folder["pkg-dir"],
1179 vdud.get("cloud-init-file"),
1180 )
1181 else:
1182 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1183 base_folder["folder"],
1184 vdud.get("cloud-init-file"),
1185 )
1186 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1187 target["cloud_init_content"][
1188 vdur["cloud-init"]
1189 ] = ci_file.read()
1190 elif vdud.get("cloud-init"):
1191 vdur["cloud-init"] = "{}:vdu:{}".format(
1192 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1193 )
1194 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1195 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1196 "cloud-init"
1197 ]
1198 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1199 deploy_params_vdu = self._format_additional_params(
1200 vdur.get("additionalParams") or {}
1201 )
1202 deploy_params_vdu["OSM"] = get_osm_params(
1203 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1204 )
1205 vdur["additionalParams"] = deploy_params_vdu
1206
1207 # flavor
1208 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1209 if target_vim not in ns_flavor["vim_info"]:
1210 ns_flavor["vim_info"][target_vim] = {}
1211
1212 # deal with images
1213 # in case alternative images are provided we must check if they should be applied
1214 # for the vim_type, modify the vim_type taking into account
1215 ns_image_id = int(vdur["ns-image-id"])
1216 if vdur.get("alt-image-ids"):
1217 db_vim = get_vim_account(vnfr["vim-account-id"])
1218 vim_type = db_vim["vim_type"]
1219 for alt_image_id in vdur.get("alt-image-ids"):
1220 ns_alt_image = target["image"][int(alt_image_id)]
1221 if vim_type == ns_alt_image.get("vim-type"):
1222 # must use alternative image
1223 self.logger.debug(
1224 "use alternative image id: {}".format(alt_image_id)
1225 )
1226 ns_image_id = alt_image_id
1227 vdur["ns-image-id"] = ns_image_id
1228 break
1229 ns_image = target["image"][int(ns_image_id)]
1230 if target_vim not in ns_image["vim_info"]:
1231 ns_image["vim_info"][target_vim] = {}
1232
1233 # Affinity groups
1234 if vdur.get("affinity-or-anti-affinity-group-id"):
1235 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1236 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1237 if target_vim not in ns_ags["vim_info"]:
1238 ns_ags["vim_info"][target_vim] = {}
1239
1240 vdur["vim_info"] = {target_vim: {}}
1241 # instantiation parameters
1242 if vnf_params:
1243 vdu_instantiation_params = find_in_list(
1244 get_iterable(vnf_params, "vdu"),
1245 lambda i_vdu: i_vdu["id"] == vdud["id"],
1246 )
1247 if vdu_instantiation_params:
1248 # Parse the vdu_volumes from the instantiation params
1249 vdu_volumes = get_volumes_from_instantiation_params(
1250 vdu_instantiation_params, vdud
1251 )
1252 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1253 vdur["additionalParams"]["OSM"][
1254 "vim_flavor_id"
1255 ] = vdu_instantiation_params.get("vim-flavor-id")
1256 vdur_list.append(vdur)
1257 target_vnf["vdur"] = vdur_list
1258 target["vnf"].append(target_vnf)
1259
1260 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1261 desc = await self.RO.deploy(nsr_id, target)
1262 self.logger.debug("RO return > {}".format(desc))
1263 action_id = desc["action_id"]
1264 await self._wait_ng_ro(
1265 nsr_id,
1266 action_id,
1267 nslcmop_id,
1268 start_deploy,
1269 timeout_ns_deploy,
1270 stage,
1271 operation="instantiation",
1272 )
1273
1274 # Updating NSR
1275 db_nsr_update = {
1276 "_admin.deployed.RO.operational-status": "running",
1277 "detailed-status": " ".join(stage),
1278 }
1279 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1280 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1281 self._write_op_status(nslcmop_id, stage)
1282 self.logger.debug(
1283 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1284 )
1285 return
1286
    async def _wait_ng_ro(
        self,
        nsr_id,
        action_id,
        nslcmop_id=None,
        start_time=None,
        timeout=600,
        stage=None,
        operation=None,
    ):
        """
        Poll NG-RO until the given action finishes or the timeout expires.

        :param nsr_id: NS record id the action belongs to
        :param action_id: RO action id returned by a previous deploy request
        :param nslcmop_id: operation id; when given together with stage,
            progress is persisted to the 'nsrs' collection and the op record
        :param start_time: epoch seconds when the wait started; defaults to now
        :param timeout: maximum seconds to keep polling
        :param stage: 3-item progress list; stage[2] is overwritten with the
            VIM detail reported by RO
        :param operation: key into self.op_status_map selecting the RO status
            query coroutine to call
        :return: None once RO reports DONE
        :raises NgRoException: if RO reports FAILED, or on timeout
        """
        detailed_status_old = None
        db_nsr_update = {}
        start_time = start_time or time()
        while time() <= start_time + timeout:
            desc_status = await self.op_status_map[operation](nsr_id, action_id)
            self.logger.debug("Wait NG RO > {}".format(desc_status))
            if desc_status["status"] == "FAILED":
                raise NgRoException(desc_status["details"])
            elif desc_status["status"] == "BUILD":
                if stage:
                    stage[2] = "VIM: ({})".format(desc_status["details"])
            elif desc_status["status"] == "DONE":
                if stage:
                    stage[2] = "Deployed at VIM"
                break
            else:
                assert False, "ROclient.check_ns_status returns unknown {}".format(
                    desc_status["status"]
                )
            # Persist progress only when the detailed status actually changed,
            # to avoid writing the same document on every poll iteration.
            if stage and nslcmop_id and stage[2] != detailed_status_old:
                detailed_status_old = stage[2]
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
            # NOTE(review): the explicit 'loop' argument to asyncio.sleep is
            # deprecated since Python 3.8 and removed in 3.10 — confirm the
            # target interpreter version before upgrading.
            await asyncio.sleep(15, loop=self.loop)
        else:  # timeout_ns_deploy
            raise NgRoException("Timeout waiting ns to deploy")
1324
1325 async def _terminate_ng_ro(
1326 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1327 ):
1328 db_nsr_update = {}
1329 failed_detail = []
1330 action_id = None
1331 start_deploy = time()
1332 try:
1333 target = {
1334 "ns": {"vld": []},
1335 "vnf": [],
1336 "image": [],
1337 "flavor": [],
1338 "action_id": nslcmop_id,
1339 }
1340 desc = await self.RO.deploy(nsr_id, target)
1341 action_id = desc["action_id"]
1342 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1343 self.logger.debug(
1344 logging_text
1345 + "ns terminate action at RO. action_id={}".format(action_id)
1346 )
1347
1348 # wait until done
1349 delete_timeout = 20 * 60 # 20 minutes
1350 await self._wait_ng_ro(
1351 nsr_id,
1352 action_id,
1353 nslcmop_id,
1354 start_deploy,
1355 delete_timeout,
1356 stage,
1357 operation="termination",
1358 )
1359 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1360 # delete all nsr
1361 await self.RO.delete(nsr_id)
1362 except NgRoException as e:
1363 if e.http_code == 404: # not found
1364 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1365 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1366 self.logger.debug(
1367 logging_text + "RO_action_id={} already deleted".format(action_id)
1368 )
1369 elif e.http_code == 409: # conflict
1370 failed_detail.append("delete conflict: {}".format(e))
1371 self.logger.debug(
1372 logging_text
1373 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1374 )
1375 else:
1376 failed_detail.append("delete error: {}".format(e))
1377 self.logger.error(
1378 logging_text
1379 + "RO_action_id={} delete error: {}".format(action_id, e)
1380 )
1381 except Exception as e:
1382 failed_detail.append("delete error: {}".format(e))
1383 self.logger.error(
1384 logging_text + "RO_action_id={} delete error: {}".format(action_id, e)
1385 )
1386
1387 if failed_detail:
1388 stage[2] = "Error deleting from VIM"
1389 else:
1390 stage[2] = "Deleted from VIM"
1391 db_nsr_update["detailed-status"] = " ".join(stage)
1392 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1393 self._write_op_status(nslcmop_id, stage)
1394
1395 if failed_detail:
1396 raise LcmException("; ".join(failed_detail))
1397 return
1398
1399 async def instantiate_RO(
1400 self,
1401 logging_text,
1402 nsr_id,
1403 nsd,
1404 db_nsr,
1405 db_nslcmop,
1406 db_vnfrs,
1407 db_vnfds,
1408 n2vc_key_list,
1409 stage,
1410 ):
1411 """
1412 Instantiate at RO
1413 :param logging_text: preffix text to use at logging
1414 :param nsr_id: nsr identity
1415 :param nsd: database content of ns descriptor
1416 :param db_nsr: database content of ns record
1417 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1418 :param db_vnfrs:
1419 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1420 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1421 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1422 :return: None or exception
1423 """
1424 try:
1425 start_deploy = time()
1426 ns_params = db_nslcmop.get("operationParams")
1427 if ns_params and ns_params.get("timeout_ns_deploy"):
1428 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1429 else:
1430 timeout_ns_deploy = self.timeout.ns_deploy
1431
1432 # Check for and optionally request placement optimization. Database will be updated if placement activated
1433 stage[2] = "Waiting for Placement."
1434 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1435 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1436 for vnfr in db_vnfrs.values():
1437 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1438 break
1439 else:
1440 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1441
1442 return await self._instantiate_ng_ro(
1443 logging_text,
1444 nsr_id,
1445 nsd,
1446 db_nsr,
1447 db_nslcmop,
1448 db_vnfrs,
1449 db_vnfds,
1450 n2vc_key_list,
1451 stage,
1452 start_deploy,
1453 timeout_ns_deploy,
1454 )
1455 except Exception as e:
1456 stage[2] = "ERROR deploying at VIM"
1457 self.set_vnfr_at_error(db_vnfrs, str(e))
1458 self.logger.error(
1459 "Error deploying at VIM {}".format(e),
1460 exc_info=not isinstance(
1461 e,
1462 (
1463 ROclient.ROClientException,
1464 LcmException,
1465 DbException,
1466 NgRoException,
1467 ),
1468 ),
1469 )
1470 raise
1471
1472 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1473 """
1474 Wait for kdu to be up, get ip address
1475 :param logging_text: prefix use for logging
1476 :param nsr_id:
1477 :param vnfr_id:
1478 :param kdu_name:
1479 :return: IP address, K8s services
1480 """
1481
1482 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1483 nb_tries = 0
1484
1485 while nb_tries < 360:
1486 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1487 kdur = next(
1488 (
1489 x
1490 for x in get_iterable(db_vnfr, "kdur")
1491 if x.get("kdu-name") == kdu_name
1492 ),
1493 None,
1494 )
1495 if not kdur:
1496 raise LcmException(
1497 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1498 )
1499 if kdur.get("status"):
1500 if kdur["status"] in ("READY", "ENABLED"):
1501 return kdur.get("ip-address"), kdur.get("services")
1502 else:
1503 raise LcmException(
1504 "target KDU={} is in error state".format(kdu_name)
1505 )
1506
1507 await asyncio.sleep(10, loop=self.loop)
1508 nb_tries += 1
1509 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1510
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for IP address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target vdu id; when falsy the VNF mgmt IP is used instead
        :param vdu_index: count-index that selects one vdur among duplicates
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: on retry exhaustion, target in ERROR state, or
            failure injecting the key through RO
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ip_address = None
        # once set, the DB polling phase is skipped on later iterations
        target_vdu_id = None
        ro_retries = 0

        while True:
            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur owning the VNF management IP
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up by definition; otherwise require
                # ACTIVE either from the legacy status or the NG-RO vim_status
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # PDUs are externally managed; only return the IP
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    # ask RO to run the inject_ssh_key action on the target vdur
                    target = {
                        "action": {
                            "action": "inject_ssh_key",
                            "key": pub_key,
                            "user": user,
                        },
                        "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                    }
                    desc = await self.RO.deploy(nsr_id, target)
                    action_id = desc["action_id"]
                    await self._wait_ng_ro(
                        nsr_id, action_id, timeout=600, operation="instantiation"
                    )
                    break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
            else:
                # no key to inject: the IP address is all that was needed
                break

        return ip_address
1636
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: NS record id whose configurationStatus is polled
        :param vca_deployed_list: deployed VCA list; vca_index selects this VCA
        :param vca_index: position of this VCA within the list
        :raises LcmException: if a dependency is BROKEN or does not become
            READY before the retry budget is exhausted
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): 'timeout' counts iterations, not seconds — with a 10 s
        # sleep the real budget is ~50 minutes; confirm this is intended.
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level VCA (no member-vnf-index) depends on every other
                # VCA; a VNF-level VCA only on those of the same VNF.
                if not my_vca.get("member-vnf-index") or (
                    vca_deployed.get("member-vnf-index")
                    == my_vca.get("member-vnf-index")
                ):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == "READY":
                        continue
                    elif internal_status == "BROKEN":
                        raise LcmException(
                            "Configuration aborted because dependent charm/s has failed"
                        )
                    else:
                        # a dependency is still in progress: poll again
                        break
            else:
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1674
1675 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1676 vca_id = None
1677 if db_vnfr:
1678 vca_id = deep_get(db_vnfr, ("vca-id",))
1679 elif db_nsr:
1680 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1681 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1682 return vca_id
1683
1684 async def instantiate_N2VC(
1685 self,
1686 logging_text,
1687 vca_index,
1688 nsi_id,
1689 db_nsr,
1690 db_vnfr,
1691 vdu_id,
1692 kdu_name,
1693 vdu_index,
1694 kdu_index,
1695 config_descriptor,
1696 deploy_params,
1697 base_folder,
1698 nslcmop_id,
1699 stage,
1700 vca_type,
1701 vca_name,
1702 ee_config_descriptor,
1703 ):
1704 nsr_id = db_nsr["_id"]
1705 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1706 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1707 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1708 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1709 db_dict = {
1710 "collection": "nsrs",
1711 "filter": {"_id": nsr_id},
1712 "path": db_update_entry,
1713 }
1714 step = ""
1715 try:
1716 element_type = "NS"
1717 element_under_configuration = nsr_id
1718
1719 vnfr_id = None
1720 if db_vnfr:
1721 vnfr_id = db_vnfr["_id"]
1722 osm_config["osm"]["vnf_id"] = vnfr_id
1723
1724 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1725
1726 if vca_type == "native_charm":
1727 index_number = 0
1728 else:
1729 index_number = vdu_index or 0
1730
1731 if vnfr_id:
1732 element_type = "VNF"
1733 element_under_configuration = vnfr_id
1734 namespace += ".{}-{}".format(vnfr_id, index_number)
1735 if vdu_id:
1736 namespace += ".{}-{}".format(vdu_id, index_number)
1737 element_type = "VDU"
1738 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1739 osm_config["osm"]["vdu_id"] = vdu_id
1740 elif kdu_name:
1741 namespace += ".{}".format(kdu_name)
1742 element_type = "KDU"
1743 element_under_configuration = kdu_name
1744 osm_config["osm"]["kdu_name"] = kdu_name
1745
1746 # Get artifact path
1747 if base_folder["pkg-dir"]:
1748 artifact_path = "{}/{}/{}/{}".format(
1749 base_folder["folder"],
1750 base_folder["pkg-dir"],
1751 "charms"
1752 if vca_type
1753 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1754 else "helm-charts",
1755 vca_name,
1756 )
1757 else:
1758 artifact_path = "{}/Scripts/{}/{}/".format(
1759 base_folder["folder"],
1760 "charms"
1761 if vca_type
1762 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1763 else "helm-charts",
1764 vca_name,
1765 )
1766
1767 self.logger.debug("Artifact path > {}".format(artifact_path))
1768
1769 # get initial_config_primitive_list that applies to this element
1770 initial_config_primitive_list = config_descriptor.get(
1771 "initial-config-primitive"
1772 )
1773
1774 self.logger.debug(
1775 "Initial config primitive list > {}".format(
1776 initial_config_primitive_list
1777 )
1778 )
1779
1780 # add config if not present for NS charm
1781 ee_descriptor_id = ee_config_descriptor.get("id")
1782 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1783 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1784 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1785 )
1786
1787 self.logger.debug(
1788 "Initial config primitive list #2 > {}".format(
1789 initial_config_primitive_list
1790 )
1791 )
1792 # n2vc_redesign STEP 3.1
1793 # find old ee_id if exists
1794 ee_id = vca_deployed.get("ee_id")
1795
1796 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1797 # create or register execution environment in VCA
1798 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1799 self._write_configuration_status(
1800 nsr_id=nsr_id,
1801 vca_index=vca_index,
1802 status="CREATING",
1803 element_under_configuration=element_under_configuration,
1804 element_type=element_type,
1805 )
1806
1807 step = "create execution environment"
1808 self.logger.debug(logging_text + step)
1809
1810 ee_id = None
1811 credentials = None
1812 if vca_type == "k8s_proxy_charm":
1813 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1814 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1815 namespace=namespace,
1816 artifact_path=artifact_path,
1817 db_dict=db_dict,
1818 vca_id=vca_id,
1819 )
1820 elif vca_type == "helm" or vca_type == "helm-v3":
1821 ee_id, credentials = await self.vca_map[
1822 vca_type
1823 ].create_execution_environment(
1824 namespace=namespace,
1825 reuse_ee_id=ee_id,
1826 db_dict=db_dict,
1827 config=osm_config,
1828 artifact_path=artifact_path,
1829 chart_model=vca_name,
1830 vca_type=vca_type,
1831 )
1832 else:
1833 ee_id, credentials = await self.vca_map[
1834 vca_type
1835 ].create_execution_environment(
1836 namespace=namespace,
1837 reuse_ee_id=ee_id,
1838 db_dict=db_dict,
1839 vca_id=vca_id,
1840 )
1841
1842 elif vca_type == "native_charm":
1843 step = "Waiting to VM being up and getting IP address"
1844 self.logger.debug(logging_text + step)
1845 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1846 logging_text,
1847 nsr_id,
1848 vnfr_id,
1849 vdu_id,
1850 vdu_index,
1851 user=None,
1852 pub_key=None,
1853 )
1854 credentials = {"hostname": rw_mgmt_ip}
1855 # get username
1856 username = deep_get(
1857 config_descriptor, ("config-access", "ssh-access", "default-user")
1858 )
1859 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1860 # merged. Meanwhile let's get username from initial-config-primitive
1861 if not username and initial_config_primitive_list:
1862 for config_primitive in initial_config_primitive_list:
1863 for param in config_primitive.get("parameter", ()):
1864 if param["name"] == "ssh-username":
1865 username = param["value"]
1866 break
1867 if not username:
1868 raise LcmException(
1869 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1870 "'config-access.ssh-access.default-user'"
1871 )
1872 credentials["username"] = username
1873 # n2vc_redesign STEP 3.2
1874
1875 self._write_configuration_status(
1876 nsr_id=nsr_id,
1877 vca_index=vca_index,
1878 status="REGISTERING",
1879 element_under_configuration=element_under_configuration,
1880 element_type=element_type,
1881 )
1882
1883 step = "register execution environment {}".format(credentials)
1884 self.logger.debug(logging_text + step)
1885 ee_id = await self.vca_map[vca_type].register_execution_environment(
1886 credentials=credentials,
1887 namespace=namespace,
1888 db_dict=db_dict,
1889 vca_id=vca_id,
1890 )
1891
1892 # for compatibility with MON/POL modules, the need model and application name at database
1893 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1894 ee_id_parts = ee_id.split(".")
1895 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1896 if len(ee_id_parts) >= 2:
1897 model_name = ee_id_parts[0]
1898 application_name = ee_id_parts[1]
1899 db_nsr_update[db_update_entry + "model"] = model_name
1900 db_nsr_update[db_update_entry + "application"] = application_name
1901
1902 # n2vc_redesign STEP 3.3
1903 step = "Install configuration Software"
1904
1905 self._write_configuration_status(
1906 nsr_id=nsr_id,
1907 vca_index=vca_index,
1908 status="INSTALLING SW",
1909 element_under_configuration=element_under_configuration,
1910 element_type=element_type,
1911 other_update=db_nsr_update,
1912 )
1913
1914 # TODO check if already done
1915 self.logger.debug(logging_text + step)
1916 config = None
1917 if vca_type == "native_charm":
1918 config_primitive = next(
1919 (p for p in initial_config_primitive_list if p["name"] == "config"),
1920 None,
1921 )
1922 if config_primitive:
1923 config = self._map_primitive_params(
1924 config_primitive, {}, deploy_params
1925 )
1926 num_units = 1
1927 if vca_type == "lxc_proxy_charm":
1928 if element_type == "NS":
1929 num_units = db_nsr.get("config-units") or 1
1930 elif element_type == "VNF":
1931 num_units = db_vnfr.get("config-units") or 1
1932 elif element_type == "VDU":
1933 for v in db_vnfr["vdur"]:
1934 if vdu_id == v["vdu-id-ref"]:
1935 num_units = v.get("config-units") or 1
1936 break
1937 if vca_type != "k8s_proxy_charm":
1938 await self.vca_map[vca_type].install_configuration_sw(
1939 ee_id=ee_id,
1940 artifact_path=artifact_path,
1941 db_dict=db_dict,
1942 config=config,
1943 num_units=num_units,
1944 vca_id=vca_id,
1945 vca_type=vca_type,
1946 )
1947
1948 # write in db flag of configuration_sw already installed
1949 self.update_db_2(
1950 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
1951 )
1952
1953 # add relations for this VCA (wait for other peers related with this VCA)
1954 is_relation_added = await self._add_vca_relations(
1955 logging_text=logging_text,
1956 nsr_id=nsr_id,
1957 vca_type=vca_type,
1958 vca_index=vca_index,
1959 )
1960
1961 if not is_relation_added:
1962 raise LcmException("Relations could not be added to VCA.")
1963
1964 # if SSH access is required, then get execution environment SSH public
1965 # if native charm we have waited already to VM be UP
1966 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1967 pub_key = None
1968 user = None
1969 # self.logger.debug("get ssh key block")
1970 if deep_get(
1971 config_descriptor, ("config-access", "ssh-access", "required")
1972 ):
1973 # self.logger.debug("ssh key needed")
1974 # Needed to inject a ssh key
1975 user = deep_get(
1976 config_descriptor,
1977 ("config-access", "ssh-access", "default-user"),
1978 )
1979 step = "Install configuration Software, getting public ssh key"
1980 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
1981 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
1982 )
1983
1984 step = "Insert public key into VM user={} ssh_key={}".format(
1985 user, pub_key
1986 )
1987 else:
1988 # self.logger.debug("no need to get ssh key")
1989 step = "Waiting to VM being up and getting IP address"
1990 self.logger.debug(logging_text + step)
1991
1992 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1993 rw_mgmt_ip = None
1994
1995 # n2vc_redesign STEP 5.1
1996 # wait for RO (ip-address) Insert pub_key into VM
1997 if vnfr_id:
1998 if kdu_name:
1999 rw_mgmt_ip, services = await self.wait_kdu_up(
2000 logging_text, nsr_id, vnfr_id, kdu_name
2001 )
2002 vnfd = self.db.get_one(
2003 "vnfds_revisions",
2004 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2005 )
2006 kdu = get_kdu(vnfd, kdu_name)
2007 kdu_services = [
2008 service["name"] for service in get_kdu_services(kdu)
2009 ]
2010 exposed_services = []
2011 for service in services:
2012 if any(s in service["name"] for s in kdu_services):
2013 exposed_services.append(service)
2014 await self.vca_map[vca_type].exec_primitive(
2015 ee_id=ee_id,
2016 primitive_name="config",
2017 params_dict={
2018 "osm-config": json.dumps(
2019 OsmConfigBuilder(
2020 k8s={"services": exposed_services}
2021 ).build()
2022 )
2023 },
2024 vca_id=vca_id,
2025 )
2026
2027 # This verification is needed in order to avoid trying to add a public key
2028 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2029 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2030 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2031 # or it is a KNF)
2032 elif db_vnfr.get("vdur"):
2033 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2034 logging_text,
2035 nsr_id,
2036 vnfr_id,
2037 vdu_id,
2038 vdu_index,
2039 user=user,
2040 pub_key=pub_key,
2041 )
2042
2043 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2044
2045 # store rw_mgmt_ip in deploy params for later replacement
2046 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2047
2048 # n2vc_redesign STEP 6 Execute initial config primitive
2049 step = "execute initial config primitive"
2050
2051 # wait for dependent primitives execution (NS -> VNF -> VDU)
2052 if initial_config_primitive_list:
2053 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2054
2055 # stage, in function of element type: vdu, kdu, vnf or ns
2056 my_vca = vca_deployed_list[vca_index]
2057 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2058 # VDU or KDU
2059 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2060 elif my_vca.get("member-vnf-index"):
2061 # VNF
2062 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2063 else:
2064 # NS
2065 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2066
2067 self._write_configuration_status(
2068 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2069 )
2070
2071 self._write_op_status(op_id=nslcmop_id, stage=stage)
2072
2073 check_if_terminated_needed = True
2074 for initial_config_primitive in initial_config_primitive_list:
2075 # adding information on the vca_deployed if it is a NS execution environment
2076 if not vca_deployed["member-vnf-index"]:
2077 deploy_params["ns_config_info"] = json.dumps(
2078 self._get_ns_config_info(nsr_id)
2079 )
2080 # TODO check if already done
2081 primitive_params_ = self._map_primitive_params(
2082 initial_config_primitive, {}, deploy_params
2083 )
2084
2085 step = "execute primitive '{}' params '{}'".format(
2086 initial_config_primitive["name"], primitive_params_
2087 )
2088 self.logger.debug(logging_text + step)
2089 await self.vca_map[vca_type].exec_primitive(
2090 ee_id=ee_id,
2091 primitive_name=initial_config_primitive["name"],
2092 params_dict=primitive_params_,
2093 db_dict=db_dict,
2094 vca_id=vca_id,
2095 vca_type=vca_type,
2096 )
2097 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2098 if check_if_terminated_needed:
2099 if config_descriptor.get("terminate-config-primitive"):
2100 self.update_db_2(
2101 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2102 )
2103 check_if_terminated_needed = False
2104
2105 # TODO register in database that primitive is done
2106
2107 # STEP 7 Configure metrics
2108 if vca_type == "helm" or vca_type == "helm-v3":
2109 # TODO: review for those cases where the helm chart is a reference and
2110 # is not part of the NF package
2111 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2112 ee_id=ee_id,
2113 artifact_path=artifact_path,
2114 ee_config_descriptor=ee_config_descriptor,
2115 vnfr_id=vnfr_id,
2116 nsr_id=nsr_id,
2117 target_ip=rw_mgmt_ip,
2118 element_type=element_type,
2119 vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""),
2120 vdu_id=vdu_id,
2121 vdu_index=vdu_index,
2122 kdu_name=kdu_name,
2123 kdu_index=kdu_index,
2124 )
2125 if prometheus_jobs:
2126 self.update_db_2(
2127 "nsrs",
2128 nsr_id,
2129 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2130 )
2131
2132 for job in prometheus_jobs:
2133 self.db.set_one(
2134 "prometheus_jobs",
2135 {"job_name": job["job_name"]},
2136 job,
2137 upsert=True,
2138 fail_on_empty=False,
2139 )
2140
2141 step = "instantiated at VCA"
2142 self.logger.debug(logging_text + step)
2143
2144 self._write_configuration_status(
2145 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2146 )
2147
2148 except Exception as e: # TODO not use Exception but N2VC exception
2149 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2150 if not isinstance(
2151 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2152 ):
2153 self.logger.error(
2154 "Exception while {} : {}".format(step, e), exc_info=True
2155 )
2156 self._write_configuration_status(
2157 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2158 )
2159 raise LcmException("{}. {}".format(step, e)) from e
2160
2161 def _write_ns_status(
2162 self,
2163 nsr_id: str,
2164 ns_state: str,
2165 current_operation: str,
2166 current_operation_id: str,
2167 error_description: str = None,
2168 error_detail: str = None,
2169 other_update: dict = None,
2170 ):
2171 """
2172 Update db_nsr fields.
2173 :param nsr_id:
2174 :param ns_state:
2175 :param current_operation:
2176 :param current_operation_id:
2177 :param error_description:
2178 :param error_detail:
2179 :param other_update: Other required changes at database if provided, will be cleared
2180 :return:
2181 """
2182 try:
2183 db_dict = other_update or {}
2184 db_dict[
2185 "_admin.nslcmop"
2186 ] = current_operation_id # for backward compatibility
2187 db_dict["_admin.current-operation"] = current_operation_id
2188 db_dict["_admin.operation-type"] = (
2189 current_operation if current_operation != "IDLE" else None
2190 )
2191 db_dict["currentOperation"] = current_operation
2192 db_dict["currentOperationID"] = current_operation_id
2193 db_dict["errorDescription"] = error_description
2194 db_dict["errorDetail"] = error_detail
2195
2196 if ns_state:
2197 db_dict["nsState"] = ns_state
2198 self.update_db_2("nsrs", nsr_id, db_dict)
2199 except DbException as e:
2200 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2201
2202 def _write_op_status(
2203 self,
2204 op_id: str,
2205 stage: list = None,
2206 error_message: str = None,
2207 queuePosition: int = 0,
2208 operation_state: str = None,
2209 other_update: dict = None,
2210 ):
2211 try:
2212 db_dict = other_update or {}
2213 db_dict["queuePosition"] = queuePosition
2214 if isinstance(stage, list):
2215 db_dict["stage"] = stage[0]
2216 db_dict["detailed-status"] = " ".join(stage)
2217 elif stage is not None:
2218 db_dict["stage"] = str(stage)
2219
2220 if error_message is not None:
2221 db_dict["errorMessage"] = error_message
2222 if operation_state is not None:
2223 db_dict["operationState"] = operation_state
2224 db_dict["statusEnteredTime"] = time()
2225 self.update_db_2("nslcmops", op_id, db_dict)
2226 except DbException as e:
2227 self.logger.warn(
2228 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2229 )
2230
2231 def _write_all_config_status(self, db_nsr: dict, status: str):
2232 try:
2233 nsr_id = db_nsr["_id"]
2234 # configurationStatus
2235 config_status = db_nsr.get("configurationStatus")
2236 if config_status:
2237 db_nsr_update = {
2238 "configurationStatus.{}.status".format(index): status
2239 for index, v in enumerate(config_status)
2240 if v
2241 }
2242 # update status
2243 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2244
2245 except DbException as e:
2246 self.logger.warn(
2247 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2248 )
2249
2250 def _write_configuration_status(
2251 self,
2252 nsr_id: str,
2253 vca_index: int,
2254 status: str = None,
2255 element_under_configuration: str = None,
2256 element_type: str = None,
2257 other_update: dict = None,
2258 ):
2259 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2260 # .format(vca_index, status))
2261
2262 try:
2263 db_path = "configurationStatus.{}.".format(vca_index)
2264 db_dict = other_update or {}
2265 if status:
2266 db_dict[db_path + "status"] = status
2267 if element_under_configuration:
2268 db_dict[
2269 db_path + "elementUnderConfiguration"
2270 ] = element_under_configuration
2271 if element_type:
2272 db_dict[db_path + "elementType"] = element_type
2273 self.update_db_2("nsrs", nsr_id, db_dict)
2274 except DbException as e:
2275 self.logger.warn(
2276 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2277 status, nsr_id, vca_index, e
2278 )
2279 )
2280
    async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
        """
        Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
        sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
        Database is used because the result can be obtained from a different LCM worker in case of HA.
        :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
        :param db_nslcmop: database content of nslcmop
        :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
        :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
            computed 'vim-account-id'
        """
        modified = False
        nslcmop_id = db_nslcmop["_id"]
        placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
        # Only the external "PLA" engine is supported; any other value means no placement step
        if placement_engine == "PLA":
            self.logger.debug(
                logging_text + "Invoke and wait for placement optimization"
            )
            await self.msg.aiowrite(
                "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
            )
            db_poll_interval = 5
            # total wait budget: 10 polls of db_poll_interval seconds (~50s)
            wait = db_poll_interval * 10
            pla_result = None
            # poll the database (not kafka) because in HA the answer may have been
            # consumed and stored by a different LCM worker
            while not pla_result and wait >= 0:
                await asyncio.sleep(db_poll_interval)
                wait -= db_poll_interval
                db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
                pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

            if not pla_result:
                raise LcmException(
                    "Placement timeout for nslcmopId={}".format(nslcmop_id)
                )

            # apply the vim account chosen by PLA to each matching vnfr
            for pla_vnf in pla_result["vnf"]:
                vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
                # skip entries without a decision or not belonging to this NS
                if not pla_vnf.get("vimAccountId") or not vnfr:
                    continue
                modified = True
                self.db.set_one(
                    "vnfrs",
                    {"_id": vnfr["_id"]},
                    {"vim-account-id": pla_vnf["vimAccountId"]},
                )
                # Modifies db_vnfrs (in-memory copy kept in sync with the database)
                vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
        return modified
2329
2330 def update_nsrs_with_pla_result(self, params):
2331 try:
2332 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2333 self.update_db_2(
2334 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2335 )
2336 except Exception as e:
2337 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2338
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a NS: reads descriptors/records from the database, deploys
        KDUs, deploys the NS at the VIM (RO) and creates the execution
        environments (N2VC), then waits for all the launched tasks and writes
        the final operation result at database and kafka.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Progress and result are written to the "nsrs" and
            "nslcmops" database records and notified via kafka.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        timeout_ns_deploy = self.timeout.ns_deploy

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams are stored JSON-encoded; decode in place
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts while vnfd_id is a string,
                # so this membership test never matches and the vnfd is re-read for
                # every vnfr sharing the same vnfd — confirm and deduplicate by id
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.public_key:
                n2vc_key_list.append(self.vca_config.public_key)

            stage[1] = "Deploying NS at VIM."
            # VIM deployment runs concurrently with the N2VC deployments below;
            # completion is awaited at the finally block via tasks_dict_info
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            # create namespace and certificate if any helm based EE is present in the NS
            if check_helm_ee_in_ns(db_vnfds):
                # TODO: create EE namespace
                # create TLS certificates
                await self.vca_map["helm-v3"].create_tls_certificate(
                    secret_name="ee-tls-{}".format(nsr_id),
                    dns_prefix="*",
                    nsr_id=nsr_id,
                    usage="server auth",
                )

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None
                kdu_index = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if any
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        kdu_index=kdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        kdu_index = None
                        # one charm deployment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                kdu_index=kdu_index,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms, if any
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdu_index, kdur = next(
                            x
                            for x in enumerate(db_vnfr["kdur"])
                            if x[1]["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            kdu_index=kdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                kdu_index = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2842
2843 def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]):
2844 if vnfd_id not in cached_vnfds:
2845 cached_vnfds[vnfd_id] = self.db.get_one(
2846 "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read}
2847 )
2848 return cached_vnfds[vnfd_id]
2849
2850 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2851 if vnf_profile_id not in cached_vnfrs:
2852 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2853 "vnfrs",
2854 {
2855 "member-vnf-index-ref": vnf_profile_id,
2856 "nsr-id-ref": nsr_id,
2857 },
2858 )
2859 return cached_vnfrs[vnf_profile_id]
2860
2861 def _is_deployed_vca_in_relation(
2862 self, vca: DeployedVCA, relation: Relation
2863 ) -> bool:
2864 found = False
2865 for endpoint in (relation.provider, relation.requirer):
2866 if endpoint["kdu-resource-profile-id"]:
2867 continue
2868 found = (
2869 vca.vnf_profile_id == endpoint.vnf_profile_id
2870 and vca.vdu_profile_id == endpoint.vdu_profile_id
2871 and vca.execution_environment_ref == endpoint.execution_environment_ref
2872 )
2873 if found:
2874 break
2875 return found
2876
2877 def _update_ee_relation_data_with_implicit_data(
2878 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2879 ):
2880 ee_relation_data = safe_get_ee_relation(
2881 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2882 )
2883 ee_relation_level = EELevel.get_level(ee_relation_data)
2884 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2885 "execution-environment-ref"
2886 ]:
2887 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2888 vnfd_id = vnf_profile["vnfd-id"]
2889 project = nsd["_admin"]["projects_read"][0]
2890 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2891 entity_id = (
2892 vnfd_id
2893 if ee_relation_level == EELevel.VNF
2894 else ee_relation_data["vdu-profile-id"]
2895 )
2896 ee = get_juju_ee_ref(db_vnfd, entity_id)
2897 if not ee:
2898 raise Exception(
2899 f"not execution environments found for ee_relation {ee_relation_data}"
2900 )
2901 ee_relation_data["execution-environment-ref"] = ee["id"]
2902 return ee_relation_data
2903
2904 def _get_ns_relations(
2905 self,
2906 nsr_id: str,
2907 nsd: Dict[str, Any],
2908 vca: DeployedVCA,
2909 cached_vnfds: Dict[str, Any],
2910 ) -> List[Relation]:
2911 relations = []
2912 db_ns_relations = get_ns_configuration_relation_list(nsd)
2913 for r in db_ns_relations:
2914 provider_dict = None
2915 requirer_dict = None
2916 if all(key in r for key in ("provider", "requirer")):
2917 provider_dict = r["provider"]
2918 requirer_dict = r["requirer"]
2919 elif "entities" in r:
2920 provider_id = r["entities"][0]["id"]
2921 provider_dict = {
2922 "nsr-id": nsr_id,
2923 "endpoint": r["entities"][0]["endpoint"],
2924 }
2925 if provider_id != nsd["id"]:
2926 provider_dict["vnf-profile-id"] = provider_id
2927 requirer_id = r["entities"][1]["id"]
2928 requirer_dict = {
2929 "nsr-id": nsr_id,
2930 "endpoint": r["entities"][1]["endpoint"],
2931 }
2932 if requirer_id != nsd["id"]:
2933 requirer_dict["vnf-profile-id"] = requirer_id
2934 else:
2935 raise Exception(
2936 "provider/requirer or entities must be included in the relation."
2937 )
2938 relation_provider = self._update_ee_relation_data_with_implicit_data(
2939 nsr_id, nsd, provider_dict, cached_vnfds
2940 )
2941 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2942 nsr_id, nsd, requirer_dict, cached_vnfds
2943 )
2944 provider = EERelation(relation_provider)
2945 requirer = EERelation(relation_requirer)
2946 relation = Relation(r["name"], provider, requirer)
2947 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2948 if vca_in_relation:
2949 relations.append(relation)
2950 return relations
2951
2952 def _get_vnf_relations(
2953 self,
2954 nsr_id: str,
2955 nsd: Dict[str, Any],
2956 vca: DeployedVCA,
2957 cached_vnfds: Dict[str, Any],
2958 ) -> List[Relation]:
2959 relations = []
2960 if vca.target_element == "ns":
2961 self.logger.debug("VCA is a NS charm, not a VNF.")
2962 return relations
2963 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2964 vnf_profile_id = vnf_profile["id"]
2965 vnfd_id = vnf_profile["vnfd-id"]
2966 project = nsd["_admin"]["projects_read"][0]
2967 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
2968 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2969 for r in db_vnf_relations:
2970 provider_dict = None
2971 requirer_dict = None
2972 if all(key in r for key in ("provider", "requirer")):
2973 provider_dict = r["provider"]
2974 requirer_dict = r["requirer"]
2975 elif "entities" in r:
2976 provider_id = r["entities"][0]["id"]
2977 provider_dict = {
2978 "nsr-id": nsr_id,
2979 "vnf-profile-id": vnf_profile_id,
2980 "endpoint": r["entities"][0]["endpoint"],
2981 }
2982 if provider_id != vnfd_id:
2983 provider_dict["vdu-profile-id"] = provider_id
2984 requirer_id = r["entities"][1]["id"]
2985 requirer_dict = {
2986 "nsr-id": nsr_id,
2987 "vnf-profile-id": vnf_profile_id,
2988 "endpoint": r["entities"][1]["endpoint"],
2989 }
2990 if requirer_id != vnfd_id:
2991 requirer_dict["vdu-profile-id"] = requirer_id
2992 else:
2993 raise Exception(
2994 "provider/requirer or entities must be included in the relation."
2995 )
2996 relation_provider = self._update_ee_relation_data_with_implicit_data(
2997 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
2998 )
2999 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3000 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3001 )
3002 provider = EERelation(relation_provider)
3003 requirer = EERelation(relation_requirer)
3004 relation = Relation(r["name"], provider, requirer)
3005 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3006 if vca_in_relation:
3007 relations.append(relation)
3008 return relations
3009
3010 def _get_kdu_resource_data(
3011 self,
3012 ee_relation: EERelation,
3013 db_nsr: Dict[str, Any],
3014 cached_vnfds: Dict[str, Any],
3015 ) -> DeployedK8sResource:
3016 nsd = get_nsd(db_nsr)
3017 vnf_profiles = get_vnf_profiles(nsd)
3018 vnfd_id = find_in_list(
3019 vnf_profiles,
3020 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3021 )["vnfd-id"]
3022 project = nsd["_admin"]["projects_read"][0]
3023 db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
3024 kdu_resource_profile = get_kdu_resource_profile(
3025 db_vnfd, ee_relation.kdu_resource_profile_id
3026 )
3027 kdu_name = kdu_resource_profile["kdu-name"]
3028 deployed_kdu, _ = get_deployed_kdu(
3029 db_nsr.get("_admin", ()).get("deployed", ()),
3030 kdu_name,
3031 ee_relation.vnf_profile_id,
3032 )
3033 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3034 return deployed_kdu
3035
3036 def _get_deployed_component(
3037 self,
3038 ee_relation: EERelation,
3039 db_nsr: Dict[str, Any],
3040 cached_vnfds: Dict[str, Any],
3041 ) -> DeployedComponent:
3042 nsr_id = db_nsr["_id"]
3043 deployed_component = None
3044 ee_level = EELevel.get_level(ee_relation)
3045 if ee_level == EELevel.NS:
3046 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3047 if vca:
3048 deployed_component = DeployedVCA(nsr_id, vca)
3049 elif ee_level == EELevel.VNF:
3050 vca = get_deployed_vca(
3051 db_nsr,
3052 {
3053 "vdu_id": None,
3054 "member-vnf-index": ee_relation.vnf_profile_id,
3055 "ee_descriptor_id": ee_relation.execution_environment_ref,
3056 },
3057 )
3058 if vca:
3059 deployed_component = DeployedVCA(nsr_id, vca)
3060 elif ee_level == EELevel.VDU:
3061 vca = get_deployed_vca(
3062 db_nsr,
3063 {
3064 "vdu_id": ee_relation.vdu_profile_id,
3065 "member-vnf-index": ee_relation.vnf_profile_id,
3066 "ee_descriptor_id": ee_relation.execution_environment_ref,
3067 },
3068 )
3069 if vca:
3070 deployed_component = DeployedVCA(nsr_id, vca)
3071 elif ee_level == EELevel.KDU:
3072 kdu_resource_data = self._get_kdu_resource_data(
3073 ee_relation, db_nsr, cached_vnfds
3074 )
3075 if kdu_resource_data:
3076 deployed_component = DeployedK8sResource(kdu_resource_data)
3077 return deployed_component
3078
3079 async def _add_relation(
3080 self,
3081 relation: Relation,
3082 vca_type: str,
3083 db_nsr: Dict[str, Any],
3084 cached_vnfds: Dict[str, Any],
3085 cached_vnfrs: Dict[str, Any],
3086 ) -> bool:
3087 deployed_provider = self._get_deployed_component(
3088 relation.provider, db_nsr, cached_vnfds
3089 )
3090 deployed_requirer = self._get_deployed_component(
3091 relation.requirer, db_nsr, cached_vnfds
3092 )
3093 if (
3094 deployed_provider
3095 and deployed_requirer
3096 and deployed_provider.config_sw_installed
3097 and deployed_requirer.config_sw_installed
3098 ):
3099 provider_db_vnfr = (
3100 self._get_vnfr(
3101 relation.provider.nsr_id,
3102 relation.provider.vnf_profile_id,
3103 cached_vnfrs,
3104 )
3105 if relation.provider.vnf_profile_id
3106 else None
3107 )
3108 requirer_db_vnfr = (
3109 self._get_vnfr(
3110 relation.requirer.nsr_id,
3111 relation.requirer.vnf_profile_id,
3112 cached_vnfrs,
3113 )
3114 if relation.requirer.vnf_profile_id
3115 else None
3116 )
3117 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3118 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3119 provider_relation_endpoint = RelationEndpoint(
3120 deployed_provider.ee_id,
3121 provider_vca_id,
3122 relation.provider.endpoint,
3123 )
3124 requirer_relation_endpoint = RelationEndpoint(
3125 deployed_requirer.ee_id,
3126 requirer_vca_id,
3127 relation.requirer.endpoint,
3128 )
3129 try:
3130 await self.vca_map[vca_type].add_relation(
3131 provider=provider_relation_endpoint,
3132 requirer=requirer_relation_endpoint,
3133 )
3134 except N2VCException as exception:
3135 self.logger.error(exception)
3136 raise LcmException(exception)
3137 return True
3138 return False
3139
3140 async def _add_vca_relations(
3141 self,
3142 logging_text,
3143 nsr_id,
3144 vca_type: str,
3145 vca_index: int,
3146 timeout: int = 3600,
3147 ) -> bool:
3148 # steps:
3149 # 1. find all relations for this VCA
3150 # 2. wait for other peers related
3151 # 3. add relations
3152
3153 try:
3154 # STEP 1: find all relations for this VCA
3155
3156 # read nsr record
3157 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3158 nsd = get_nsd(db_nsr)
3159
3160 # this VCA data
3161 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3162 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3163
3164 cached_vnfds = {}
3165 cached_vnfrs = {}
3166 relations = []
3167 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3168 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3169
3170 # if no relations, terminate
3171 if not relations:
3172 self.logger.debug(logging_text + " No relations")
3173 return True
3174
3175 self.logger.debug(logging_text + " adding relations {}".format(relations))
3176
3177 # add all relations
3178 start = time()
3179 while True:
3180 # check timeout
3181 now = time()
3182 if now - start >= timeout:
3183 self.logger.error(logging_text + " : timeout adding relations")
3184 return False
3185
3186 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3187 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3188
3189 # for each relation, find the VCA's related
3190 for relation in relations.copy():
3191 added = await self._add_relation(
3192 relation,
3193 vca_type,
3194 db_nsr,
3195 cached_vnfds,
3196 cached_vnfrs,
3197 )
3198 if added:
3199 relations.remove(relation)
3200
3201 if not relations:
3202 self.logger.debug("Relations added")
3203 break
3204 await asyncio.sleep(5.0)
3205
3206 return True
3207
3208 except Exception as e:
3209 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3210 return False
3211
3212 async def _install_kdu(
3213 self,
3214 nsr_id: str,
3215 nsr_db_path: str,
3216 vnfr_data: dict,
3217 kdu_index: int,
3218 kdud: dict,
3219 vnfd: dict,
3220 k8s_instance_info: dict,
3221 k8params: dict = None,
3222 timeout: int = 600,
3223 vca_id: str = None,
3224 ):
3225 try:
3226 k8sclustertype = k8s_instance_info["k8scluster-type"]
3227 # Instantiate kdu
3228 db_dict_install = {
3229 "collection": "nsrs",
3230 "filter": {"_id": nsr_id},
3231 "path": nsr_db_path,
3232 }
3233
3234 if k8s_instance_info.get("kdu-deployment-name"):
3235 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3236 else:
3237 kdu_instance = self.k8scluster_map[
3238 k8sclustertype
3239 ].generate_kdu_instance_name(
3240 db_dict=db_dict_install,
3241 kdu_model=k8s_instance_info["kdu-model"],
3242 kdu_name=k8s_instance_info["kdu-name"],
3243 )
3244
3245 # Update the nsrs table with the kdu-instance value
3246 self.update_db_2(
3247 item="nsrs",
3248 _id=nsr_id,
3249 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3250 )
3251
3252 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3253 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3254 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3255 # namespace, this first verification could be removed, and the next step would be done for any kind
3256 # of KNF.
3257 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3258 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3259 if k8sclustertype in ("juju", "juju-bundle"):
3260 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3261 # that the user passed a namespace which he wants its KDU to be deployed in)
3262 if (
3263 self.db.count(
3264 table="nsrs",
3265 q_filter={
3266 "_id": nsr_id,
3267 "_admin.projects_write": k8s_instance_info["namespace"],
3268 "_admin.projects_read": k8s_instance_info["namespace"],
3269 },
3270 )
3271 > 0
3272 ):
3273 self.logger.debug(
3274 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3275 )
3276 self.update_db_2(
3277 item="nsrs",
3278 _id=nsr_id,
3279 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3280 )
3281 k8s_instance_info["namespace"] = kdu_instance
3282
3283 await self.k8scluster_map[k8sclustertype].install(
3284 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3285 kdu_model=k8s_instance_info["kdu-model"],
3286 atomic=True,
3287 params=k8params,
3288 db_dict=db_dict_install,
3289 timeout=timeout,
3290 kdu_name=k8s_instance_info["kdu-name"],
3291 namespace=k8s_instance_info["namespace"],
3292 kdu_instance=kdu_instance,
3293 vca_id=vca_id,
3294 )
3295
3296 # Obtain services to obtain management service ip
3297 services = await self.k8scluster_map[k8sclustertype].get_services(
3298 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3299 kdu_instance=kdu_instance,
3300 namespace=k8s_instance_info["namespace"],
3301 )
3302
3303 # Obtain management service info (if exists)
3304 vnfr_update_dict = {}
3305 kdu_config = get_configuration(vnfd, kdud["name"])
3306 if kdu_config:
3307 target_ee_list = kdu_config.get("execution-environment-list", [])
3308 else:
3309 target_ee_list = []
3310
3311 if services:
3312 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3313 mgmt_services = [
3314 service
3315 for service in kdud.get("service", [])
3316 if service.get("mgmt-service")
3317 ]
3318 for mgmt_service in mgmt_services:
3319 for service in services:
3320 if service["name"].startswith(mgmt_service["name"]):
3321 # Mgmt service found, Obtain service ip
3322 ip = service.get("external_ip", service.get("cluster_ip"))
3323 if isinstance(ip, list) and len(ip) == 1:
3324 ip = ip[0]
3325
3326 vnfr_update_dict[
3327 "kdur.{}.ip-address".format(kdu_index)
3328 ] = ip
3329
3330 # Check if must update also mgmt ip at the vnf
3331 service_external_cp = mgmt_service.get(
3332 "external-connection-point-ref"
3333 )
3334 if service_external_cp:
3335 if (
3336 deep_get(vnfd, ("mgmt-interface", "cp"))
3337 == service_external_cp
3338 ):
3339 vnfr_update_dict["ip-address"] = ip
3340
3341 if find_in_list(
3342 target_ee_list,
3343 lambda ee: ee.get(
3344 "external-connection-point-ref", ""
3345 )
3346 == service_external_cp,
3347 ):
3348 vnfr_update_dict[
3349 "kdur.{}.ip-address".format(kdu_index)
3350 ] = ip
3351 break
3352 else:
3353 self.logger.warn(
3354 "Mgmt service name: {} not found".format(
3355 mgmt_service["name"]
3356 )
3357 )
3358
3359 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3360 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3361
3362 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3363 if (
3364 kdu_config
3365 and kdu_config.get("initial-config-primitive")
3366 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3367 ):
3368 initial_config_primitive_list = kdu_config.get(
3369 "initial-config-primitive"
3370 )
3371 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3372
3373 for initial_config_primitive in initial_config_primitive_list:
3374 primitive_params_ = self._map_primitive_params(
3375 initial_config_primitive, {}, {}
3376 )
3377
3378 await asyncio.wait_for(
3379 self.k8scluster_map[k8sclustertype].exec_primitive(
3380 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3381 kdu_instance=kdu_instance,
3382 primitive_name=initial_config_primitive["name"],
3383 params=primitive_params_,
3384 db_dict=db_dict_install,
3385 vca_id=vca_id,
3386 ),
3387 timeout=timeout,
3388 )
3389
3390 except Exception as e:
3391 # Prepare update db with error and raise exception
3392 try:
3393 self.update_db_2(
3394 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3395 )
3396 self.update_db_2(
3397 "vnfrs",
3398 vnfr_data.get("_id"),
3399 {"kdur.{}.status".format(kdu_index): "ERROR"},
3400 )
3401 except Exception:
3402 # ignore to keep original exception
3403 pass
3404 # reraise original error
3405 raise
3406
3407 return kdu_instance
3408
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch the deployment of every KDU declared in the vnfrs.

        For each kdur: resolves the K8s cluster id, synchronizes helm repos
        once per cluster, writes the K8s entry into _admin.deployed.K8s of the
        nsr, and spawns one _install_kdu task per KDU (registered in
        task_instantiation_info / lcm_tasks; this method does not await them).

        :param logging_text: prefix for log messages.
        :param nsr_id: NS record id.
        :param nslcmop_id: current NS LCM operation id.
        :param db_vnfrs: dict of vnfr records (by member index).
        :param db_vnfds: list of VNFD records.
        :param task_instantiation_info: dict task -> description, filled here.
        :raises LcmException: on any non-cancellation failure.
        """
        # Launch kdus if present in the descriptor

        # cache: cluster_type -> {cluster_id: connector-side uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the connector uuid of a K8s cluster, initializing helm-v3 on demand."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            # clusters whose helm repos were already synchronized in this run
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # fire-and-register the per-KDU install; awaited by the caller
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever K8s entries were built, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3680
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        kdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment in descriptor_config.

        For each EE item, determines the VCA type (proxy/native charm, k8s
        proxy charm, helm v2/v3), reuses or creates the matching slot in
        _admin.deployed.VCA of the nsr, and registers an asyncio task in
        task_instantiation_info (tasks are awaited by the caller).
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                # a declared charm artifact means proxy charm; otherwise native
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            vca_index = -1
            # look for an existing VCA slot matching this EE
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    kdu_index=kdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3844
3845 @staticmethod
3846 def _create_nslcmop(nsr_id, operation, params):
3847 """
3848 Creates a ns-lcm-opp content to be stored at database.
3849 :param nsr_id: internal id of the instance
3850 :param operation: instantiate, terminate, scale, action, ...
3851 :param params: user parameters for the operation
3852 :return: dictionary following SOL005 format
3853 """
3854 # Raise exception if invalid arguments
3855 if not (nsr_id and operation and params):
3856 raise LcmException(
3857 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3858 )
3859 now = time()
3860 _id = str(uuid4())
3861 nslcmop = {
3862 "id": _id,
3863 "_id": _id,
3864 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3865 "operationState": "PROCESSING",
3866 "statusEnteredTime": now,
3867 "nsInstanceId": nsr_id,
3868 "lcmOperationType": operation,
3869 "startTime": now,
3870 "isAutomaticInvocation": False,
3871 "operationParams": params,
3872 "isCancelPending": False,
3873 "links": {
3874 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3875 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3876 },
3877 }
3878 return nslcmop
3879
3880 def _format_additional_params(self, params):
3881 params = params or {}
3882 for key, value in params.items():
3883 if str(value).startswith("!!yaml "):
3884 params[key] = yaml.safe_load(value[7:])
3885 return params
3886
3887 def _get_terminate_primitive_params(self, seq, vnf_index):
3888 primitive = seq.get("name")
3889 primitive_params = {}
3890 params = {
3891 "member_vnf_index": vnf_index,
3892 "primitive": primitive,
3893 "primitive_params": primitive_params,
3894 }
3895 desc_params = {}
3896 return self._map_primitive_params(seq, params, desc_params)
3897
3898 # sub-operations
3899
3900 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3901 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3902 if op.get("operationState") == "COMPLETED":
3903 # b. Skip sub-operation
3904 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3905 return self.SUBOPERATION_STATUS_SKIP
3906 else:
3907 # c. retry executing sub-operation
3908 # The sub-operation exists, and operationState != 'COMPLETED'
3909 # Update operationState = 'PROCESSING' to indicate a retry.
3910 operationState = "PROCESSING"
3911 detailed_status = "In progress"
3912 self._update_suboperation_status(
3913 db_nslcmop, op_index, operationState, detailed_status
3914 )
3915 # Return the sub-operation index
3916 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3917 # with arguments extracted from the sub-operation
3918 return op_index
3919
3920 # Find a sub-operation where all keys in a matching dictionary must match
3921 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3922 def _find_suboperation(self, db_nslcmop, match):
3923 if db_nslcmop and match:
3924 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3925 for i, op in enumerate(op_list):
3926 if all(op.get(k) == match[k] for k in match):
3927 return i
3928 return self.SUBOPERATION_STATUS_NOT_FOUND
3929
3930 # Update status for a sub-operation given its index
3931 def _update_suboperation_status(
3932 self, db_nslcmop, op_index, operationState, detailed_status
3933 ):
3934 # Update DB for HA tasks
3935 q_filter = {"_id": db_nslcmop["_id"]}
3936 update_dict = {
3937 "_admin.operations.{}.operationState".format(op_index): operationState,
3938 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3939 }
3940 self.db.set_one(
3941 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3942 )
3943
3944 # Add sub-operation, return the index of the added sub-operation
3945 # Optionally, set operationState, detailed-status, and operationType
3946 # Status and type are currently set for 'scale' sub-operations:
3947 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3948 # 'detailed-status' : status message
3949 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3950 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3951 def _add_suboperation(
3952 self,
3953 db_nslcmop,
3954 vnf_index,
3955 vdu_id,
3956 vdu_count_index,
3957 vdu_name,
3958 primitive,
3959 mapped_primitive_params,
3960 operationState=None,
3961 detailed_status=None,
3962 operationType=None,
3963 RO_nsr_id=None,
3964 RO_scaling_info=None,
3965 ):
3966 if not db_nslcmop:
3967 return self.SUBOPERATION_STATUS_NOT_FOUND
3968 # Get the "_admin.operations" list, if it exists
3969 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3970 op_list = db_nslcmop_admin.get("operations")
3971 # Create or append to the "_admin.operations" list
3972 new_op = {
3973 "member_vnf_index": vnf_index,
3974 "vdu_id": vdu_id,
3975 "vdu_count_index": vdu_count_index,
3976 "primitive": primitive,
3977 "primitive_params": mapped_primitive_params,
3978 }
3979 if operationState:
3980 new_op["operationState"] = operationState
3981 if detailed_status:
3982 new_op["detailed-status"] = detailed_status
3983 if operationType:
3984 new_op["lcmOperationType"] = operationType
3985 if RO_nsr_id:
3986 new_op["RO_nsr_id"] = RO_nsr_id
3987 if RO_scaling_info:
3988 new_op["RO_scaling_info"] = RO_scaling_info
3989 if not op_list:
3990 # No existing operations, create key 'operations' with current operation as first list element
3991 db_nslcmop_admin.update({"operations": [new_op]})
3992 op_list = db_nslcmop_admin.get("operations")
3993 else:
3994 # Existing operations, append operation to list
3995 op_list.append(new_op)
3996
3997 db_nslcmop_update = {"_admin.operations": op_list}
3998 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
3999 op_index = len(op_list) - 1
4000 return op_index
4001
4002 # Helper methods for scale() sub-operations
4003
4004 # pre-scale/post-scale:
4005 # Check for 3 different cases:
4006 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4007 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4008 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4009 def _check_or_add_scale_suboperation(
4010 self,
4011 db_nslcmop,
4012 vnf_index,
4013 vnf_config_primitive,
4014 primitive_params,
4015 operationType,
4016 RO_nsr_id=None,
4017 RO_scaling_info=None,
4018 ):
4019 # Find this sub-operation
4020 if RO_nsr_id and RO_scaling_info:
4021 operationType = "SCALE-RO"
4022 match = {
4023 "member_vnf_index": vnf_index,
4024 "RO_nsr_id": RO_nsr_id,
4025 "RO_scaling_info": RO_scaling_info,
4026 }
4027 else:
4028 match = {
4029 "member_vnf_index": vnf_index,
4030 "primitive": vnf_config_primitive,
4031 "primitive_params": primitive_params,
4032 "lcmOperationType": operationType,
4033 }
4034 op_index = self._find_suboperation(db_nslcmop, match)
4035 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4036 # a. New sub-operation
4037 # The sub-operation does not exist, add it.
4038 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4039 # The following parameters are set to None for all kind of scaling:
4040 vdu_id = None
4041 vdu_count_index = None
4042 vdu_name = None
4043 if RO_nsr_id and RO_scaling_info:
4044 vnf_config_primitive = None
4045 primitive_params = None
4046 else:
4047 RO_nsr_id = None
4048 RO_scaling_info = None
4049 # Initial status for sub-operation
4050 operationState = "PROCESSING"
4051 detailed_status = "In progress"
4052 # Add sub-operation for pre/post-scaling (zero or more operations)
4053 self._add_suboperation(
4054 db_nslcmop,
4055 vnf_index,
4056 vdu_id,
4057 vdu_count_index,
4058 vdu_name,
4059 vnf_config_primitive,
4060 primitive_params,
4061 operationState,
4062 detailed_status,
4063 operationType,
4064 RO_nsr_id,
4065 RO_scaling_info,
4066 )
4067 return self.SUBOPERATION_STATUS_NEW
4068 else:
4069 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4070 # or op_index (operationState != 'COMPLETED')
4071 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4072
4073 # Function to return execution_environment id
4074
4075 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4076 # TODO vdu_index_count
4077 for vca in vca_deployed_list:
4078 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4079 return vca["ee_id"]
4080
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate config-primitives of a deployed VCA and,
        optionally, destroy its execution environment.

        :param logging_text: prefix to use in log messages
        :param db_nslcmop: nslcmop record; terminate sub-operations are registered on it
        :param vca_deployed: dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment here,
            because all of them will be destroyed at once afterwards
        :param exec_primitives: False to not execute terminate primitives, because the
            config is not completed or has not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA id, for deployments with a dedicated VCA
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default value for backward compatibility with records without "type"
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # Only run primitives when the VCA was flagged as needing termination
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4186
4187 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4188 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4189 namespace = "." + db_nsr["_id"]
4190 try:
4191 await self.n2vc.delete_namespace(
4192 namespace=namespace,
4193 total_timeout=self.timeout.charm_delete,
4194 vca_id=vca_id,
4195 )
4196 except N2VCNotFound: # already deleted. Skip
4197 pass
4198 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4199
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance in three stages: (1) prepare, (2) execute
        per-VCA terminate primitives, (3) delete all execution environments,
        KDUs and the VIM deployment. Progress and final status are persisted
        in the nsr/nslcmop records and notified through kafka.

        :param nsr_id: id of the nsr record to terminate
        :param nslcmop_id: id of the nslcmop record driving this termination
        :return: None (results are written to the database and kafka)
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout.ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # operation params may override the configured terminate timeout
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a snapshot: later DB writes must not mutate this dict
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                # fetch each distinct VNFD only once
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # pick the configuration descriptor matching this VCA's level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout.charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout.charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete Namespace and Certificates if necessary
            if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                await self.vca_map["helm-v3"].delete_tls_certificate(
                    certificate_name=db_nslcmop["nsInstanceId"],
                )
                # TODO: Delete namespace

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            if self.ro_config.ng:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
                tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if ns_state == "NOT_INSTANTIATED":
                try:
                    self.db.set_list(
                        "vnfrs",
                        {"nsr-id-ref": nsr_id},
                        {"_admin.nsState": "NOT_INSTANTIATED"},
                    )
                except DbException as e:
                    self.logger.warn(
                        logging_text
                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                            nsr_id, e
                        )
                    )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            # notify through kafka that the NS terminated (NBI consumes this)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4529
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, updating operation status as they
        finish, and collect the errors of the ones that failed.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping each asyncio task to a
            human-readable description of what the task does
        :param timeout: overall timeout (seconds) for the whole set of tasks
        :param stage: 3-element list [stage, step, VIM-status]; stage[1] is
            updated in place with "done/total" progress and error summaries
        :param nslcmop_id: operation id whose status is updated with progress
        :param nsr_id: when provided, errors are also written to this nsr record
        :return: list of error-detail strings (empty if all tasks succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global timeout budget
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # mark every still-pending task as timed out and stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/expected exception types are logged without a
                    # traceback; anything else gets the full traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4606
4607 @staticmethod
4608 def _map_primitive_params(primitive_desc, params, instantiation_params):
4609 """
4610 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4611 The default-value is used. If it is between < > it look for a value at instantiation_params
4612 :param primitive_desc: portion of VNFD/NSD that describes primitive
4613 :param params: Params provided by user
4614 :param instantiation_params: Instantiation params provided by user
4615 :return: a dictionary with the calculated params
4616 """
4617 calculated_params = {}
4618 for parameter in primitive_desc.get("parameter", ()):
4619 param_name = parameter["name"]
4620 if param_name in params:
4621 calculated_params[param_name] = params[param_name]
4622 elif "default-value" in parameter or "value" in parameter:
4623 if "value" in parameter:
4624 calculated_params[param_name] = parameter["value"]
4625 else:
4626 calculated_params[param_name] = parameter["default-value"]
4627 if (
4628 isinstance(calculated_params[param_name], str)
4629 and calculated_params[param_name].startswith("<")
4630 and calculated_params[param_name].endswith(">")
4631 ):
4632 if calculated_params[param_name][1:-1] in instantiation_params:
4633 calculated_params[param_name] = instantiation_params[
4634 calculated_params[param_name][1:-1]
4635 ]
4636 else:
4637 raise LcmException(
4638 "Parameter {} needed to execute primitive {} not provided".format(
4639 calculated_params[param_name], primitive_desc["name"]
4640 )
4641 )
4642 else:
4643 raise LcmException(
4644 "Parameter {} needed to execute primitive {} not provided".format(
4645 param_name, primitive_desc["name"]
4646 )
4647 )
4648
4649 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4650 calculated_params[param_name] = yaml.safe_dump(
4651 calculated_params[param_name], default_flow_style=True, width=256
4652 )
4653 elif isinstance(calculated_params[param_name], str) and calculated_params[
4654 param_name
4655 ].startswith("!!yaml "):
4656 calculated_params[param_name] = calculated_params[param_name][7:]
4657 if parameter.get("data-type") == "INTEGER":
4658 try:
4659 calculated_params[param_name] = int(calculated_params[param_name])
4660 except ValueError: # error converting string to int
4661 raise LcmException(
4662 "Parameter {} of primitive {} must be integer".format(
4663 param_name, primitive_desc["name"]
4664 )
4665 )
4666 elif parameter.get("data-type") == "BOOLEAN":
4667 calculated_params[param_name] = not (
4668 (str(calculated_params[param_name])).lower() == "false"
4669 )
4670
4671 # add always ns_config_info if primitive name is config
4672 if primitive_desc["name"] == "config":
4673 if "ns_config_info" in instantiation_params:
4674 calculated_params["ns_config_info"] = instantiation_params[
4675 "ns_config_info"
4676 ]
4677 return calculated_params
4678
4679 def _look_for_deployed_vca(
4680 self,
4681 deployed_vca,
4682 member_vnf_index,
4683 vdu_id,
4684 vdu_count_index,
4685 kdu_name=None,
4686 ee_descriptor_id=None,
4687 ):
4688 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4689 for vca in deployed_vca:
4690 if not vca:
4691 continue
4692 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4693 continue
4694 if (
4695 vdu_count_index is not None
4696 and vdu_count_index != vca["vdu_count_index"]
4697 ):
4698 continue
4699 if kdu_name and kdu_name != vca["kdu_name"]:
4700 continue
4701 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4702 continue
4703 break
4704 else:
4705 # vca_deployed not found
4706 raise LcmException(
4707 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4708 " is not deployed".format(
4709 member_vnf_index,
4710 vdu_id,
4711 vdu_count_index,
4712 kdu_name,
4713 ee_descriptor_id,
4714 )
4715 )
4716 # get ee_id
4717 ee_id = vca.get("ee_id")
4718 vca_type = vca.get(
4719 "type", "lxc_proxy_charm"
4720 ) # default value for backward compatibility - proxy charm
4721 if not ee_id:
4722 raise LcmException(
4723 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4724 "execution environment".format(
4725 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4726 )
4727 )
4728 return ee_id, vca_type
4729
4730 async def _ns_execute_primitive(
4731 self,
4732 ee_id,
4733 primitive,
4734 primitive_params,
4735 retries=0,
4736 retries_interval=30,
4737 timeout=None,
4738 vca_type=None,
4739 db_dict=None,
4740 vca_id: str = None,
4741 ) -> (str, str):
4742 try:
4743 if primitive == "config":
4744 primitive_params = {"params": primitive_params}
4745
4746 vca_type = vca_type or "lxc_proxy_charm"
4747
4748 while retries >= 0:
4749 try:
4750 output = await asyncio.wait_for(
4751 self.vca_map[vca_type].exec_primitive(
4752 ee_id=ee_id,
4753 primitive_name=primitive,
4754 params_dict=primitive_params,
4755 progress_timeout=self.timeout.progress_primitive,
4756 total_timeout=self.timeout.primitive,
4757 db_dict=db_dict,
4758 vca_id=vca_id,
4759 vca_type=vca_type,
4760 ),
4761 timeout=timeout or self.timeout.primitive,
4762 )
4763 # execution was OK
4764 break
4765 except asyncio.CancelledError:
4766 raise
4767 except Exception as e:
4768 retries -= 1
4769 if retries >= 0:
4770 self.logger.debug(
4771 "Error executing action {} on {} -> {}".format(
4772 primitive, ee_id, e
4773 )
4774 )
4775 # wait and retry
4776 await asyncio.sleep(retries_interval, loop=self.loop)
4777 else:
4778 if isinstance(e, asyncio.TimeoutError):
4779 e = N2VCException(
4780 message="Timed out waiting for action to complete"
4781 )
4782 return "FAILED", getattr(e, "message", repr(e))
4783
4784 return "COMPLETED", output
4785
4786 except (LcmException, asyncio.CancelledError):
4787 raise
4788 except Exception as e:
4789 return "FAIL", "Error executing action {}: {}".format(primitive, e)
4790
4791 async def vca_status_refresh(self, nsr_id, nslcmop_id):
4792 """
4793 Updating the vca_status with latest juju information in nsrs record
4794 :param: nsr_id: Id of the nsr
4795 :param: nslcmop_id: Id of the nslcmop
4796 :return: None
4797 """
4798
4799 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
4800 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4801 vca_id = self.get_vca_id({}, db_nsr)
4802 if db_nsr["_admin"]["deployed"]["K8s"]:
4803 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
4804 cluster_uuid, kdu_instance, cluster_type = (
4805 k8s["k8scluster-uuid"],
4806 k8s["kdu-instance"],
4807 k8s["k8scluster-type"],
4808 )
4809 await self._on_update_k8s_db(
4810 cluster_uuid=cluster_uuid,
4811 kdu_instance=kdu_instance,
4812 filter={"_id": nsr_id},
4813 vca_id=vca_id,
4814 cluster_type=cluster_type,
4815 )
4816 else:
4817 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
4818 table, filter = "nsrs", {"_id": nsr_id}
4819 path = "_admin.deployed.VCA.{}.".format(vca_index)
4820 await self._on_update_n2vc_db(table, filter, path, {})
4821
4822 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
4823 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
4824
4825 async def action(self, nsr_id, nslcmop_id):
4826 # Try to lock HA task here
4827 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
4828 if not task_is_locked_by_me:
4829 return
4830
4831 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
4832 self.logger.debug(logging_text + "Enter")
4833 # get all needed from database
4834 db_nsr = None
4835 db_nslcmop = None
4836 db_nsr_update = {}
4837 db_nslcmop_update = {}
4838 nslcmop_operation_state = None
4839 error_description_nslcmop = None
4840 exc = None
4841 step = ""
4842 try:
4843 # wait for any previous tasks in process
4844 step = "Waiting for previous operations to terminate"
4845 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
4846
4847 self._write_ns_status(
4848 nsr_id=nsr_id,
4849 ns_state=None,
4850 current_operation="RUNNING ACTION",
4851 current_operation_id=nslcmop_id,
4852 )
4853
4854 step = "Getting information from database"
4855 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4856 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4857 if db_nslcmop["operationParams"].get("primitive_params"):
4858 db_nslcmop["operationParams"]["primitive_params"] = json.loads(
4859 db_nslcmop["operationParams"]["primitive_params"]
4860 )
4861
4862 nsr_deployed = db_nsr["_admin"].get("deployed")
4863 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4864 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4865 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
4866 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4867 primitive = db_nslcmop["operationParams"]["primitive"]
4868 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
4869 timeout_ns_action = db_nslcmop["operationParams"].get(
4870 "timeout_ns_action", self.timeout.primitive
4871 )
4872
4873 if vnf_index:
4874 step = "Getting vnfr from database"
4875 db_vnfr = self.db.get_one(
4876 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
4877 )
4878 if db_vnfr.get("kdur"):
4879 kdur_list = []
4880 for kdur in db_vnfr["kdur"]:
4881 if kdur.get("additionalParams"):
4882 kdur["additionalParams"] = json.loads(
4883 kdur["additionalParams"]
4884 )
4885 kdur_list.append(kdur)
4886 db_vnfr["kdur"] = kdur_list
4887 step = "Getting vnfd from database"
4888 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4889
4890 # Sync filesystem before running a primitive
4891 self.fs.sync(db_vnfr["vnfd-id"])
4892 else:
4893 step = "Getting nsd from database"
4894 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4895
4896 vca_id = self.get_vca_id(db_vnfr, db_nsr)
4897 # for backward compatibility
4898 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4899 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4900 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4901 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4902
4903 # look for primitive
4904 config_primitive_desc = descriptor_configuration = None
4905 if vdu_id:
4906 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
4907 elif kdu_name:
4908 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
4909 elif vnf_index:
4910 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
4911 else:
4912 descriptor_configuration = db_nsd.get("ns-configuration")
4913
4914 if descriptor_configuration and descriptor_configuration.get(
4915 "config-primitive"
4916 ):
4917 for config_primitive in descriptor_configuration["config-primitive"]:
4918 if config_primitive["name"] == primitive:
4919 config_primitive_desc = config_primitive
4920 break
4921
4922 if not config_primitive_desc:
4923 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
4924 raise LcmException(
4925 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
4926 primitive
4927 )
4928 )
4929 primitive_name = primitive
4930 ee_descriptor_id = None
4931 else:
4932 primitive_name = config_primitive_desc.get(
4933 "execution-environment-primitive", primitive
4934 )
4935 ee_descriptor_id = config_primitive_desc.get(
4936 "execution-environment-ref"
4937 )
4938
4939 if vnf_index:
4940 if vdu_id:
4941 vdur = next(
4942 (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
4943 )
4944 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
4945 elif kdu_name:
4946 kdur = next(
4947 (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
4948 )
4949 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
4950 else:
4951 desc_params = parse_yaml_strings(
4952 db_vnfr.get("additionalParamsForVnf")
4953 )
4954 else:
4955 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
4956 if kdu_name and get_configuration(db_vnfd, kdu_name):
4957 kdu_configuration = get_configuration(db_vnfd, kdu_name)
4958 actions = set()
4959 for primitive in kdu_configuration.get("initial-config-primitive", []):
4960 actions.add(primitive["name"])
4961 for primitive in kdu_configuration.get("config-primitive", []):
4962 actions.add(primitive["name"])
4963 kdu = find_in_list(
4964 nsr_deployed["K8s"],
4965 lambda kdu: kdu_name == kdu["kdu-name"]
4966 and kdu["member-vnf-index"] == vnf_index,
4967 )
4968 kdu_action = (
4969 True
4970 if primitive_name in actions
4971 and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
4972 else False
4973 )
4974
4975 # TODO check if ns is in a proper status
4976 if kdu_name and (
4977 primitive_name in ("upgrade", "rollback", "status") or kdu_action
4978 ):
4979 # kdur and desc_params already set from before
4980 if primitive_params:
4981 desc_params.update(primitive_params)
4982 # TODO Check if we will need something at vnf level
4983 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
4984 if (
4985 kdu_name == kdu["kdu-name"]
4986 and kdu["member-vnf-index"] == vnf_index
4987 ):
4988 break
4989 else:
4990 raise LcmException(
4991 "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
4992 )
4993
4994 if kdu.get("k8scluster-type") not in self.k8scluster_map:
4995 msg = "unknown k8scluster-type '{}'".format(
4996 kdu.get("k8scluster-type")
4997 )
4998 raise LcmException(msg)
4999
5000 db_dict = {
5001 "collection": "nsrs",
5002 "filter": {"_id": nsr_id},
5003 "path": "_admin.deployed.K8s.{}".format(index),
5004 }
5005 self.logger.debug(
5006 logging_text
5007 + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
5008 )
5009 step = "Executing kdu {}".format(primitive_name)
5010 if primitive_name == "upgrade":
5011 if desc_params.get("kdu_model"):
5012 kdu_model = desc_params.get("kdu_model")
5013 del desc_params["kdu_model"]
5014 else:
5015 kdu_model = kdu.get("kdu-model")
5016 if kdu_model.count("/") < 2: # helm chart is not embedded
5017 parts = kdu_model.split(sep=":")
5018 if len(parts) == 2:
5019 kdu_model = parts[0]
5020 if desc_params.get("kdu_atomic_upgrade"):
5021 atomic_upgrade = desc_params.get(
5022 "kdu_atomic_upgrade"
5023 ).lower() in ("yes", "true", "1")
5024 del desc_params["kdu_atomic_upgrade"]
5025 else:
5026 atomic_upgrade = True
5027
5028 detailed_status = await asyncio.wait_for(
5029 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
5030 cluster_uuid=kdu.get("k8scluster-uuid"),
5031 kdu_instance=kdu.get("kdu-instance"),
5032 atomic=atomic_upgrade,
5033 kdu_model=kdu_model,
5034 params=desc_params,
5035 db_dict=db_dict,
5036 timeout=timeout_ns_action,
5037 ),
5038 timeout=timeout_ns_action + 10,
5039 )
5040 self.logger.debug(
5041 logging_text + " Upgrade of kdu {} done".format(detailed_status)
5042 )
5043 elif primitive_name == "rollback":
5044 detailed_status = await asyncio.wait_for(
5045 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
5046 cluster_uuid=kdu.get("k8scluster-uuid"),
5047 kdu_instance=kdu.get("kdu-instance"),
5048 db_dict=db_dict,
5049 ),
5050 timeout=timeout_ns_action,
5051 )
5052 elif primitive_name == "status":
5053 detailed_status = await asyncio.wait_for(
5054 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
5055 cluster_uuid=kdu.get("k8scluster-uuid"),
5056 kdu_instance=kdu.get("kdu-instance"),
5057 vca_id=vca_id,
5058 ),
5059 timeout=timeout_ns_action,
5060 )
5061 else:
5062 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
5063 kdu["kdu-name"], nsr_id
5064 )
5065 params = self._map_primitive_params(
5066 config_primitive_desc, primitive_params, desc_params
5067 )
5068
5069 detailed_status = await asyncio.wait_for(
5070 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
5071 cluster_uuid=kdu.get("k8scluster-uuid"),
5072 kdu_instance=kdu_instance,
5073 primitive_name=primitive_name,
5074 params=params,
5075 db_dict=db_dict,
5076 timeout=timeout_ns_action,
5077 vca_id=vca_id,
5078 ),
5079 timeout=timeout_ns_action,
5080 )
5081
5082 if detailed_status:
5083 nslcmop_operation_state = "COMPLETED"
5084 else:
5085 detailed_status = ""
5086 nslcmop_operation_state = "FAILED"
5087 else:
5088 ee_id, vca_type = self._look_for_deployed_vca(
5089 nsr_deployed["VCA"],
5090 member_vnf_index=vnf_index,
5091 vdu_id=vdu_id,
5092 vdu_count_index=vdu_count_index,
5093 ee_descriptor_id=ee_descriptor_id,
5094 )
5095 for vca_index, vca_deployed in enumerate(
5096 db_nsr["_admin"]["deployed"]["VCA"]
5097 ):
5098 if vca_deployed.get("member-vnf-index") == vnf_index:
5099 db_dict = {
5100 "collection": "nsrs",
5101 "filter": {"_id": nsr_id},
5102 "path": "_admin.deployed.VCA.{}.".format(vca_index),
5103 }
5104 break
5105 (
5106 nslcmop_operation_state,
5107 detailed_status,
5108 ) = await self._ns_execute_primitive(
5109 ee_id,
5110 primitive=primitive_name,
5111 primitive_params=self._map_primitive_params(
5112 config_primitive_desc, primitive_params, desc_params
5113 ),
5114 timeout=timeout_ns_action,
5115 vca_type=vca_type,
5116 db_dict=db_dict,
5117 vca_id=vca_id,
5118 )
5119
5120 db_nslcmop_update["detailed-status"] = detailed_status
5121 error_description_nslcmop = (
5122 detailed_status if nslcmop_operation_state == "FAILED" else ""
5123 )
5124 self.logger.debug(
5125 logging_text
5126 + "Done with result {} {}".format(
5127 nslcmop_operation_state, detailed_status
5128 )
5129 )
5130 return # database update is called inside finally
5131
5132 except (DbException, LcmException, N2VCException, K8sException) as e:
5133 self.logger.error(logging_text + "Exit Exception {}".format(e))
5134 exc = e
5135 except asyncio.CancelledError:
5136 self.logger.error(
5137 logging_text + "Cancelled Exception while '{}'".format(step)
5138 )
5139 exc = "Operation was cancelled"
5140 except asyncio.TimeoutError:
5141 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5142 exc = "Timeout"
5143 except Exception as e:
5144 exc = traceback.format_exc()
5145 self.logger.critical(
5146 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5147 exc_info=True,
5148 )
5149 finally:
5150 if exc:
5151 db_nslcmop_update[
5152 "detailed-status"
5153 ] = (
5154 detailed_status
5155 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5156 nslcmop_operation_state = "FAILED"
5157 if db_nsr:
5158 self._write_ns_status(
5159 nsr_id=nsr_id,
5160 ns_state=db_nsr[
5161 "nsState"
5162 ], # TODO check if degraded. For the moment use previous status
5163 current_operation="IDLE",
5164 current_operation_id=None,
5165 # error_description=error_description_nsr,
5166 # error_detail=error_detail,
5167 other_update=db_nsr_update,
5168 )
5169
5170 self._write_op_status(
5171 op_id=nslcmop_id,
5172 stage="",
5173 error_message=error_description_nslcmop,
5174 operation_state=nslcmop_operation_state,
5175 other_update=db_nslcmop_update,
5176 )
5177
5178 if nslcmop_operation_state:
5179 try:
5180 await self.msg.aiowrite(
5181 "ns",
5182 "actioned",
5183 {
5184 "nsr_id": nsr_id,
5185 "nslcmop_id": nslcmop_id,
5186 "operationState": nslcmop_operation_state,
5187 },
5188 loop=self.loop,
5189 )
5190 except Exception as e:
5191 self.logger.error(
5192 logging_text + "kafka_write notification Exception {}".format(e)
5193 )
5194 self.logger.debug(logging_text + "Exit")
5195 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
5196 return nslcmop_operation_state, detailed_status
5197
5198 async def terminate_vdus(
5199 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5200 ):
5201 """This method terminates VDUs
5202
5203 Args:
5204 db_vnfr: VNF instance record
5205 member_vnf_index: VNF index to identify the VDUs to be removed
5206 db_nsr: NS instance record
5207 update_db_nslcmops: Nslcmop update record
5208 """
5209 vca_scaling_info = []
5210 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5211 scaling_info["scaling_direction"] = "IN"
5212 scaling_info["vdu-delete"] = {}
5213 scaling_info["kdu-delete"] = {}
5214 db_vdur = db_vnfr.get("vdur")
5215 vdur_list = copy(db_vdur)
5216 count_index = 0
5217 for index, vdu in enumerate(vdur_list):
5218 vca_scaling_info.append(
5219 {
5220 "osm_vdu_id": vdu["vdu-id-ref"],
5221 "member-vnf-index": member_vnf_index,
5222 "type": "delete",
5223 "vdu_index": count_index,
5224 }
5225 )
5226 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5227 scaling_info["vdu"].append(
5228 {
5229 "name": vdu.get("name") or vdu.get("vdu-name"),
5230 "vdu_id": vdu["vdu-id-ref"],
5231 "interface": [],
5232 }
5233 )
5234 for interface in vdu["interfaces"]:
5235 scaling_info["vdu"][index]["interface"].append(
5236 {
5237 "name": interface["name"],
5238 "ip_address": interface["ip-address"],
5239 "mac_address": interface.get("mac-address"),
5240 }
5241 )
5242 self.logger.info("NS update scaling info{}".format(scaling_info))
5243 stage[2] = "Terminating VDUs"
5244 if scaling_info.get("vdu-delete"):
5245 # scale_process = "RO"
5246 if self.ro_config.ng:
5247 await self._scale_ng_ro(
5248 logging_text,
5249 db_nsr,
5250 update_db_nslcmops,
5251 db_vnfr,
5252 scaling_info,
5253 stage,
5254 )
5255
5256 async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
5257 """This method is to Remove VNF instances from NS.
5258
5259 Args:
5260 nsr_id: NS instance id
5261 nslcmop_id: nslcmop id of update
5262 vnf_instance_id: id of the VNF instance to be removed
5263
5264 Returns:
5265 result: (str, str) COMPLETED/FAILED, details
5266 """
5267 try:
5268 db_nsr_update = {}
5269 logging_text = "Task ns={} update ".format(nsr_id)
5270 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5271 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5272 if check_vnfr_count > 1:
5273 stage = ["", "", ""]
5274 step = "Getting nslcmop from database"
5275 self.logger.debug(
5276 step + " after having waited for previous tasks to be completed"
5277 )
5278 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5279 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5280 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5281 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5282 """ db_vnfr = self.db.get_one(
5283 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5284
5285 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5286 await self.terminate_vdus(
5287 db_vnfr,
5288 member_vnf_index,
5289 db_nsr,
5290 update_db_nslcmops,
5291 stage,
5292 logging_text,
5293 )
5294
5295 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5296 constituent_vnfr.remove(db_vnfr.get("_id"))
5297 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
5298 "constituent-vnfr-ref"
5299 )
5300 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5301 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5302 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5303 return "COMPLETED", "Done"
5304 else:
5305 step = "Terminate VNF Failed with"
5306 raise LcmException(
5307 "{} Cannot terminate the last VNF in this NS.".format(
5308 vnf_instance_id
5309 )
5310 )
5311 except (LcmException, asyncio.CancelledError):
5312 raise
5313 except Exception as e:
5314 self.logger.debug("Error removing VNF {}".format(e))
5315 return "FAILED", "Error removing VNF {}".format(e)
5316
    async def _ns_redeploy_vnf(
        self,
        nsr_id,
        nslcmop_id,
        db_vnfd,
        db_vnfr,
        db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the current VDUs of the VNF, rewrites the VNF record from
        the latest descriptor revision, then instantiates the new resources
        through NG-RO as a scale-OUT operation.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the VNF external connection points from the latest descriptor
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # newVdur is precomputed by the caller and carried in the operation params
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {
                "revision": latest_vnfd_revision,
                "connection-point": new_vnfr_cp,
                "vdur": new_vdur,
                "ip-address": "",
            }
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # Re-read the record so the instantiation below sees the new vdur
            updated_db_vnfr = self.db.get_one(
                "vnfrs",
                {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id},
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is rebuilt each iteration and
                # never consumed below — parsing only validates the template;
                # confirm intent.
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.ng:
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info)
                )
                await self._scale_ng_ro(
                    logging_text,
                    db_nsr,
                    update_db_nslcmops,
                    updated_db_vnfr,
                    scaling_info,
                    stage,
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5442
5443 async def _ns_charm_upgrade(
5444 self,
5445 ee_id,
5446 charm_id,
5447 charm_type,
5448 path,
5449 timeout: float = None,
5450 ) -> (str, str):
5451 """This method upgrade charms in VNF instances
5452
5453 Args:
5454 ee_id: Execution environment id
5455 path: Local path to the charm
5456 charm_id: charm-id
5457 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5458 timeout: (Float) Timeout for the ns update operation
5459
5460 Returns:
5461 result: (str, str) COMPLETED/FAILED, details
5462 """
5463 try:
5464 charm_type = charm_type or "lxc_proxy_charm"
5465 output = await self.vca_map[charm_type].upgrade_charm(
5466 ee_id=ee_id,
5467 path=path,
5468 charm_id=charm_id,
5469 charm_type=charm_type,
5470 timeout=timeout or self.timeout.ns_update,
5471 )
5472
5473 if output:
5474 return "COMPLETED", output
5475
5476 except (LcmException, asyncio.CancelledError):
5477 raise
5478
5479 except Exception as e:
5480 self.logger.debug("Error upgrading charm {}".format(path))
5481
5482 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5483
    async def update(self, nsr_id, nslcmop_id):
        """Update NS according to different update types

        This method performs upgrade of VNF instances then updates the revision
        number in VNF record

        Three update types are handled, selected by operationParams.updateType:
        CHANGE_VNFPKG (charm upgrade or full VNF redeploy), REMOVE_VNF and
        OPERATE_VNF (start/stop/rebuild).

        Args:
            nsr_id: Network service will be updated
            nslcmop_id: ns lcm operation id

        Returns:
            It may raise DbException, LcmException, N2VCException, K8sException

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # Set the required variables to be filled up later
        db_nsr = None
        db_nslcmop_update = {}
        vnfr_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        error_description_nslcmop = ""
        exc = None
        change_type = "updated"
        detailed_status = ""
        member_vnf_index = None

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="UPDATING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            db_nslcmop = self.db.get_one(
                "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
            )
            update_type = db_nslcmop["operationParams"]["updateType"]

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # Saved so the original status can be restored on success/failure
            old_operational_status = db_nsr["operational-status"]
            db_nsr_update["operational-status"] = "updating"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            nsr_deployed = db_nsr["_admin"].get("deployed")

            if update_type == "CHANGE_VNFPKG":
                # Get the input parameters given through update request
                vnf_instance_id = db_nslcmop["operationParams"][
                    "changeVnfPackageData"
                ].get("vnfInstanceId")

                vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
                    "vnfdId"
                )
                timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")

                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
                )

                step = "Getting vnfds from database"
                # Latest VNFD
                latest_vnfd = self.db.get_one(
                    "vnfds", {"_id": vnfd_id}, fail_on_empty=False
                )
                latest_vnfd_revision = latest_vnfd["_admin"].get("revision")

                # Current VNFD
                current_vnf_revision = db_vnfr.get("revision", 1)
                current_vnfd = self.db.get_one(
                    "vnfds_revisions",
                    {"_id": vnfd_id + ":" + str(current_vnf_revision)},
                    fail_on_empty=False,
                )
                # Charm artifact paths will be filled up later
                (
                    current_charm_artifact_path,
                    target_charm_artifact_path,
                    charm_artifact_paths,
                    helm_artifacts,
                ) = ([], [], [], [])

                step = "Checking if revision has changed in VNFD"
                if current_vnf_revision != latest_vnfd_revision:
                    change_type = "policy_updated"

                    # There is new revision of VNFD, update operation is required
                    current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
                    latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)

                    step = "Removing the VNFD packages if they exist in the local path"
                    shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
                    shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)

                    step = "Get the VNFD packages from FSMongo"
                    self.fs.sync(from_path=latest_vnfd_path)
                    self.fs.sync(from_path=current_vnfd_path)

                    step = (
                        "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                    )
                    current_base_folder = current_vnfd["_admin"]["storage"]
                    latest_base_folder = latest_vnfd["_admin"]["storage"]

                    for vca_index, vca_deployed in enumerate(
                        get_iterable(nsr_deployed, "VCA")
                    ):
                        vnf_index = db_vnfr.get("member-vnf-index-ref")

                        # Getting charm-id and charm-type
                        if vca_deployed.get("member-vnf-index") == vnf_index:
                            vca_id = self.get_vca_id(db_vnfr, db_nsr)
                            vca_type = vca_deployed.get("type")
                            vdu_count_index = vca_deployed.get("vdu_count_index")

                            # Getting ee-id
                            ee_id = vca_deployed.get("ee_id")

                            step = "Getting descriptor config"
                            if current_vnfd.get("kdu"):
                                search_key = "kdu_name"
                            else:
                                search_key = "vnfd_id"

                            entity_id = vca_deployed.get(search_key)

                            descriptor_config = get_configuration(
                                current_vnfd, entity_id
                            )

                            # NOTE(review): if no configuration matches
                            # entity_id this may be None and the "in" test
                            # below would raise TypeError — confirm upstream.
                            if "execution-environment-list" in descriptor_config:
                                ee_list = descriptor_config.get(
                                    "execution-environment-list", []
                                )
                            else:
                                ee_list = []

                            # There could be several charm used in the same VNF
                            for ee_item in ee_list:
                                if ee_item.get("juju"):
                                    step = "Getting charm name"
                                    charm_name = ee_item["juju"].get("charm")

                                    step = "Setting Charm artifact paths"
                                    current_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            current_base_folder,
                                            charm_name,
                                            vca_type,
                                            current_vnf_revision,
                                        )
                                    )
                                    target_charm_artifact_path.append(
                                        get_charm_artifact_path(
                                            latest_base_folder,
                                            charm_name,
                                            vca_type,
                                            latest_vnfd_revision,
                                        )
                                    )
                                elif ee_item.get("helm-chart"):
                                    # add chart to list and all parameters
                                    step = "Getting helm chart name"
                                    chart_name = ee_item.get("helm-chart")
                                    if (
                                        ee_item.get("helm-version")
                                        and ee_item.get("helm-version") == "v2"
                                    ):
                                        vca_type = "helm"
                                    else:
                                        vca_type = "helm-v3"
                                    step = "Setting Helm chart artifact paths"

                                    helm_artifacts.append(
                                        {
                                            "current_artifact_path": get_charm_artifact_path(
                                                current_base_folder,
                                                chart_name,
                                                vca_type,
                                                current_vnf_revision,
                                            ),
                                            "target_artifact_path": get_charm_artifact_path(
                                                latest_base_folder,
                                                chart_name,
                                                vca_type,
                                                latest_vnfd_revision,
                                            ),
                                            "ee_id": ee_id,
                                            "vca_index": vca_index,
                                            "vdu_index": vdu_count_index,
                                        }
                                    )

                    charm_artifact_paths = zip(
                        current_charm_artifact_path, target_charm_artifact_path
                    )

                    step = "Checking if software version has changed in VNFD"
                    if find_software_version(current_vnfd) != find_software_version(
                        latest_vnfd
                    ):
                        step = "Checking if existing VNF has charm"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if current_charm_path:
                                raise LcmException(
                                    "Software version change is not supported as VNF instance {} has charm.".format(
                                        vnf_instance_id
                                    )
                                )

                        # There is no change in the charm package, then redeploy the VNF
                        # based on new descriptor
                        step = "Redeploying VNF"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        (result, detailed_status) = await self._ns_redeploy_vnf(
                            nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
                        )
                        if result == "FAILED":
                            nslcmop_operation_state = result
                            error_description_nslcmop = detailed_status
                        db_nslcmop_update["detailed-status"] = detailed_status
                        self.logger.debug(
                            logging_text
                            + " step {} Done with result {} {}".format(
                                step, nslcmop_operation_state, detailed_status
                            )
                        )

                    else:
                        # Same software version: only upgrade changed charms in place
                        step = "Checking if any charm package has changed or not"
                        for current_charm_path, target_charm_path in list(
                            charm_artifact_paths
                        ):
                            if (
                                current_charm_path
                                and target_charm_path
                                and self.check_charm_hash_changed(
                                    current_charm_path, target_charm_path
                                )
                            ):
                                step = "Checking whether VNF uses juju bundle"
                                if check_juju_bundle_existence(current_vnfd):
                                    raise LcmException(
                                        "Charm upgrade is not supported for the instance which"
                                        " uses juju-bundle: {}".format(
                                            check_juju_bundle_existence(current_vnfd)
                                        )
                                    )

                                step = "Upgrading Charm"
                                (
                                    result,
                                    detailed_status,
                                ) = await self._ns_charm_upgrade(
                                    ee_id=ee_id,
                                    charm_id=vca_id,
                                    charm_type=vca_type,
                                    path=self.fs.path + target_charm_path,
                                    timeout=timeout_seconds,
                                )

                                if result == "FAILED":
                                    nslcmop_operation_state = result
                                    error_description_nslcmop = detailed_status

                                db_nslcmop_update["detailed-status"] = detailed_status
                                self.logger.debug(
                                    logging_text
                                    + " step {} Done with result {} {}".format(
                                        step, nslcmop_operation_state, detailed_status
                                    )
                                )

                        step = "Updating policies"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        result = "COMPLETED"
                        detailed_status = "Done"
                        db_nslcmop_update["detailed-status"] = "Done"

                    # helm base EE
                    for item in helm_artifacts:
                        # Skip charts whose package content did not change
                        if not (
                            item["current_artifact_path"]
                            and item["target_artifact_path"]
                            and self.check_charm_hash_changed(
                                item["current_artifact_path"],
                                item["target_artifact_path"],
                            )
                        ):
                            continue
                        db_update_entry = "_admin.deployed.VCA.{}.".format(
                            item["vca_index"]
                        )
                        vnfr_id = db_vnfr["_id"]
                        osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": db_update_entry,
                        }
                        vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
                        await self.vca_map[vca_type].upgrade_execution_environment(
                            namespace=namespace,
                            helm_id=helm_id,
                            db_dict=db_dict,
                            config=osm_config,
                            artifact_path=item["target_artifact_path"],
                            vca_type=vca_type,
                        )
                        vnf_id = db_vnfr.get("vnfd-ref")
                        config_descriptor = get_configuration(latest_vnfd, vnf_id)
                        self.logger.debug("get ssh key block")
                        rw_mgmt_ip = None
                        if deep_get(
                            config_descriptor,
                            ("config-access", "ssh-access", "required"),
                        ):
                            # Needed to inject a ssh key
                            user = deep_get(
                                config_descriptor,
                                ("config-access", "ssh-access", "default-user"),
                            )
                            step = (
                                "Install configuration Software, getting public ssh key"
                            )
                            pub_key = await self.vca_map[
                                vca_type
                            ].get_ee_ssh_public__key(
                                ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                            )

                            step = (
                                "Insert public key into VM user={} ssh_key={}".format(
                                    user, pub_key
                                )
                            )
                            self.logger.debug(logging_text + step)

                            # wait for RO (ip-address) Insert pub_key into VM
                            rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                                logging_text,
                                nsr_id,
                                vnfr_id,
                                None,
                                item["vdu_index"],
                                user=user,
                                pub_key=pub_key,
                            )

                        initial_config_primitive_list = config_descriptor.get(
                            "initial-config-primitive"
                        )
                        # Re-run only the "config" initial primitive after the upgrade
                        config_primitive = next(
                            (
                                p
                                for p in initial_config_primitive_list
                                if p["name"] == "config"
                            ),
                            None,
                        )
                        if not config_primitive:
                            continue

                        deploy_params = {"OSM": get_osm_params(db_vnfr)}
                        if rw_mgmt_ip:
                            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
                        if db_vnfr.get("additionalParamsForVnf"):
                            deploy_params.update(
                                parse_yaml_strings(
                                    db_vnfr["additionalParamsForVnf"].copy()
                                )
                            )
                        primitive_params_ = self._map_primitive_params(
                            config_primitive, {}, deploy_params
                        )

                        step = "execute primitive '{}' params '{}'".format(
                            config_primitive["name"], primitive_params_
                        )
                        self.logger.debug(logging_text + step)
                        await self.vca_map[vca_type].exec_primitive(
                            ee_id=ee_id,
                            primitive_name=config_primitive["name"],
                            params_dict=primitive_params_,
                            db_dict=db_dict,
                            vca_id=vca_id,
                            vca_type=vca_type,
                        )

                        step = "Updating policies"
                        member_vnf_index = db_vnfr["member-vnf-index-ref"]
                        detailed_status = "Done"
                        db_nslcmop_update["detailed-status"] = "Done"

                    # If nslcmop_operation_state is None, so any operation is not failed.
                    if not nslcmop_operation_state:
                        nslcmop_operation_state = "COMPLETED"

                    # If update CHANGE_VNFPKG nslcmop_operation is successful
                    # vnf revision need to be updated
                    vnfr_update["revision"] = latest_vnfd_revision
                    self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)

                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )
            elif update_type == "REMOVE_VNF":
                # This part is included in https://osm.etsi.org/gerrit/11876
                vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
                member_vnf_index = db_vnfr["member-vnf-index-ref"]
                step = "Removing VNF"
                (result, detailed_status) = await self.remove_vnf(
                    nsr_id, nslcmop_id, vnf_instance_id
                )
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                change_type = "vnf_terminated"
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            elif update_type == "OPERATE_VNF":
                vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
                    "vnfInstanceId"
                ]
                operation_type = db_nslcmop["operationParams"]["operateVnfData"][
                    "changeStateTo"
                ]
                additional_param = db_nslcmop["operationParams"]["operateVnfData"][
                    "additionalParam"
                ]
                (result, detailed_status) = await self.rebuild_start_stop(
                    nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
                )
                if result == "FAILED":
                    nslcmop_operation_state = result
                    error_description_nslcmop = detailed_status
                db_nslcmop_update["detailed-status"] = detailed_status
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"
                self.logger.debug(
                    logging_text
                    + " task Done with result {} {}".format(
                        nslcmop_operation_state, detailed_status
                    )
                )

            # If nslcmop_operation_state is None, so any operation is not failed.
            # All operations are executed in overall.
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"
                db_nsr_update["operational-status"] = old_operational_status

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                # NOTE(review): old_operational_status is only bound after the
                # nsr read in the try block; an exception raised before that
                # point would make this line raise UnboundLocalError.
                db_nsr_update["operational-status"] = old_operational_status
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr["nsState"],
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                # Notify subscribers through kafka; best-effort only
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    if (
                        change_type in ("vnf_terminated", "policy_updated")
                        and member_vnf_index
                    ):
                        msg.update({"vnf_member_index": member_vnf_index})
                    await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
            # NOTE(review): returning inside finally suppresses any in-flight
            # exception (including CancelledError) — kept as-is.
            return nslcmop_operation_state, detailed_status
6027
6028 async def scale(self, nsr_id, nslcmop_id):
6029 # Try to lock HA task here
6030 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6031 if not task_is_locked_by_me:
6032 return
6033
6034 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6035 stage = ["", "", ""]
6036 tasks_dict_info = {}
6037 # ^ stage, step, VIM progress
6038 self.logger.debug(logging_text + "Enter")
6039 # get all needed from database
6040 db_nsr = None
6041 db_nslcmop_update = {}
6042 db_nsr_update = {}
6043 exc = None
6044 # in case of error, indicates what part of scale was failed to put nsr at error status
6045 scale_process = None
6046 old_operational_status = ""
6047 old_config_status = ""
6048 nsi_id = None
6049 try:
6050 # wait for any previous tasks in process
6051 step = "Waiting for previous operations to terminate"
6052 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6053 self._write_ns_status(
6054 nsr_id=nsr_id,
6055 ns_state=None,
6056 current_operation="SCALING",
6057 current_operation_id=nslcmop_id,
6058 )
6059
6060 step = "Getting nslcmop from database"
6061 self.logger.debug(
6062 step + " after having waited for previous tasks to be completed"
6063 )
6064 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6065
6066 step = "Getting nsr from database"
6067 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6068 old_operational_status = db_nsr["operational-status"]
6069 old_config_status = db_nsr["config-status"]
6070
6071 step = "Parsing scaling parameters"
6072 db_nsr_update["operational-status"] = "scaling"
6073 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6074 nsr_deployed = db_nsr["_admin"].get("deployed")
6075
6076 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6077 "scaleByStepData"
6078 ]["member-vnf-index"]
6079 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6080 "scaleByStepData"
6081 ]["scaling-group-descriptor"]
6082 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6083 # for backward compatibility
6084 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6085 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6086 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6087 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6088
6089 step = "Getting vnfr from database"
6090 db_vnfr = self.db.get_one(
6091 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6092 )
6093
6094 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6095
6096 step = "Getting vnfd from database"
6097 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6098
6099 base_folder = db_vnfd["_admin"]["storage"]
6100
6101 step = "Getting scaling-group-descriptor"
6102 scaling_descriptor = find_in_list(
6103 get_scaling_aspect(db_vnfd),
6104 lambda scale_desc: scale_desc["name"] == scaling_group,
6105 )
6106 if not scaling_descriptor:
6107 raise LcmException(
6108 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6109 "at vnfd:scaling-group-descriptor".format(scaling_group)
6110 )
6111
6112 step = "Sending scale order to VIM"
6113 # TODO check if ns is in a proper status
6114 nb_scale_op = 0
6115 if not db_nsr["_admin"].get("scaling-group"):
6116 self.update_db_2(
6117 "nsrs",
6118 nsr_id,
6119 {
6120 "_admin.scaling-group": [
6121 {"name": scaling_group, "nb-scale-op": 0}
6122 ]
6123 },
6124 )
6125 admin_scale_index = 0
6126 else:
6127 for admin_scale_index, admin_scale_info in enumerate(
6128 db_nsr["_admin"]["scaling-group"]
6129 ):
6130 if admin_scale_info["name"] == scaling_group:
6131 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6132 break
6133 else: # not found, set index one plus last element and add new entry with the name
6134 admin_scale_index += 1
6135 db_nsr_update[
6136 "_admin.scaling-group.{}.name".format(admin_scale_index)
6137 ] = scaling_group
6138
6139 vca_scaling_info = []
6140 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6141 if scaling_type == "SCALE_OUT":
6142 if "aspect-delta-details" not in scaling_descriptor:
6143 raise LcmException(
6144 "Aspect delta details not fount in scaling descriptor {}".format(
6145 scaling_descriptor["name"]
6146 )
6147 )
6148 # count if max-instance-count is reached
6149 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6150
6151 scaling_info["scaling_direction"] = "OUT"
6152 scaling_info["vdu-create"] = {}
6153 scaling_info["kdu-create"] = {}
6154 for delta in deltas:
6155 for vdu_delta in delta.get("vdu-delta", {}):
6156 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6157 # vdu_index also provides the number of instance of the targeted vdu
6158 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6159 cloud_init_text = self._get_vdu_cloud_init_content(
6160 vdud, db_vnfd
6161 )
6162 if cloud_init_text:
6163 additional_params = (
6164 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6165 or {}
6166 )
6167 cloud_init_list = []
6168
6169 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6170 max_instance_count = 10
6171 if vdu_profile and "max-number-of-instances" in vdu_profile:
6172 max_instance_count = vdu_profile.get(
6173 "max-number-of-instances", 10
6174 )
6175
6176 default_instance_num = get_number_of_instances(
6177 db_vnfd, vdud["id"]
6178 )
6179 instances_number = vdu_delta.get("number-of-instances", 1)
6180 nb_scale_op += instances_number
6181
6182 new_instance_count = nb_scale_op + default_instance_num
6183 # Control if new count is over max and vdu count is less than max.
6184 # Then assign new instance count
6185 if new_instance_count > max_instance_count > vdu_count:
6186 instances_number = new_instance_count - max_instance_count
6187 else:
6188 instances_number = instances_number
6189
6190 if new_instance_count > max_instance_count:
6191 raise LcmException(
6192 "reached the limit of {} (max-instance-count) "
6193 "scaling-out operations for the "
6194 "scaling-group-descriptor '{}'".format(
6195 nb_scale_op, scaling_group
6196 )
6197 )
6198 for x in range(vdu_delta.get("number-of-instances", 1)):
6199 if cloud_init_text:
6200 # TODO Information of its own ip is not available because db_vnfr is not updated.
6201 additional_params["OSM"] = get_osm_params(
6202 db_vnfr, vdu_delta["id"], vdu_index + x
6203 )
6204 cloud_init_list.append(
6205 self._parse_cloud_init(
6206 cloud_init_text,
6207 additional_params,
6208 db_vnfd["id"],
6209 vdud["id"],
6210 )
6211 )
6212 vca_scaling_info.append(
6213 {
6214 "osm_vdu_id": vdu_delta["id"],
6215 "member-vnf-index": vnf_index,
6216 "type": "create",
6217 "vdu_index": vdu_index + x,
6218 }
6219 )
6220 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6221 for kdu_delta in delta.get("kdu-resource-delta", {}):
6222 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6223 kdu_name = kdu_profile["kdu-name"]
6224 resource_name = kdu_profile.get("resource-name", "")
6225
6226 # Might have different kdus in the same delta
6227 # Should have list for each kdu
6228 if not scaling_info["kdu-create"].get(kdu_name, None):
6229 scaling_info["kdu-create"][kdu_name] = []
6230
6231 kdur = get_kdur(db_vnfr, kdu_name)
6232 if kdur.get("helm-chart"):
6233 k8s_cluster_type = "helm-chart-v3"
6234 self.logger.debug("kdur: {}".format(kdur))
6235 if (
6236 kdur.get("helm-version")
6237 and kdur.get("helm-version") == "v2"
6238 ):
6239 k8s_cluster_type = "helm-chart"
6240 elif kdur.get("juju-bundle"):
6241 k8s_cluster_type = "juju-bundle"
6242 else:
6243 raise LcmException(
6244 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6245 "juju-bundle. Maybe an old NBI version is running".format(
6246 db_vnfr["member-vnf-index-ref"], kdu_name
6247 )
6248 )
6249
6250 max_instance_count = 10
6251 if kdu_profile and "max-number-of-instances" in kdu_profile:
6252 max_instance_count = kdu_profile.get(
6253 "max-number-of-instances", 10
6254 )
6255
6256 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6257 deployed_kdu, _ = get_deployed_kdu(
6258 nsr_deployed, kdu_name, vnf_index
6259 )
6260 if deployed_kdu is None:
6261 raise LcmException(
6262 "KDU '{}' for vnf '{}' not deployed".format(
6263 kdu_name, vnf_index
6264 )
6265 )
6266 kdu_instance = deployed_kdu.get("kdu-instance")
6267 instance_num = await self.k8scluster_map[
6268 k8s_cluster_type
6269 ].get_scale_count(
6270 resource_name,
6271 kdu_instance,
6272 vca_id=vca_id,
6273 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6274 kdu_model=deployed_kdu.get("kdu-model"),
6275 )
6276 kdu_replica_count = instance_num + kdu_delta.get(
6277 "number-of-instances", 1
6278 )
6279
6280 # Control if new count is over max and instance_num is less than max.
6281 # Then assign max instance number to kdu replica count
6282 if kdu_replica_count > max_instance_count > instance_num:
6283 kdu_replica_count = max_instance_count
6284 if kdu_replica_count > max_instance_count:
6285 raise LcmException(
6286 "reached the limit of {} (max-instance-count) "
6287 "scaling-out operations for the "
6288 "scaling-group-descriptor '{}'".format(
6289 instance_num, scaling_group
6290 )
6291 )
6292
6293 for x in range(kdu_delta.get("number-of-instances", 1)):
6294 vca_scaling_info.append(
6295 {
6296 "osm_kdu_id": kdu_name,
6297 "member-vnf-index": vnf_index,
6298 "type": "create",
6299 "kdu_index": instance_num + x - 1,
6300 }
6301 )
6302 scaling_info["kdu-create"][kdu_name].append(
6303 {
6304 "member-vnf-index": vnf_index,
6305 "type": "create",
6306 "k8s-cluster-type": k8s_cluster_type,
6307 "resource-name": resource_name,
6308 "scale": kdu_replica_count,
6309 }
6310 )
6311 elif scaling_type == "SCALE_IN":
6312 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6313
6314 scaling_info["scaling_direction"] = "IN"
6315 scaling_info["vdu-delete"] = {}
6316 scaling_info["kdu-delete"] = {}
6317
6318 for delta in deltas:
6319 for vdu_delta in delta.get("vdu-delta", {}):
6320 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6321 min_instance_count = 0
6322 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6323 if vdu_profile and "min-number-of-instances" in vdu_profile:
6324 min_instance_count = vdu_profile["min-number-of-instances"]
6325
6326 default_instance_num = get_number_of_instances(
6327 db_vnfd, vdu_delta["id"]
6328 )
6329 instance_num = vdu_delta.get("number-of-instances", 1)
6330 nb_scale_op -= instance_num
6331
6332 new_instance_count = nb_scale_op + default_instance_num
6333
6334 if new_instance_count < min_instance_count < vdu_count:
6335 instances_number = min_instance_count - new_instance_count
6336 else:
6337 instances_number = instance_num
6338
6339 if new_instance_count < min_instance_count:
6340 raise LcmException(
6341 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6342 "scaling-group-descriptor '{}'".format(
6343 nb_scale_op, scaling_group
6344 )
6345 )
6346 for x in range(vdu_delta.get("number-of-instances", 1)):
6347 vca_scaling_info.append(
6348 {
6349 "osm_vdu_id": vdu_delta["id"],
6350 "member-vnf-index": vnf_index,
6351 "type": "delete",
6352 "vdu_index": vdu_index - 1 - x,
6353 }
6354 )
6355 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6356 for kdu_delta in delta.get("kdu-resource-delta", {}):
6357 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6358 kdu_name = kdu_profile["kdu-name"]
6359 resource_name = kdu_profile.get("resource-name", "")
6360
6361 if not scaling_info["kdu-delete"].get(kdu_name, None):
6362 scaling_info["kdu-delete"][kdu_name] = []
6363
6364 kdur = get_kdur(db_vnfr, kdu_name)
6365 if kdur.get("helm-chart"):
6366 k8s_cluster_type = "helm-chart-v3"
6367 self.logger.debug("kdur: {}".format(kdur))
6368 if (
6369 kdur.get("helm-version")
6370 and kdur.get("helm-version") == "v2"
6371 ):
6372 k8s_cluster_type = "helm-chart"
6373 elif kdur.get("juju-bundle"):
6374 k8s_cluster_type = "juju-bundle"
6375 else:
6376 raise LcmException(
6377 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6378 "juju-bundle. Maybe an old NBI version is running".format(
6379 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6380 )
6381 )
6382
6383 min_instance_count = 0
6384 if kdu_profile and "min-number-of-instances" in kdu_profile:
6385 min_instance_count = kdu_profile["min-number-of-instances"]
6386
6387 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6388 deployed_kdu, _ = get_deployed_kdu(
6389 nsr_deployed, kdu_name, vnf_index
6390 )
6391 if deployed_kdu is None:
6392 raise LcmException(
6393 "KDU '{}' for vnf '{}' not deployed".format(
6394 kdu_name, vnf_index
6395 )
6396 )
6397 kdu_instance = deployed_kdu.get("kdu-instance")
6398 instance_num = await self.k8scluster_map[
6399 k8s_cluster_type
6400 ].get_scale_count(
6401 resource_name,
6402 kdu_instance,
6403 vca_id=vca_id,
6404 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6405 kdu_model=deployed_kdu.get("kdu-model"),
6406 )
6407 kdu_replica_count = instance_num - kdu_delta.get(
6408 "number-of-instances", 1
6409 )
6410
6411 if kdu_replica_count < min_instance_count < instance_num:
6412 kdu_replica_count = min_instance_count
6413 if kdu_replica_count < min_instance_count:
6414 raise LcmException(
6415 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6416 "scaling-group-descriptor '{}'".format(
6417 instance_num, scaling_group
6418 )
6419 )
6420
6421 for x in range(kdu_delta.get("number-of-instances", 1)):
6422 vca_scaling_info.append(
6423 {
6424 "osm_kdu_id": kdu_name,
6425 "member-vnf-index": vnf_index,
6426 "type": "delete",
6427 "kdu_index": instance_num - x - 1,
6428 }
6429 )
6430 scaling_info["kdu-delete"][kdu_name].append(
6431 {
6432 "member-vnf-index": vnf_index,
6433 "type": "delete",
6434 "k8s-cluster-type": k8s_cluster_type,
6435 "resource-name": resource_name,
6436 "scale": kdu_replica_count,
6437 }
6438 )
6439
6440 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6441 vdu_delete = copy(scaling_info.get("vdu-delete"))
6442 if scaling_info["scaling_direction"] == "IN":
6443 for vdur in reversed(db_vnfr["vdur"]):
6444 if vdu_delete.get(vdur["vdu-id-ref"]):
6445 vdu_delete[vdur["vdu-id-ref"]] -= 1
6446 scaling_info["vdu"].append(
6447 {
6448 "name": vdur.get("name") or vdur.get("vdu-name"),
6449 "vdu_id": vdur["vdu-id-ref"],
6450 "interface": [],
6451 }
6452 )
6453 for interface in vdur["interfaces"]:
6454 scaling_info["vdu"][-1]["interface"].append(
6455 {
6456 "name": interface["name"],
6457 "ip_address": interface["ip-address"],
6458 "mac_address": interface.get("mac-address"),
6459 }
6460 )
6461 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6462
6463 # PRE-SCALE BEGIN
6464 step = "Executing pre-scale vnf-config-primitive"
6465 if scaling_descriptor.get("scaling-config-action"):
6466 for scaling_config_action in scaling_descriptor[
6467 "scaling-config-action"
6468 ]:
6469 if (
6470 scaling_config_action.get("trigger") == "pre-scale-in"
6471 and scaling_type == "SCALE_IN"
6472 ) or (
6473 scaling_config_action.get("trigger") == "pre-scale-out"
6474 and scaling_type == "SCALE_OUT"
6475 ):
6476 vnf_config_primitive = scaling_config_action[
6477 "vnf-config-primitive-name-ref"
6478 ]
6479 step = db_nslcmop_update[
6480 "detailed-status"
6481 ] = "executing pre-scale scaling-config-action '{}'".format(
6482 vnf_config_primitive
6483 )
6484
6485 # look for primitive
6486 for config_primitive in (
6487 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6488 ).get("config-primitive", ()):
6489 if config_primitive["name"] == vnf_config_primitive:
6490 break
6491 else:
6492 raise LcmException(
6493 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6494 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6495 "primitive".format(scaling_group, vnf_config_primitive)
6496 )
6497
6498 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6499 if db_vnfr.get("additionalParamsForVnf"):
6500 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6501
6502 scale_process = "VCA"
6503 db_nsr_update["config-status"] = "configuring pre-scaling"
6504 primitive_params = self._map_primitive_params(
6505 config_primitive, {}, vnfr_params
6506 )
6507
6508 # Pre-scale retry check: Check if this sub-operation has been executed before
6509 op_index = self._check_or_add_scale_suboperation(
6510 db_nslcmop,
6511 vnf_index,
6512 vnf_config_primitive,
6513 primitive_params,
6514 "PRE-SCALE",
6515 )
6516 if op_index == self.SUBOPERATION_STATUS_SKIP:
6517 # Skip sub-operation
6518 result = "COMPLETED"
6519 result_detail = "Done"
6520 self.logger.debug(
6521 logging_text
6522 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6523 vnf_config_primitive, result, result_detail
6524 )
6525 )
6526 else:
6527 if op_index == self.SUBOPERATION_STATUS_NEW:
6528 # New sub-operation: Get index of this sub-operation
6529 op_index = (
6530 len(db_nslcmop.get("_admin", {}).get("operations"))
6531 - 1
6532 )
6533 self.logger.debug(
6534 logging_text
6535 + "vnf_config_primitive={} New sub-operation".format(
6536 vnf_config_primitive
6537 )
6538 )
6539 else:
6540 # retry: Get registered params for this existing sub-operation
6541 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6542 op_index
6543 ]
6544 vnf_index = op.get("member_vnf_index")
6545 vnf_config_primitive = op.get("primitive")
6546 primitive_params = op.get("primitive_params")
6547 self.logger.debug(
6548 logging_text
6549 + "vnf_config_primitive={} Sub-operation retry".format(
6550 vnf_config_primitive
6551 )
6552 )
6553 # Execute the primitive, either with new (first-time) or registered (reintent) args
6554 ee_descriptor_id = config_primitive.get(
6555 "execution-environment-ref"
6556 )
6557 primitive_name = config_primitive.get(
6558 "execution-environment-primitive", vnf_config_primitive
6559 )
6560 ee_id, vca_type = self._look_for_deployed_vca(
6561 nsr_deployed["VCA"],
6562 member_vnf_index=vnf_index,
6563 vdu_id=None,
6564 vdu_count_index=None,
6565 ee_descriptor_id=ee_descriptor_id,
6566 )
6567 result, result_detail = await self._ns_execute_primitive(
6568 ee_id,
6569 primitive_name,
6570 primitive_params,
6571 vca_type=vca_type,
6572 vca_id=vca_id,
6573 )
6574 self.logger.debug(
6575 logging_text
6576 + "vnf_config_primitive={} Done with result {} {}".format(
6577 vnf_config_primitive, result, result_detail
6578 )
6579 )
6580 # Update operationState = COMPLETED | FAILED
6581 self._update_suboperation_status(
6582 db_nslcmop, op_index, result, result_detail
6583 )
6584
6585 if result == "FAILED":
6586 raise LcmException(result_detail)
6587 db_nsr_update["config-status"] = old_config_status
6588 scale_process = None
6589 # PRE-SCALE END
6590
6591 db_nsr_update[
6592 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6593 ] = nb_scale_op
6594 db_nsr_update[
6595 "_admin.scaling-group.{}.time".format(admin_scale_index)
6596 ] = time()
6597
6598 # SCALE-IN VCA - BEGIN
6599 if vca_scaling_info:
6600 step = db_nslcmop_update[
6601 "detailed-status"
6602 ] = "Deleting the execution environments"
6603 scale_process = "VCA"
6604 for vca_info in vca_scaling_info:
6605 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6606 member_vnf_index = str(vca_info["member-vnf-index"])
6607 self.logger.debug(
6608 logging_text + "vdu info: {}".format(vca_info)
6609 )
6610 if vca_info.get("osm_vdu_id"):
6611 vdu_id = vca_info["osm_vdu_id"]
6612 vdu_index = int(vca_info["vdu_index"])
6613 stage[
6614 1
6615 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6616 member_vnf_index, vdu_id, vdu_index
6617 )
6618 stage[2] = step = "Scaling in VCA"
6619 self._write_op_status(op_id=nslcmop_id, stage=stage)
6620 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6621 config_update = db_nsr["configurationStatus"]
6622 for vca_index, vca in enumerate(vca_update):
6623 if (
6624 (vca or vca.get("ee_id"))
6625 and vca["member-vnf-index"] == member_vnf_index
6626 and vca["vdu_count_index"] == vdu_index
6627 ):
6628 if vca.get("vdu_id"):
6629 config_descriptor = get_configuration(
6630 db_vnfd, vca.get("vdu_id")
6631 )
6632 elif vca.get("kdu_name"):
6633 config_descriptor = get_configuration(
6634 db_vnfd, vca.get("kdu_name")
6635 )
6636 else:
6637 config_descriptor = get_configuration(
6638 db_vnfd, db_vnfd["id"]
6639 )
6640 operation_params = (
6641 db_nslcmop.get("operationParams") or {}
6642 )
6643 exec_terminate_primitives = not operation_params.get(
6644 "skip_terminate_primitives"
6645 ) and vca.get("needed_terminate")
6646 task = asyncio.ensure_future(
6647 asyncio.wait_for(
6648 self.destroy_N2VC(
6649 logging_text,
6650 db_nslcmop,
6651 vca,
6652 config_descriptor,
6653 vca_index,
6654 destroy_ee=True,
6655 exec_primitives=exec_terminate_primitives,
6656 scaling_in=True,
6657 vca_id=vca_id,
6658 ),
6659 timeout=self.timeout.charm_delete,
6660 )
6661 )
6662 tasks_dict_info[task] = "Terminating VCA {}".format(
6663 vca.get("ee_id")
6664 )
6665 del vca_update[vca_index]
6666 del config_update[vca_index]
6667 # wait for pending tasks of terminate primitives
6668 if tasks_dict_info:
6669 self.logger.debug(
6670 logging_text
6671 + "Waiting for tasks {}".format(
6672 list(tasks_dict_info.keys())
6673 )
6674 )
6675 error_list = await self._wait_for_tasks(
6676 logging_text,
6677 tasks_dict_info,
6678 min(
6679 self.timeout.charm_delete, self.timeout.ns_terminate
6680 ),
6681 stage,
6682 nslcmop_id,
6683 )
6684 tasks_dict_info.clear()
6685 if error_list:
6686 raise LcmException("; ".join(error_list))
6687
6688 db_vca_and_config_update = {
6689 "_admin.deployed.VCA": vca_update,
6690 "configurationStatus": config_update,
6691 }
6692 self.update_db_2(
6693 "nsrs", db_nsr["_id"], db_vca_and_config_update
6694 )
6695 scale_process = None
6696 # SCALE-IN VCA - END
6697
6698 # SCALE RO - BEGIN
6699 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6700 scale_process = "RO"
6701 if self.ro_config.ng:
6702 await self._scale_ng_ro(
6703 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6704 )
6705 scaling_info.pop("vdu-create", None)
6706 scaling_info.pop("vdu-delete", None)
6707
6708 scale_process = None
6709 # SCALE RO - END
6710
6711 # SCALE KDU - BEGIN
6712 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6713 scale_process = "KDU"
6714 await self._scale_kdu(
6715 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6716 )
6717 scaling_info.pop("kdu-create", None)
6718 scaling_info.pop("kdu-delete", None)
6719
6720 scale_process = None
6721 # SCALE KDU - END
6722
6723 if db_nsr_update:
6724 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6725
6726 # SCALE-UP VCA - BEGIN
6727 if vca_scaling_info:
6728 step = db_nslcmop_update[
6729 "detailed-status"
6730 ] = "Creating new execution environments"
6731 scale_process = "VCA"
6732 for vca_info in vca_scaling_info:
6733 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6734 member_vnf_index = str(vca_info["member-vnf-index"])
6735 self.logger.debug(
6736 logging_text + "vdu info: {}".format(vca_info)
6737 )
6738 vnfd_id = db_vnfr["vnfd-ref"]
6739 if vca_info.get("osm_vdu_id"):
6740 vdu_index = int(vca_info["vdu_index"])
6741 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6742 if db_vnfr.get("additionalParamsForVnf"):
6743 deploy_params.update(
6744 parse_yaml_strings(
6745 db_vnfr["additionalParamsForVnf"].copy()
6746 )
6747 )
6748 descriptor_config = get_configuration(
6749 db_vnfd, db_vnfd["id"]
6750 )
6751 if descriptor_config:
6752 vdu_id = None
6753 vdu_name = None
6754 kdu_name = None
6755 kdu_index = None
6756 self._deploy_n2vc(
6757 logging_text=logging_text
6758 + "member_vnf_index={} ".format(member_vnf_index),
6759 db_nsr=db_nsr,
6760 db_vnfr=db_vnfr,
6761 nslcmop_id=nslcmop_id,
6762 nsr_id=nsr_id,
6763 nsi_id=nsi_id,
6764 vnfd_id=vnfd_id,
6765 vdu_id=vdu_id,
6766 kdu_name=kdu_name,
6767 kdu_index=kdu_index,
6768 member_vnf_index=member_vnf_index,
6769 vdu_index=vdu_index,
6770 vdu_name=vdu_name,
6771 deploy_params=deploy_params,
6772 descriptor_config=descriptor_config,
6773 base_folder=base_folder,
6774 task_instantiation_info=tasks_dict_info,
6775 stage=stage,
6776 )
6777 vdu_id = vca_info["osm_vdu_id"]
6778 vdur = find_in_list(
6779 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6780 )
6781 descriptor_config = get_configuration(db_vnfd, vdu_id)
6782 if vdur.get("additionalParams"):
6783 deploy_params_vdu = parse_yaml_strings(
6784 vdur["additionalParams"]
6785 )
6786 else:
6787 deploy_params_vdu = deploy_params
6788 deploy_params_vdu["OSM"] = get_osm_params(
6789 db_vnfr, vdu_id, vdu_count_index=vdu_index
6790 )
6791 if descriptor_config:
6792 vdu_name = None
6793 kdu_name = None
6794 kdu_index = None
6795 stage[
6796 1
6797 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6798 member_vnf_index, vdu_id, vdu_index
6799 )
6800 stage[2] = step = "Scaling out VCA"
6801 self._write_op_status(op_id=nslcmop_id, stage=stage)
6802 self._deploy_n2vc(
6803 logging_text=logging_text
6804 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6805 member_vnf_index, vdu_id, vdu_index
6806 ),
6807 db_nsr=db_nsr,
6808 db_vnfr=db_vnfr,
6809 nslcmop_id=nslcmop_id,
6810 nsr_id=nsr_id,
6811 nsi_id=nsi_id,
6812 vnfd_id=vnfd_id,
6813 vdu_id=vdu_id,
6814 kdu_name=kdu_name,
6815 member_vnf_index=member_vnf_index,
6816 vdu_index=vdu_index,
6817 kdu_index=kdu_index,
6818 vdu_name=vdu_name,
6819 deploy_params=deploy_params_vdu,
6820 descriptor_config=descriptor_config,
6821 base_folder=base_folder,
6822 task_instantiation_info=tasks_dict_info,
6823 stage=stage,
6824 )
6825 # SCALE-UP VCA - END
6826 scale_process = None
6827
6828 # POST-SCALE BEGIN
6829 # execute primitive service POST-SCALING
6830 step = "Executing post-scale vnf-config-primitive"
6831 if scaling_descriptor.get("scaling-config-action"):
6832 for scaling_config_action in scaling_descriptor[
6833 "scaling-config-action"
6834 ]:
6835 if (
6836 scaling_config_action.get("trigger") == "post-scale-in"
6837 and scaling_type == "SCALE_IN"
6838 ) or (
6839 scaling_config_action.get("trigger") == "post-scale-out"
6840 and scaling_type == "SCALE_OUT"
6841 ):
6842 vnf_config_primitive = scaling_config_action[
6843 "vnf-config-primitive-name-ref"
6844 ]
6845 step = db_nslcmop_update[
6846 "detailed-status"
6847 ] = "executing post-scale scaling-config-action '{}'".format(
6848 vnf_config_primitive
6849 )
6850
6851 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6852 if db_vnfr.get("additionalParamsForVnf"):
6853 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6854
6855 # look for primitive
6856 for config_primitive in (
6857 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6858 ).get("config-primitive", ()):
6859 if config_primitive["name"] == vnf_config_primitive:
6860 break
6861 else:
6862 raise LcmException(
6863 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6864 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6865 "config-primitive".format(
6866 scaling_group, vnf_config_primitive
6867 )
6868 )
6869 scale_process = "VCA"
6870 db_nsr_update["config-status"] = "configuring post-scaling"
6871 primitive_params = self._map_primitive_params(
6872 config_primitive, {}, vnfr_params
6873 )
6874
6875 # Post-scale retry check: Check if this sub-operation has been executed before
6876 op_index = self._check_or_add_scale_suboperation(
6877 db_nslcmop,
6878 vnf_index,
6879 vnf_config_primitive,
6880 primitive_params,
6881 "POST-SCALE",
6882 )
6883 if op_index == self.SUBOPERATION_STATUS_SKIP:
6884 # Skip sub-operation
6885 result = "COMPLETED"
6886 result_detail = "Done"
6887 self.logger.debug(
6888 logging_text
6889 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6890 vnf_config_primitive, result, result_detail
6891 )
6892 )
6893 else:
6894 if op_index == self.SUBOPERATION_STATUS_NEW:
6895 # New sub-operation: Get index of this sub-operation
6896 op_index = (
6897 len(db_nslcmop.get("_admin", {}).get("operations"))
6898 - 1
6899 )
6900 self.logger.debug(
6901 logging_text
6902 + "vnf_config_primitive={} New sub-operation".format(
6903 vnf_config_primitive
6904 )
6905 )
6906 else:
6907 # retry: Get registered params for this existing sub-operation
6908 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6909 op_index
6910 ]
6911 vnf_index = op.get("member_vnf_index")
6912 vnf_config_primitive = op.get("primitive")
6913 primitive_params = op.get("primitive_params")
6914 self.logger.debug(
6915 logging_text
6916 + "vnf_config_primitive={} Sub-operation retry".format(
6917 vnf_config_primitive
6918 )
6919 )
6920 # Execute the primitive, either with new (first-time) or registered (reintent) args
6921 ee_descriptor_id = config_primitive.get(
6922 "execution-environment-ref"
6923 )
6924 primitive_name = config_primitive.get(
6925 "execution-environment-primitive", vnf_config_primitive
6926 )
6927 ee_id, vca_type = self._look_for_deployed_vca(
6928 nsr_deployed["VCA"],
6929 member_vnf_index=vnf_index,
6930 vdu_id=None,
6931 vdu_count_index=None,
6932 ee_descriptor_id=ee_descriptor_id,
6933 )
6934 result, result_detail = await self._ns_execute_primitive(
6935 ee_id,
6936 primitive_name,
6937 primitive_params,
6938 vca_type=vca_type,
6939 vca_id=vca_id,
6940 )
6941 self.logger.debug(
6942 logging_text
6943 + "vnf_config_primitive={} Done with result {} {}".format(
6944 vnf_config_primitive, result, result_detail
6945 )
6946 )
6947 # Update operationState = COMPLETED | FAILED
6948 self._update_suboperation_status(
6949 db_nslcmop, op_index, result, result_detail
6950 )
6951
6952 if result == "FAILED":
6953 raise LcmException(result_detail)
6954 db_nsr_update["config-status"] = old_config_status
6955 scale_process = None
6956 # POST-SCALE END
6957
6958 db_nsr_update[
6959 "detailed-status"
6960 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6961 db_nsr_update["operational-status"] = (
6962 "running"
6963 if old_operational_status == "failed"
6964 else old_operational_status
6965 )
6966 db_nsr_update["config-status"] = old_config_status
6967 return
6968 except (
6969 ROclient.ROClientException,
6970 DbException,
6971 LcmException,
6972 NgRoException,
6973 ) as e:
6974 self.logger.error(logging_text + "Exit Exception {}".format(e))
6975 exc = e
6976 except asyncio.CancelledError:
6977 self.logger.error(
6978 logging_text + "Cancelled Exception while '{}'".format(step)
6979 )
6980 exc = "Operation was cancelled"
6981 except Exception as e:
6982 exc = traceback.format_exc()
6983 self.logger.critical(
6984 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6985 exc_info=True,
6986 )
6987 finally:
6988 self._write_ns_status(
6989 nsr_id=nsr_id,
6990 ns_state=None,
6991 current_operation="IDLE",
6992 current_operation_id=None,
6993 )
6994 if tasks_dict_info:
6995 stage[1] = "Waiting for instantiate pending tasks."
6996 self.logger.debug(logging_text + stage[1])
6997 exc = await self._wait_for_tasks(
6998 logging_text,
6999 tasks_dict_info,
7000 self.timeout.ns_deploy,
7001 stage,
7002 nslcmop_id,
7003 nsr_id=nsr_id,
7004 )
7005 if exc:
7006 db_nslcmop_update[
7007 "detailed-status"
7008 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7009 nslcmop_operation_state = "FAILED"
7010 if db_nsr:
7011 db_nsr_update["operational-status"] = old_operational_status
7012 db_nsr_update["config-status"] = old_config_status
7013 db_nsr_update["detailed-status"] = ""
7014 if scale_process:
7015 if "VCA" in scale_process:
7016 db_nsr_update["config-status"] = "failed"
7017 if "RO" in scale_process:
7018 db_nsr_update["operational-status"] = "failed"
7019 db_nsr_update[
7020 "detailed-status"
7021 ] = "FAILED scaling nslcmop={} {}: {}".format(
7022 nslcmop_id, step, exc
7023 )
7024 else:
7025 error_description_nslcmop = None
7026 nslcmop_operation_state = "COMPLETED"
7027 db_nslcmop_update["detailed-status"] = "Done"
7028
7029 self._write_op_status(
7030 op_id=nslcmop_id,
7031 stage="",
7032 error_message=error_description_nslcmop,
7033 operation_state=nslcmop_operation_state,
7034 other_update=db_nslcmop_update,
7035 )
7036 if db_nsr:
7037 self._write_ns_status(
7038 nsr_id=nsr_id,
7039 ns_state=None,
7040 current_operation="IDLE",
7041 current_operation_id=None,
7042 other_update=db_nsr_update,
7043 )
7044
7045 if nslcmop_operation_state:
7046 try:
7047 msg = {
7048 "nsr_id": nsr_id,
7049 "nslcmop_id": nslcmop_id,
7050 "operationState": nslcmop_operation_state,
7051 }
7052 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7053 except Exception as e:
7054 self.logger.error(
7055 logging_text + "kafka_write notification Exception {}".format(e)
7056 )
7057 self.logger.debug(logging_text + "Exit")
7058 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7059
7060 async def _scale_kdu(
7061 self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
7062 ):
7063 _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
7064 for kdu_name in _scaling_info:
7065 for kdu_scaling_info in _scaling_info[kdu_name]:
7066 deployed_kdu, index = get_deployed_kdu(
7067 nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
7068 )
7069 cluster_uuid = deployed_kdu["k8scluster-uuid"]
7070 kdu_instance = deployed_kdu["kdu-instance"]
7071 kdu_model = deployed_kdu.get("kdu-model")
7072 scale = int(kdu_scaling_info["scale"])
7073 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
7074
7075 db_dict = {
7076 "collection": "nsrs",
7077 "filter": {"_id": nsr_id},
7078 "path": "_admin.deployed.K8s.{}".format(index),
7079 }
7080
7081 step = "scaling application {}".format(
7082 kdu_scaling_info["resource-name"]
7083 )
7084 self.logger.debug(logging_text + step)
7085
7086 if kdu_scaling_info["type"] == "delete":
7087 kdu_config = get_configuration(db_vnfd, kdu_name)
7088 if (
7089 kdu_config
7090 and kdu_config.get("terminate-config-primitive")
7091 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7092 ):
7093 terminate_config_primitive_list = kdu_config.get(
7094 "terminate-config-primitive"
7095 )
7096 terminate_config_primitive_list.sort(
7097 key=lambda val: int(val["seq"])
7098 )
7099
7100 for (
7101 terminate_config_primitive
7102 ) in terminate_config_primitive_list:
7103 primitive_params_ = self._map_primitive_params(
7104 terminate_config_primitive, {}, {}
7105 )
7106 step = "execute terminate config primitive"
7107 self.logger.debug(logging_text + step)
7108 await asyncio.wait_for(
7109 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7110 cluster_uuid=cluster_uuid,
7111 kdu_instance=kdu_instance,
7112 primitive_name=terminate_config_primitive["name"],
7113 params=primitive_params_,
7114 db_dict=db_dict,
7115 total_timeout=self.timeout.primitive,
7116 vca_id=vca_id,
7117 ),
7118 timeout=self.timeout.primitive
7119 * self.timeout.primitive_outer_factor,
7120 )
7121
7122 await asyncio.wait_for(
7123 self.k8scluster_map[k8s_cluster_type].scale(
7124 kdu_instance=kdu_instance,
7125 scale=scale,
7126 resource_name=kdu_scaling_info["resource-name"],
7127 total_timeout=self.timeout.scale_on_error,
7128 vca_id=vca_id,
7129 cluster_uuid=cluster_uuid,
7130 kdu_model=kdu_model,
7131 atomic=True,
7132 db_dict=db_dict,
7133 ),
7134 timeout=self.timeout.scale_on_error
7135 * self.timeout.scale_on_error_outer_factor,
7136 )
7137
7138 if kdu_scaling_info["type"] == "create":
7139 kdu_config = get_configuration(db_vnfd, kdu_name)
7140 if (
7141 kdu_config
7142 and kdu_config.get("initial-config-primitive")
7143 and get_juju_ee_ref(db_vnfd, kdu_name) is None
7144 ):
7145 initial_config_primitive_list = kdu_config.get(
7146 "initial-config-primitive"
7147 )
7148 initial_config_primitive_list.sort(
7149 key=lambda val: int(val["seq"])
7150 )
7151
7152 for initial_config_primitive in initial_config_primitive_list:
7153 primitive_params_ = self._map_primitive_params(
7154 initial_config_primitive, {}, {}
7155 )
7156 step = "execute initial config primitive"
7157 self.logger.debug(logging_text + step)
7158 await asyncio.wait_for(
7159 self.k8scluster_map[k8s_cluster_type].exec_primitive(
7160 cluster_uuid=cluster_uuid,
7161 kdu_instance=kdu_instance,
7162 primitive_name=initial_config_primitive["name"],
7163 params=primitive_params_,
7164 db_dict=db_dict,
7165 vca_id=vca_id,
7166 ),
7167 timeout=600,
7168 )
7169
7170 async def _scale_ng_ro(
7171 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7172 ):
7173 nsr_id = db_nslcmop["nsInstanceId"]
7174 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7175 db_vnfrs = {}
7176
7177 # read from db: vnfd's for every vnf
7178 db_vnfds = []
7179
7180 # for each vnf in ns, read vnfd
7181 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7182 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7183 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7184 # if we haven't this vnfd, read it from db
7185 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7186 # read from db
7187 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7188 db_vnfds.append(vnfd)
7189 n2vc_key = self.n2vc.get_public_key()
7190 n2vc_key_list = [n2vc_key]
7191 self.scale_vnfr(
7192 db_vnfr,
7193 vdu_scaling_info.get("vdu-create"),
7194 vdu_scaling_info.get("vdu-delete"),
7195 mark_delete=True,
7196 )
7197 # db_vnfr has been updated, update db_vnfrs to use it
7198 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7199 await self._instantiate_ng_ro(
7200 logging_text,
7201 nsr_id,
7202 db_nsd,
7203 db_nsr,
7204 db_nslcmop,
7205 db_vnfrs,
7206 db_vnfds,
7207 n2vc_key_list,
7208 stage=stage,
7209 start_deploy=time(),
7210 timeout_ns_deploy=self.timeout.ns_deploy,
7211 )
7212 if vdu_scaling_info.get("vdu-delete"):
7213 self.scale_vnfr(
7214 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7215 )
7216
7217 async def extract_prometheus_scrape_jobs(
7218 self,
7219 ee_id: str,
7220 artifact_path: str,
7221 ee_config_descriptor: dict,
7222 vnfr_id: str,
7223 nsr_id: str,
7224 target_ip: str,
7225 element_type: str,
7226 vnf_member_index: str = "",
7227 vdu_id: str = "",
7228 vdu_index: int = None,
7229 kdu_name: str = "",
7230 kdu_index: int = None,
7231 ) -> dict:
7232 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7233 This method will wait until the corresponding VDU or KDU is fully instantiated
7234
7235 Args:
7236 ee_id (str): Execution Environment ID
7237 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7238 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7239 vnfr_id (str): VNFR ID where this EE applies
7240 nsr_id (str): NSR ID where this EE applies
7241 target_ip (str): VDU/KDU instance IP address
7242 element_type (str): NS or VNF or VDU or KDU
7243 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7244 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7245 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7246 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7247 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7248
7249 Raises:
7250 LcmException: When the VDU or KDU instance was not found in an hour
7251
7252 Returns:
7253 _type_: Prometheus jobs
7254 """
7255 # default the vdur and kdur names to an empty string, to avoid any later
7256 # problem with Prometheus when the element type is not VDU or KDU
7257 vdur_name = ""
7258 kdur_name = ""
7259
7260 # look if exist a file called 'prometheus*.j2' and
7261 artifact_content = self.fs.dir_ls(artifact_path)
7262 job_file = next(
7263 (
7264 f
7265 for f in artifact_content
7266 if f.startswith("prometheus") and f.endswith(".j2")
7267 ),
7268 None,
7269 )
7270 if not job_file:
7271 return
7272 with self.fs.file_open((artifact_path, job_file), "r") as f:
7273 job_data = f.read()
7274
7275 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7276 if element_type in ("VDU", "KDU"):
7277 for _ in range(360):
7278 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
7279 if vdu_id and vdu_index is not None:
7280 vdur = next(
7281 (
7282 x
7283 for x in get_iterable(db_vnfr, "vdur")
7284 if (
7285 x.get("vdu-id-ref") == vdu_id
7286 and x.get("count-index") == vdu_index
7287 )
7288 ),
7289 {},
7290 )
7291 if vdur.get("name"):
7292 vdur_name = vdur.get("name")
7293 break
7294 if kdu_name and kdu_index is not None:
7295 kdur = next(
7296 (
7297 x
7298 for x in get_iterable(db_vnfr, "kdur")
7299 if (
7300 x.get("kdu-name") == kdu_name
7301 and x.get("count-index") == kdu_index
7302 )
7303 ),
7304 {},
7305 )
7306 if kdur.get("name"):
7307 kdur_name = kdur.get("name")
7308 break
7309
7310 await asyncio.sleep(10, loop=self.loop)
7311 else:
7312 if vdu_id and vdu_index is not None:
7313 raise LcmException(
7314 f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7315 )
7316 if kdu_name and kdu_index is not None:
7317 raise LcmException(
7318 f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7319 )
7320
7321 # TODO get_service
7322 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7323 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7324 host_port = "80"
7325 vnfr_id = vnfr_id.replace("-", "")
7326 variables = {
7327 "JOB_NAME": vnfr_id,
7328 "TARGET_IP": target_ip,
7329 "EXPORTER_POD_IP": host_name,
7330 "EXPORTER_POD_PORT": host_port,
7331 "NSR_ID": nsr_id,
7332 "VNF_MEMBER_INDEX": vnf_member_index,
7333 "VDUR_NAME": vdur_name,
7334 "KDUR_NAME": kdur_name,
7335 "ELEMENT_TYPE": element_type,
7336 }
7337 job_list = parse_job(job_data, variables)
7338 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7339 for job in job_list:
7340 if (
7341 not isinstance(job.get("job_name"), str)
7342 or vnfr_id not in job["job_name"]
7343 ):
7344 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7345 job["nsr_id"] = nsr_id
7346 job["vnfr_id"] = vnfr_id
7347 return job_list
7348
7349 async def rebuild_start_stop(
7350 self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
7351 ):
7352 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7353 self.logger.info(logging_text + "Enter")
7354 stage = ["Preparing the environment", ""]
7355 # database nsrs record
7356 db_nsr_update = {}
7357 vdu_vim_name = None
7358 vim_vm_id = None
7359 # in case of error, indicates what part of scale was failed to put nsr at error status
7360 start_deploy = time()
7361 try:
7362 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7363 vim_account_id = db_vnfr.get("vim-account-id")
7364 vim_info_key = "vim:" + vim_account_id
7365 vdu_id = additional_param["vdu_id"]
7366 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7367 vdur = find_in_list(
7368 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7369 )
7370 if vdur:
7371 vdu_vim_name = vdur["name"]
7372 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7373 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7374 else:
7375 raise LcmException("Target vdu is not found")
7376 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7377 # wait for any previous tasks in process
7378 stage[1] = "Waiting for previous operations to terminate"
7379 self.logger.info(stage[1])
7380 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7381
7382 stage[1] = "Reading from database."
7383 self.logger.info(stage[1])
7384 self._write_ns_status(
7385 nsr_id=nsr_id,
7386 ns_state=None,
7387 current_operation=operation_type.upper(),
7388 current_operation_id=nslcmop_id,
7389 )
7390 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7391
7392 # read from db: ns
7393 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7394 db_nsr_update["operational-status"] = operation_type
7395 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7396 # Payload for RO
7397 desc = {
7398 operation_type: {
7399 "vim_vm_id": vim_vm_id,
7400 "vnf_id": vnf_id,
7401 "vdu_index": additional_param["count-index"],
7402 "vdu_id": vdur["id"],
7403 "target_vim": target_vim,
7404 "vim_account_id": vim_account_id,
7405 }
7406 }
7407 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7408 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7409 self.logger.info("ro nsr id: {}".format(nsr_id))
7410 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7411 self.logger.info("response from RO: {}".format(result_dict))
7412 action_id = result_dict["action_id"]
7413 await self._wait_ng_ro(
7414 nsr_id,
7415 action_id,
7416 nslcmop_id,
7417 start_deploy,
7418 self.timeout.operate,
7419 None,
7420 "start_stop_rebuild",
7421 )
7422 return "COMPLETED", "Done"
7423 except (ROclient.ROClientException, DbException, LcmException) as e:
7424 self.logger.error("Exit Exception {}".format(e))
7425 exc = e
7426 except asyncio.CancelledError:
7427 self.logger.error("Cancelled Exception while '{}'".format(stage))
7428 exc = "Operation was cancelled"
7429 except Exception as e:
7430 exc = traceback.format_exc()
7431 self.logger.critical(
7432 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7433 )
7434 return "FAILED", "Error in operate VNF {}".format(exc)
7435
7436 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7437 """
7438 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7439
7440 :param: vim_account_id: VIM Account ID
7441
7442 :return: (cloud_name, cloud_credential)
7443 """
7444 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7445 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7446
7447 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7448 """
7449 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7450
7451 :param: vim_account_id: VIM Account ID
7452
7453 :return: (cloud_name, cloud_credential)
7454 """
7455 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7456 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7457
7458 async def migrate(self, nsr_id, nslcmop_id):
7459 """
7460 Migrate VNFs and VDUs instances in a NS
7461
7462 :param: nsr_id: NS Instance ID
7463 :param: nslcmop_id: nslcmop ID of migrate
7464
7465 """
7466 # Try to lock HA task here
7467 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7468 if not task_is_locked_by_me:
7469 return
7470 logging_text = "Task ns={} migrate ".format(nsr_id)
7471 self.logger.debug(logging_text + "Enter")
7472 # get all needed from database
7473 db_nslcmop = None
7474 db_nslcmop_update = {}
7475 nslcmop_operation_state = None
7476 db_nsr_update = {}
7477 target = {}
7478 exc = None
7479 # in case of error, indicates what part of scale was failed to put nsr at error status
7480 start_deploy = time()
7481
7482 try:
7483 # wait for any previous tasks in process
7484 step = "Waiting for previous operations to terminate"
7485 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7486
7487 self._write_ns_status(
7488 nsr_id=nsr_id,
7489 ns_state=None,
7490 current_operation="MIGRATING",
7491 current_operation_id=nslcmop_id,
7492 )
7493 step = "Getting nslcmop from database"
7494 self.logger.debug(
7495 step + " after having waited for previous tasks to be completed"
7496 )
7497 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7498 migrate_params = db_nslcmop.get("operationParams")
7499
7500 target = {}
7501 target.update(migrate_params)
7502 desc = await self.RO.migrate(nsr_id, target)
7503 self.logger.debug("RO return > {}".format(desc))
7504 action_id = desc["action_id"]
7505 await self._wait_ng_ro(
7506 nsr_id,
7507 action_id,
7508 nslcmop_id,
7509 start_deploy,
7510 self.timeout.migrate,
7511 operation="migrate",
7512 )
7513 except (ROclient.ROClientException, DbException, LcmException) as e:
7514 self.logger.error("Exit Exception {}".format(e))
7515 exc = e
7516 except asyncio.CancelledError:
7517 self.logger.error("Cancelled Exception while '{}'".format(step))
7518 exc = "Operation was cancelled"
7519 except Exception as e:
7520 exc = traceback.format_exc()
7521 self.logger.critical(
7522 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7523 )
7524 finally:
7525 self._write_ns_status(
7526 nsr_id=nsr_id,
7527 ns_state=None,
7528 current_operation="IDLE",
7529 current_operation_id=None,
7530 )
7531 if exc:
7532 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7533 nslcmop_operation_state = "FAILED"
7534 else:
7535 nslcmop_operation_state = "COMPLETED"
7536 db_nslcmop_update["detailed-status"] = "Done"
7537 db_nsr_update["detailed-status"] = "Done"
7538
7539 self._write_op_status(
7540 op_id=nslcmop_id,
7541 stage="",
7542 error_message="",
7543 operation_state=nslcmop_operation_state,
7544 other_update=db_nslcmop_update,
7545 )
7546 if nslcmop_operation_state:
7547 try:
7548 msg = {
7549 "nsr_id": nsr_id,
7550 "nslcmop_id": nslcmop_id,
7551 "operationState": nslcmop_operation_state,
7552 }
7553 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7554 except Exception as e:
7555 self.logger.error(
7556 logging_text + "kafka_write notification Exception {}".format(e)
7557 )
7558 self.logger.debug(logging_text + "Exit")
7559 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7560
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS: sends the heal order to the VIM through RO and then re-deploys
        the execution environments (N2VC) of the healed VDUs.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # previous statuses are restored on failure (see finally block)
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # RO part: recreate the affected resources at VIM level
            step = "Sending heal order to VIM"
            await self.heal_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                db_nslcmop=db_nslcmop,
                stage=stage,
            )
            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    target_vdu_list = target_vnf.get("additionalParams", {}).get(
                        "vdu", []
                    )
                    if not target_vdu_list:
                        # No explicit vdu list given: build one covering every
                        # existing vdur of this vnf
                        target_vdu_list = []
                        for existing_vdu in db_vnfr.get("vdur"):
                            vdu_name = existing_vdu.get("vdu-name", None)
                            vdu_index = existing_vdu.get("count-index", 0)
                            vdu_run_day1 = target_vnf.get("additionalParams", {}).get(
                                "run-day1", False
                            )
                            vdu_to_be_healed = {
                                "vdu-id": vdu_name,
                                "count-index": vdu_index,
                                "run-day1": vdu_run_day1,
                            }
                            target_vdu_list.append(vdu_to_be_healed)
                    for target_vdu in target_vdu_list:
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        # NOTE(review): direct ["additionalParams"] access below may
                        # raise KeyError when absent (lookups above use .get()) —
                        # confirm operationParams always include it
                        if not deploy_params_vdu.get("run-day1") and target_vnf[
                            "additionalParams"
                        ].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf[
                                "additionalParams"
                            ].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-index is 0
                        vdu_index = target_vdu.get("count-index", 0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if (
                                    instance["vdu-name"] == vdu_name
                                    and instance["count-index"] == vdu_index
                                ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance may still be None here,
                            # which would raise AttributeError below — confirm a
                            # matching vdur always exists for the target vdu
                            if vnf_ip_address == target_instance.get("ip-address"):
                                # registers deploy tasks into tasks_dict_info,
                                # awaited later in the finally block
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the N2VC tasks registered above before reporting status
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout.ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the statuses read at the start, then override with
                    # "failed" depending on which kind of task is still pending
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify the operation result through kafka (best effort)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7846
7847 async def heal_RO(
7848 self,
7849 logging_text,
7850 nsr_id,
7851 db_nslcmop,
7852 stage,
7853 ):
7854 """
7855 Heal at RO
7856 :param logging_text: preffix text to use at logging
7857 :param nsr_id: nsr identity
7858 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7859 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7860 :return: None or exception
7861 """
7862
7863 def get_vim_account(vim_account_id):
7864 nonlocal db_vims
7865 if vim_account_id in db_vims:
7866 return db_vims[vim_account_id]
7867 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7868 db_vims[vim_account_id] = db_vim
7869 return db_vim
7870
7871 try:
7872 start_heal = time()
7873 ns_params = db_nslcmop.get("operationParams")
7874 if ns_params and ns_params.get("timeout_ns_heal"):
7875 timeout_ns_heal = ns_params["timeout_ns_heal"]
7876 else:
7877 timeout_ns_heal = self.timeout.ns_heal
7878
7879 db_vims = {}
7880
7881 nslcmop_id = db_nslcmop["_id"]
7882 target = {
7883 "action_id": nslcmop_id,
7884 }
7885 self.logger.warning(
7886 "db_nslcmop={} and timeout_ns_heal={}".format(
7887 db_nslcmop, timeout_ns_heal
7888 )
7889 )
7890 target.update(db_nslcmop.get("operationParams", {}))
7891
7892 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7893 desc = await self.RO.recreate(nsr_id, target)
7894 self.logger.debug("RO return > {}".format(desc))
7895 action_id = desc["action_id"]
7896 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7897 await self._wait_ng_ro(
7898 nsr_id,
7899 action_id,
7900 nslcmop_id,
7901 start_heal,
7902 timeout_ns_heal,
7903 stage,
7904 operation="healing",
7905 )
7906
7907 # Updating NSR
7908 db_nsr_update = {
7909 "_admin.deployed.RO.operational-status": "running",
7910 "detailed-status": " ".join(stage),
7911 }
7912 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7913 self._write_op_status(nslcmop_id, stage)
7914 self.logger.debug(
7915 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7916 )
7917
7918 except Exception as e:
7919 stage[2] = "ERROR healing at VIM"
7920 # self.set_vnfr_at_error(db_vnfrs, str(e))
7921 self.logger.error(
7922 "Error healing at VIM {}".format(e),
7923 exc_info=not isinstance(
7924 e,
7925 (
7926 ROclient.ROClientException,
7927 LcmException,
7928 DbException,
7929 NgRoException,
7930 ),
7931 ),
7932 )
7933 raise
7934
    def _heal_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Schedule the N2VC healing of every execution environment of an element.

        For each execution environment declared in descriptor_config, locate (or
        create) its entry in <nsrs>._admin.deployed.VCA, then launch heal_N2VC as
        an asyncio task and register it in lcm_tasks / task_instantiation_info.
        Returns nothing; results are reported through the launched tasks and the
        database updates they perform.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )

        # Build the list of execution environments to process from the descriptor.
        charm_name = ""
        get_charm_name = False
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
            if "execution-environment-list" not in descriptor_config:
                # charm name is only required for ns charms
                get_charm_name = True
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Derive vca_type/vca_name from the ee_item: juju charm (proxy, native
            # or k8s-proxy) or helm chart (v2 "helm" / default "helm-v3").
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                if get_charm_name:
                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Search the deployed VCA list for an entry matching this element;
            # the for/else "else" branch runs only when no entry matched (no
            # break), in which case a new entry is created and appended.
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                    "charm_name": charm_name,
                }
                # vca_index was left at the last enumerated position (or -1 if
                # the list was empty); the new entry goes one slot after it.
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.heal_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
8096
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-attach and reconfigure one VCA execution environment after a heal.

        Mirrors instantiate_N2VC but for the healing flow: for native charms it
        waits for the healed VM, re-registers the execution environment and
        reinstalls the configuration software; for proxy/helm types it waits
        for RO to finish healing and re-injects the SSH key into the VM. Day-1
        primitives are re-executed only when deploy_params["run-day1"] is set.
        Writes progress into nsrs (configurationStatus and
        _admin.deployed.VCA.<vca_index>) and re-raises any failure wrapped in
        LcmException.
        """
        nsr_id = db_nsr["_id"]
        # prefix for all db updates of this VCA entry
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        step = ""
        try:
            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # Refine element type/namespace from NS down to VNF/VDU/KDU.
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                # other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # Pass the "config" initial primitive parameters (if any) at
                # configuration-software install time.
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1", False)
            self.logger.debug(
                "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id, vdu_id, runDay1)
            )
            if runDay1:
                # n2vc_redesign STEP 6 Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(
                        nsr_id, vca_deployed_list, vca_index
                    )

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs",
                                nsr_id,
                                {db_update_entry + "needed_terminate": True},
                            )
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8505
8506 async def _wait_heal_ro(
8507 self,
8508 nsr_id,
8509 timeout=600,
8510 ):
8511 start_time = time()
8512 while time() <= start_time + timeout:
8513 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8514 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
8515 "operational-status"
8516 ]
8517 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8518 if operational_status_ro != "healing":
8519 break
8520 await asyncio.sleep(15, loop=self.loop)
8521 else: # timeout_ns_deploy
8522 raise NgRoException("Timeout waiting ns to deploy")
8523
8524 async def vertical_scale(self, nsr_id, nslcmop_id):
8525 """
8526 Vertical Scale the VDUs in a NS
8527
8528 :param: nsr_id: NS Instance ID
8529 :param: nslcmop_id: nslcmop ID of migrate
8530
8531 """
8532 # Try to lock HA task here
8533 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
8534 if not task_is_locked_by_me:
8535 return
8536 logging_text = "Task ns={} vertical scale ".format(nsr_id)
8537 self.logger.debug(logging_text + "Enter")
8538 # get all needed from database
8539 db_nslcmop = None
8540 db_nslcmop_update = {}
8541 nslcmop_operation_state = None
8542 db_nsr_update = {}
8543 target = {}
8544 exc = None
8545 # in case of error, indicates what part of scale was failed to put nsr at error status
8546 start_deploy = time()
8547
8548 try:
8549 # wait for any previous tasks in process
8550 step = "Waiting for previous operations to terminate"
8551 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
8552
8553 self._write_ns_status(
8554 nsr_id=nsr_id,
8555 ns_state=None,
8556 current_operation="VerticalScale",
8557 current_operation_id=nslcmop_id,
8558 )
8559 step = "Getting nslcmop from database"
8560 self.logger.debug(
8561 step + " after having waited for previous tasks to be completed"
8562 )
8563 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
8564 operationParams = db_nslcmop.get("operationParams")
8565 target = {}
8566 target.update(operationParams)
8567 desc = await self.RO.vertical_scale(nsr_id, target)
8568 self.logger.debug("RO return > {}".format(desc))
8569 action_id = desc["action_id"]
8570 await self._wait_ng_ro(
8571 nsr_id,
8572 action_id,
8573 nslcmop_id,
8574 start_deploy,
8575 self.timeout.verticalscale,
8576 operation="verticalscale",
8577 )
8578 except (ROclient.ROClientException, DbException, LcmException) as e:
8579 self.logger.error("Exit Exception {}".format(e))
8580 exc = e
8581 except asyncio.CancelledError:
8582 self.logger.error("Cancelled Exception while '{}'".format(step))
8583 exc = "Operation was cancelled"
8584 except Exception as e:
8585 exc = traceback.format_exc()
8586 self.logger.critical(
8587 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
8588 )
8589 finally:
8590 self._write_ns_status(
8591 nsr_id=nsr_id,
8592 ns_state=None,
8593 current_operation="IDLE",
8594 current_operation_id=None,
8595 )
8596 if exc:
8597 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
8598 nslcmop_operation_state = "FAILED"
8599 else:
8600 nslcmop_operation_state = "COMPLETED"
8601 db_nslcmop_update["detailed-status"] = "Done"
8602 db_nsr_update["detailed-status"] = "Done"
8603
8604 self._write_op_status(
8605 op_id=nslcmop_id,
8606 stage="",
8607 error_message="",
8608 operation_state=nslcmop_operation_state,
8609 other_update=db_nslcmop_update,
8610 )
8611 if nslcmop_operation_state:
8612 try:
8613 msg = {
8614 "nsr_id": nsr_id,
8615 "nslcmop_id": nslcmop_id,
8616 "operationState": nslcmop_operation_state,
8617 }
8618 await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
8619 except Exception as e:
8620 self.logger.error(
8621 logging_text + "kafka_write notification Exception {}".format(e)
8622 )
8623 self.logger.debug(logging_text + "Exit")
8624 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")